@@ -348,10 +477,14 @@
'openshift-dedicated': ['docs_dedicated_v4'],
'openshift-online': ['docs_online', version],
'openshift-enterprise': ['docs_cp', version],
+ 'openshift-telco': ['docs_telco', version],
'openshift-aro' : ['docs_aro', version],
'openshift-rosa' : ['docs_rosa'],
'openshift-acs' : ['docs_acs', version],
- 'openshift-serverless' : ['docs_serverless', version]
+ 'openshift-serverless' : ['docs_serverless', version],
+ 'openshift-pipelines' : ['docs_pipelines', version],
+ 'openshift-builds' : ['docs_builds', version],
+ 'openshift-gitops' : ['docs_gitops', version]
};
// only OSD v3 docs have the version variable specified
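
For context, a minimal sketch of how a distro-to-index map like the one above could be consumed; the names searchIndexMap and resolveSearchIndex, and the "prefix-version" index-name format, are illustrative assumptions rather than code from this patch:

    // Illustrative only: assumes the map above is bound to `searchIndexMap`.
    function resolveSearchIndex(searchIndexMap, distroKey, version) {
      var entry = searchIndexMap[distroKey];   // e.g. ['docs_telco', version]
      if (!entry) {
        return null;                           // unknown distro key
      }
      // One-element entries (e.g. ['docs_rosa']) name an unversioned index;
      // two-element entries pair an index prefix with the docs version
      // (the comment above notes which distros carry a version).
      return entry.join('-');                  // assumed 'prefix-version' form
    }

    // e.g. resolveSearchIndex(map, 'openshift-telco', '4.15') -> 'docs_telco-4.15'
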
diff --git a/_templates/_search.html.erb b/_templates/_search.html.erb
index 0b81539b01a4..53a2428338c7 100644
--- a/_templates/_search.html.erb
+++ b/_templates/_search.html.erb
@@ -6,6 +6,10 @@
<%= render("_templates/_search_online.html") %>
<% elsif distro_key == 'openshift-enterprise' %>
<%= render("_templates/_search_enterprise.html.erb", :version => version) %>
+<% elsif distro_key == 'openshift-telco' %>
+<%= render("_templates/_search_telco.html.erb", :version => version) %>
+<% elsif distro_key == 'openshift-serverless' %>
+<%= render("_templates/_search_serverless.html.erb", :version => version) %>
<% else %>
<%= render("_templates/_search_other.html") %>
<% end %>
diff --git a/_templates/_search_serverless.html.erb b/_templates/_search_serverless.html.erb
new file mode 100644
index 000000000000..a0977d514304
--- /dev/null
+++ b/_templates/_search_serverless.html.erb
@@ -0,0 +1,21 @@
+    [template markup stripped from this excerpt; the template renders a "Search" input, an "×" clear control, and a "Show more results" link]
diff --git a/_templates/_search_telco.html.erb b/_templates/_search_telco.html.erb
new file mode 100644
index 000000000000..31720005f650
--- /dev/null
+++ b/_templates/_search_telco.html.erb
@@ -0,0 +1,21 @@
+    [template markup stripped from this excerpt; the template renders a "Search" input, an "×" clear control, and a "Show more results" link]
diff --git a/_topic_maps/_topic_map.yml b/_topic_maps/_topic_map.yml
index 32d779227cc5..2303f354ce14 100644
--- a/_topic_maps/_topic_map.yml
+++ b/_topic_maps/_topic_map.yml
@@ -24,7 +24,7 @@
---
Name: About
Dir: welcome
-Distros: openshift-enterprise,openshift-webscale,openshift-origin,openshift-online,openshift-dpu
+Distros: openshift-enterprise,openshift-webscale,openshift-origin,openshift-online,openshift-dpu,openshift-telco
Topics:
- Name: Welcome
File: index
@@ -51,21 +51,21 @@ Name: Release notes
Dir: release_notes
Distros: openshift-enterprise
Topics:
-- Name: OpenShift Container Platform 4.14 release notes
- File: ocp-4-14-release-notes
+- Name: OpenShift Container Platform 4.15 release notes
+ File: ocp-4-15-release-notes
---
- Name: Getting started
- Dir: getting_started
- Distros: openshift-enterprise
- Topics:
- - Name: Kubernetes overview
- File: kubernetes-overview
- - Name: OpenShift Container Platform overview
- File: openshift-overview
- - Name: Web console walkthrough
- File: openshift-web-console
- - Name: Command-line walkthrough
- File: openshift-cli
+Name: Getting started
+Dir: getting_started
+Distros: openshift-enterprise
+Topics:
+- Name: Kubernetes overview
+ File: kubernetes-overview
+- Name: OpenShift Container Platform overview
+ File: openshift-overview
+- Name: Web console walkthrough
+ File: openshift-web-console
+- Name: Command-line walkthrough
+ File: openshift-cli
---
Name: Architecture
Dir: architecture
@@ -87,6 +87,9 @@ Topics:
- Name: Control plane architecture
File: control-plane
Distros: openshift-enterprise,openshift-origin,openshift-online
+- Name: NVIDIA GPU architecture overview
+ File: nvidia-gpu-architecture-overview
+ Distros: openshift-enterprise
- Name: Understanding OpenShift development
File: understanding-development
Distros: openshift-enterprise
@@ -132,20 +135,22 @@ Topics:
Dir: installing_alibaba
Distros: openshift-origin,openshift-enterprise
Topics:
- - Name: Preparing to install on Alibaba Cloud
- File: preparing-to-install-on-alibaba
- - Name: Creating the required Alibaba Cloud resources
- File: manually-creating-alibaba-ram
- - Name: Installing a cluster quickly on Alibaba Cloud
- File: installing-alibaba-default
- - Name: Installing a cluster on Alibaba Cloud with customizations
- File: installing-alibaba-customizations
- - Name: Installing a cluster on Alibaba Cloud with network customizations
- File: installing-alibaba-network-customizations
- - Name: Installing a cluster on Alibaba Cloud into an existing VPC
- File: installing-alibaba-vpc
- - Name: Uninstalling a cluster on Alibaba Cloud
- File: uninstall-cluster-alibaba
+ - Name: Preparing to install on Alibaba Cloud
+ File: preparing-to-install-on-alibaba
+ - Name: Creating the required Alibaba Cloud resources
+ File: manually-creating-alibaba-ram
+ - Name: Installing a cluster quickly on Alibaba Cloud
+ File: installing-alibaba-default
+ - Name: Installing a cluster on Alibaba Cloud with customizations
+ File: installing-alibaba-customizations
+ - Name: Installing a cluster on Alibaba Cloud with network customizations
+ File: installing-alibaba-network-customizations
+ - Name: Installing a cluster on Alibaba Cloud into an existing VPC
+ File: installing-alibaba-vpc
+ - Name: Installation configuration parameters for Alibaba Cloud
+ File: installation-config-parameters-alibaba
+ - Name: Uninstalling a cluster on Alibaba Cloud
+ File: uninstall-cluster-alibaba
- Name: Installing on AWS
Dir: installing_aws
Distros: openshift-origin,openshift-enterprise
@@ -154,8 +159,6 @@ Topics:
File: preparing-to-install-on-aws
- Name: Configuring an AWS account
File: installing-aws-account
- - Name: Manually creating IAM
- File: manually-creating-iam
- Name: Installing a cluster quickly on AWS
File: installing-aws-default
- Name: Installing a cluster on AWS with customizations
@@ -176,7 +179,7 @@ Topics:
File: installing-aws-china
- Name: Installing a cluster on AWS using CloudFormation templates
File: installing-aws-user-infra
- - Name: Installing a cluster using AWS Local Zones
+ - Name: Installing a cluster on AWS with worker nodes on AWS Local Zones
File: installing-aws-localzone
- Name: Installing a cluster on AWS in a restricted network with user-provisioned infrastructure
File: installing-restricted-networks-aws
@@ -184,10 +187,10 @@ Topics:
File: installing-aws-outposts-remote-workers
- Name: Installing a three-node cluster on AWS
File: installing-aws-three-node
- - Name: Expanding a cluster with on-premise bare metal nodes
- File: installing-aws-expanding-a-cluster-with-on-premise-bare-metal-nodes
- Name: Uninstalling a cluster on AWS
File: uninstalling-cluster-aws
+ - Name: Installation configuration parameters for AWS
+ File: installation-config-parameters-aws
- Name: Installing on Azure
Dir: installing_azure
Distros: openshift-origin,openshift-enterprise
@@ -196,8 +199,6 @@ Topics:
File: preparing-to-install-on-azure
- Name: Configuring an Azure account
File: installing-azure-account
- - Name: Manually creating IAM
- File: manually-creating-iam-azure
- Name: Enabling user-managed encryption on Azure
File: enabling-user-managed-encryption-azure
- Name: Installing a cluster quickly on Azure
@@ -212,12 +213,18 @@ Topics:
File: installing-azure-private
- Name: Installing a cluster on Azure into a government region
File: installing-azure-government-region
+ - Name: Installing a cluster on Azure in a restricted network with user-provisioned infrastructure
+ File: installing-restricted-networks-azure-user-provisioned
- Name: Installing a cluster on Azure using ARM templates
File: installing-azure-user-infra
+ - Name: Installing a cluster on Azure in a restricted network
+ File: installing-restricted-networks-azure-installer-provisioned
- Name: Installing a three-node cluster on Azure
File: installing-azure-three-node
- Name: Uninstalling a cluster on Azure
File: uninstalling-cluster-azure
+ - Name: Installation configuration parameters for Azure
+ File: installation-config-parameters-azure
- Name: Installing on Azure Stack Hub
Dir: installing_azure_stack_hub
Distros: openshift-origin,openshift-enterprise
@@ -232,6 +239,8 @@ Topics:
File: installing-azure-stack-hub-network-customizations
- Name: Installing a cluster on Azure Stack Hub using ARM templates
File: installing-azure-stack-hub-user-infra
+ - Name: Installation configuration parameters for Azure Stack Hub
+ File: installation-config-parameters-ash
- Name: Uninstalling a cluster on Azure Stack Hub
File: uninstalling-cluster-azure-stack-hub
- Name: Installing on GCP
@@ -242,8 +251,6 @@ Topics:
File: preparing-to-install-on-gcp
- Name: Configuring a GCP project
File: installing-gcp-account
- - Name: Manually creating IAM
- File: manually-creating-iam-gcp
- Name: Installing a cluster quickly on GCP
File: installing-gcp-default
- Name: Installing a cluster on GCP with customizations
@@ -266,27 +273,35 @@ Topics:
File: installing-restricted-networks-gcp
- Name: Installing a three-node cluster on GCP
File: installing-gcp-three-node
+ - Name: Installation configuration parameters for GCP
+ File: installation-config-parameters-gcp
- Name: Uninstalling a cluster on GCP
File: uninstalling-cluster-gcp
-- Name: Installing on IBM Cloud VPC
+- Name: Installing on IBM Cloud
Dir: installing_ibm_cloud_public
Distros: openshift-origin,openshift-enterprise
Topics:
- - Name: Preparing to install on IBM Cloud VPC
+ - Name: Preparing to install on IBM Cloud
File: preparing-to-install-on-ibm-cloud
- Name: Configuring an IBM Cloud account
File: installing-ibm-cloud-account
- - Name: Configuring IAM for IBM Cloud VPC
+ - Name: Configuring IAM for IBM Cloud
File: configuring-iam-ibm-cloud
- - Name: Installing a cluster on IBM Cloud VPC with customizations
+ - Name: User-managed encryption
+ File: user-managed-encryption-ibm-cloud
+ - Name: Installing a cluster on IBM Cloud with customizations
File: installing-ibm-cloud-customizations
- - Name: Installing a cluster on IBM Cloud VPC with network customizations
+ - Name: Installing a cluster on IBM Cloud with network customizations
File: installing-ibm-cloud-network-customizations
- - Name: Installing a cluster on IBM Cloud VPC into an existing VPC
+ - Name: Installing a cluster on IBM Cloud into an existing VPC
File: installing-ibm-cloud-vpc
- - Name: Installing a private cluster on IBM Cloud VPC
+ - Name: Installing a private cluster on IBM Cloud
File: installing-ibm-cloud-private
- - Name: Uninstalling a cluster on IBM Cloud VPC
+ - Name: Installing a cluster on IBM Cloud in a restricted network
+ File: installing-ibm-cloud-restricted
+ - Name: Installation configuration parameters for IBM Cloud
+ File: installation-config-parameters-ibm-cloud-vpc
+ - Name: Uninstalling a cluster on IBM Cloud
File: uninstalling-cluster-ibm-cloud
- Name: Installing on Nutanix
Dir: installing_nutanix
@@ -294,6 +309,8 @@ Topics:
Topics:
- Name: Preparing to install on Nutanix
File: preparing-to-install-on-nutanix
+ - Name: Fault tolerant deployments
+ File: nutanix-failure-domains
- Name: Installing a cluster on Nutanix
File: installing-nutanix-installer-provisioned
- Name: Installing a cluster on Nutanix in a restricted network
@@ -302,6 +319,8 @@ Topics:
File: installing-nutanix-three-node
- Name: Uninstalling a cluster on Nutanix
File: uninstalling-cluster-nutanix
+ - Name: Installation configuration parameters for Nutanix
+ File: installation-config-parameters-nutanix
- Name: Installing on bare metal
Dir: installing_bare_metal
Distros: openshift-origin,openshift-enterprise
@@ -316,6 +335,8 @@ Topics:
File: installing-restricted-networks-bare-metal
- Name: Scaling a user-provisioned installation with the Bare Metal Operator
File: scaling-a-user-provisioned-cluster-with-the-bare-metal-operator
+ - Name: Installation configuration parameters for bare metal
+ File: installation-config-parameters-bare-metal
- Name: Installing on-premise with Assisted Installer
Dir: installing_on_prem_assisted
Distros: openshift-enterprise
@@ -332,8 +353,12 @@ Topics:
File: understanding-disconnected-installation-mirroring
- Name: Installing a cluster with Agent-based Installer
File: installing-with-agent-based-installer
+ - Name: Preparing PXE assets for OCP
+ File: prepare-pxe-assets-agent
- Name: Preparing an Agent-based installed cluster for the multicluster engine for Kubernetes
File: preparing-an-agent-based-installed-cluster-for-mce
+ - Name: Installation configuration parameters for the Agent-based Installer
+ File: installation-config-parameters-agent
- Name: Installing on a single node
Dir: installing_sno
Distros: openshift-enterprise
@@ -352,13 +377,13 @@ Topics:
File: ipi-install-prerequisites
- Name: Setting up the environment for an OpenShift installation
File: ipi-install-installation-workflow
- - Name: Post-installation configuration
+ - Name: Postinstallation configuration
File: ipi-install-post-installation-configuration
- Name: Expanding the cluster
File: ipi-install-expanding-the-cluster
- Name: Troubleshooting
File: ipi-install-troubleshooting
-- Name: Installing bare metal clusters on IBM Cloud
+- Name: Installing IBM Cloud Bare Metal (Classic)
Dir: installing_ibm_cloud
Distros: openshift-origin,openshift-enterprise
Topics:
@@ -366,26 +391,22 @@ Topics:
File: install-ibm-cloud-prerequisites
- Name: Installation workflow
File: install-ibm-cloud-installation-workflow
-- Name: Installing with z/VM on IBM Z and IBM LinuxONE
+- Name: Installing on IBM Z and IBM LinuxONE
Dir: installing_ibm_z
Distros: openshift-enterprise
Topics:
- - Name: Preparing to install with z/VM on IBM Z and IBM LinuxONE
+ - Name: Preparing to install on IBM Z and IBM LinuxONE
File: preparing-to-install-on-ibm-z
- Name: Installing a cluster with z/VM on IBM Z and IBM LinuxONE
File: installing-ibm-z
- Name: Restricted network IBM Z installation with z/VM
File: installing-restricted-networks-ibm-z
-- Name: Installing with RHEL KVM on IBM Z and IBM LinuxONE
- Dir: installing_ibm_z
- Distros: openshift-enterprise
- Topics:
- - Name: Preparing to install with RHEL KVM on IBM Z and IBM LinuxONE
- File: preparing-to-install-on-ibm-z-kvm
- Name: Installing a cluster with RHEL KVM on IBM Z and IBM LinuxONE
File: installing-ibm-z-kvm
- Name: Restricted network IBM Z installation with RHEL KVM
File: installing-restricted-networks-ibm-z-kvm
+ - Name: Installation configuration parameters for IBM Z and IBM LinuxONE
+ File: installation-config-parameters-ibm-z
- Name: Installing on IBM Power
Dir: installing_ibm_power
Distros: openshift-enterprise
@@ -396,6 +417,8 @@ Topics:
File: installing-ibm-power
- Name: Restricted network IBM Power installation
File: installing-restricted-networks-ibm-power
+ - Name: Installation configuration parameters for IBM Power
+ File: installation-config-parameters-ibm-power
- Name: Installing on IBM Power Virtual Server
Dir: installing_ibm_powervs
Distros: openshift-enterprise
@@ -416,6 +439,8 @@ Topics:
File: installing-restricted-networks-ibm-power-vs
- Name: Uninstalling a cluster on IBM Power Virtual Server
File: uninstalling-cluster-ibm-power-vs
+ - Name: Installation configuration parameters for IBM Power Virtual Server
+ File: installation-config-parameters-ibm-power-vs
- Name: Installing on OpenStack
Dir: installing_openstack
Distros: openshift-origin,openshift-enterprise
@@ -428,12 +453,8 @@ Topics:
# File: installing-openstack-installer
- Name: Installing a cluster on OpenStack with customizations
File: installing-openstack-installer-custom
- - Name: Installing a cluster on OpenStack with Kuryr
- File: installing-openstack-installer-kuryr
- Name: Installing a cluster on OpenStack on your own infrastructure
File: installing-openstack-user
- - Name: Installing a cluster on OpenStack with Kuryr on your own infrastructure
- File: installing-openstack-user-kuryr
- Name: Installing a cluster on OpenStack in a restricted network
File: installing-openstack-installer-restricted
- Name: OpenStack Cloud Controller Manager reference guide
@@ -444,67 +465,57 @@ Topics:
File: uninstalling-cluster-openstack
- Name: Uninstalling a cluster on OpenStack from your own infrastructure
File: uninstalling-openstack-user
-- Name: Installing on RHV
- Dir: installing_rhv
- Distros: openshift-enterprise
- Topics:
- - Name: Preparing to install on RHV
- File: preparing-to-install-on-rhv
- - Name: Installing a cluster quickly on RHV
- File: installing-rhv-default
- - Name: Installing a cluster on RHV with customizations
- File: installing-rhv-customizations
- - Name: Installing a cluster on RHV with user-provisioned infrastructure
- File: installing-rhv-user-infra
- - Name: Installing a cluster on RHV in a restricted network
- File: installing-rhv-restricted-network
- - Name: Uninstalling a cluster on RHV
- File: uninstalling-cluster-rhv
-- Name: Installing on oVirt
- Dir: installing_rhv
- Distros: openshift-origin
- Topics:
- - Name: Preparing to install on RHV
- File: preparing-to-install-on-rhv
- - Name: Installing a cluster quickly on oVirt
- File: installing-rhv-default
- - Name: Installing a cluster on oVirt with customizations
- File: installing-rhv-customizations
- - Name: Installing a cluster on oVirt with user-provisioned infrastructure
- File: installing-rhv-user-infra
- - Name: Installing a cluster on RHV in a restricted network
- File: installing-rhv-restricted-network
- - Name: Uninstalling a cluster on oVirt
- File: uninstalling-cluster-rhv
+ - Name: Installation configuration parameters for OpenStack
+ File: installation-config-parameters-openstack
- Name: Installing on vSphere
Dir: installing_vsphere
Distros: openshift-origin,openshift-enterprise
Topics:
- - Name: Preparing to install on vSphere
+ - Name: Installation methods
File: preparing-to-install-on-vsphere
- - Name: Installing a cluster on vSphere
- File: installing-vsphere-installer-provisioned
- - Name: Installing a cluster on vSphere with customizations
- File: installing-vsphere-installer-provisioned-customizations
- - Name: Installing a cluster on vSphere with network customizations
- File: installing-vsphere-installer-provisioned-network-customizations
- - Name: Installing a cluster on vSphere with user-provisioned infrastructure
- File: installing-vsphere
- - Name: Installing a cluster on vSphere with user-provisioned infrastructure and network customizations
- File: installing-vsphere-network-customizations
- - Name: Installing a cluster on vSphere in a restricted network
- File: installing-restricted-networks-installer-provisioned-vsphere
- - Name: Installing a cluster on vSphere in a restricted network with user-provisioned infrastructure
- File: installing-restricted-networks-vsphere
- - Name: Installing a three-node cluster on vSphere
+ - Name: Installer-provisioned infrastructure
+ Dir: ipi
+ Distros: openshift-origin,openshift-enterprise
+ Topics:
+ - Name: vSphere installation requirements
+ File: ipi-vsphere-installation-reqs
+ - Name: Preparing to install a cluster
+ File: ipi-vsphere-preparing-to-install
+ - Name: Installing a cluster
+ File: installing-vsphere-installer-provisioned
+ - Name: Installing a cluster with customizations
+ File: installing-vsphere-installer-provisioned-customizations
+ - Name: Installing a cluster with network customizations
+ File: installing-vsphere-installer-provisioned-network-customizations
+ - Name: Installing a cluster in a restricted network
+ File: installing-restricted-networks-installer-provisioned-vsphere
+ - Name: User-provisioned infrastructure
+ Dir: upi
+ Distros: openshift-origin,openshift-enterprise
+ Topics:
+ - Name: vSphere installation requirements
+ File: upi-vsphere-installation-reqs
+ - Name: Preparing to install a cluster
+ File: upi-vsphere-preparing-to-install
+ - Name: Installing a cluster
+ File: installing-vsphere
+ - Name: Installing a cluster with network customizations
+ File: installing-vsphere-network-customizations
+ - Name: Installing a cluster in a restricted network
+ File: installing-restricted-networks-vsphere
+ - Name: Assisted Installer
+ Distros: openshift-enterprise
+ File: installing-vsphere-assisted-installer
+ - Name: Agent-based Installer
+ Distros: openshift-enterprise
+ File: installing-vsphere-agent-based-installer
+ - Name: Installing a three-node cluster
File: installing-vsphere-three-node
- - Name: Configuring the vSphere connection settings after an installation
- File: installing-vsphere-post-installation-configuration
- - Name: Uninstalling a cluster on vSphere that uses installer-provisioned infrastructure
+ - Name: Uninstalling a cluster
File: uninstalling-cluster-vsphere-installer-provisioned
- Name: Using the vSphere Problem Detector Operator
File: using-vsphere-problem-detector-operator
- - Name: Installation configuration parameters for vSphere
+ - Name: Installation configuration parameters
File: installation-config-parameters-vsphere
- Name: Installing on any platform
Dir: installing_platform_agnostic
@@ -520,8 +531,8 @@ Topics:
File: installing-customizing
- Name: Configuring your firewall
File: configuring-firewall
- - Name: Enabling Linux control group version 2 (cgroup v2)
- File: enabling-cgroup-v2
+ - Name: Enabling Linux control group version 1 (cgroup v1)
+ File: enabling-cgroup-v1
Distros: openshift-enterprise
- Name: Validating an installation
File: validating-an-installation
@@ -529,12 +540,15 @@ Topics:
- Name: Troubleshooting installation issues
File: installing-troubleshooting
Distros: openshift-origin,openshift-enterprise
+- Name: Support for FIPS cryptography
+ File: installing-fips
+ Distros: openshift-enterprise,openshift-online
---
-Name: Post-installation configuration
+Name: Postinstallation configuration
Dir: post_installation_configuration
Distros: openshift-origin,openshift-enterprise
Topics:
-- Name: Post-installation configuration overview
+- Name: Postinstallation configuration overview
Distros: openshift-enterprise
File: index
- Name: Configuring a private cluster
@@ -543,10 +557,31 @@ Topics:
- Name: Bare metal configuration
File: bare-metal-configuration
- Name: Configuring multi-architecture compute machines on an OpenShift cluster
+ Dir: configuring-multi-arch-compute-machines
Distros: openshift-enterprise
- File: multi-architecture-configuration
+ Topics:
+ - Name: About clusters with multi-architecture compute machines
+ File: multi-architecture-configuration
+ - Name: Creating a cluster with multi-architecture compute machines on Azure
+ File: creating-multi-arch-compute-nodes-azure
+ - Name: Creating a cluster with multi-architecture compute machines on AWS
+ File: creating-multi-arch-compute-nodes-aws
+ - Name: Creating a cluster with multi-architecture compute machines on GCP
+ File: creating-multi-arch-compute-nodes-gcp
+ - Name: Creating a cluster with multi-architecture compute machines on bare metal
+ File: creating-multi-arch-compute-nodes-bare-metal
+ - Name: Creating a cluster with multi-architecture compute machines on IBM Z and IBM LinuxONE with z/VM
+ File: creating-multi-arch-compute-nodes-ibm-z
+ - Name: Creating a cluster with multi-architecture compute machines on IBM Z and IBM LinuxONE with RHEL KVM
+ File: creating-multi-arch-compute-nodes-ibm-z-kvm
+ - Name: Creating a cluster with multi-architecture compute machines on IBM Power
+ File: creating-multi-arch-compute-nodes-ibm-power
+ - Name: Managing your cluster with multi-architecture compute machines
+ File: multi-architecture-compute-managing
- Name: Enabling encryption on a vSphere cluster
File: vsphere-post-installation-encryption
+- Name: Configuring the vSphere connection settings after an installation
+ File: installing-vsphere-post-installation-configuration
- Name: Machine configuration tasks
File: machine-configuration-tasks
- Name: Cluster tasks
@@ -576,6 +611,12 @@ Topics:
- Name: Fedora CoreOS (FCOS) image layering
File: coreos-layering
Distros: openshift-origin
+- Name: AWS Local Zone tasks
+ File: aws-compute-edge-tasks
+ Distros: openshift-enterprise
+- Name: Adding failure domains to an existing Nutanix cluster
+ File: adding-nutanix-failure-domains
+ Distros: openshift-origin,openshift-enterprise
---
Name: Updating clusters
Dir: updating
@@ -583,25 +624,33 @@ Distros: openshift-origin,openshift-enterprise
Topics:
- Name: Updating clusters overview
File: index
+ Distros: openshift-origin
- Name: Understanding OpenShift updates
Dir: understanding_updates
- Distros: openshift-enterprise
Topics:
- - Name: Introduction to OpenShift updates
- File: intro-to-updates
- - Name: How cluster updates work
- File: how-updates-work
-- Name: Understanding update channels and releases
- File: understanding-upgrade-channels-release
- Distros: openshift-enterprise
-- Name: Understanding OpenShift update duration
- File: understanding-openshift-update-duration
-- Name: Preparing to update to OpenShift Container Platform 4.13
- File: updating-cluster-prepare
- Distros: openshift-enterprise
-- Name: Preparing to update to OKD 4.13
- File: updating-cluster-prepare
- Distros: openshift-origin
+ - Name: Introduction to OpenShift updates
+ File: intro-to-updates
+ - Name: How cluster updates work
+ File: how-updates-work
+ Distros: openshift-enterprise
+ - Name: Understanding update channels and releases
+ File: understanding-update-channels-release
+ Distros: openshift-enterprise
+ - Name: Understanding OpenShift update duration
+ File: understanding-openshift-update-duration
+- Name: Preparing to update a cluster
+ Dir: preparing_for_updates
+ Topics:
+ - Name: Preparing to update to OpenShift Container Platform 4.15
+ File: updating-cluster-prepare
+ Distros: openshift-enterprise
+ - Name: Preparing to update to OKD 4.15
+ File: updating-cluster-prepare
+ Distros: openshift-origin
+ - Name: Preparing to update a cluster with manually maintained credentials
+ File: preparing-manual-creds-update
+ - Name: Preflight validation for Kernel Module Management (KMM) Modules
+ File: kmm-preflight-validation
- Name: Performing a cluster update
Dir: updating_a_cluster
Topics:
@@ -642,12 +691,14 @@ Topics:
File: migrating-to-multi-payload
- Name: Updating hosted control planes
File: updating-hosted-control-planes
-- Name: Preparing to update a cluster with manually maintained credentials
- File: preparing-manual-creds-update
-- Name: Preflight validation for Kernel Module Management (KMM) Modules
- File: kmm-preflight-validation
-# - Name: Troubleshooting an update
-# File: updating-troubleshooting
+- Name: Troubleshooting a cluster update
+ Dir: troubleshooting_updates
+ Distros: openshift-enterprise,openshift-origin
+ Topics:
+ #- Name: Recovering when an update fails before it is applied
+ # File: recovering-update-before-applied
+ - Name: Gathering data about your cluster update
+ File: gathering-data-cluster-update
---
Name: Support
Dir: support
@@ -674,7 +725,7 @@ Topics:
File: enabling-remote-health-reporting
- Name: Using Insights to identify issues with your cluster
File: using-insights-to-identify-issues-with-your-cluster
- - Name: Using Insights Operator
+ - Name: Using the Insights Operator
File: using-insights-operator
- Name: Using remote health reporting in a restricted network
File: remote-health-reporting-from-restricted-network
@@ -725,7 +776,7 @@ Topics:
File: web-console-overview
- Name: Accessing the web console
File: web-console
-- Name: Viewing cluster information
+- Name: Using the OpenShift Container Platform dashboard to get cluster information
File: using-dashboard-to-get-cluster-information
- Name: Adding user preferences
File: adding-user-preferences
@@ -927,37 +978,47 @@ Topics:
- Name: Compliance Operator
Dir: compliance_operator
Topics:
+ - Name: Compliance Operator overview
+ File: co-overview
- Name: Compliance Operator release notes
File: compliance-operator-release-notes
- - Name: Supported compliance profiles
- File: compliance-operator-supported-profiles
- - Name: Installing the Compliance Operator
- File: compliance-operator-installation
- - Name: Updating the Compliance Operator
- File: compliance-operator-updating
- - Name: Compliance Operator scans
- File: compliance-scans
- - Name: Understanding the Compliance Operator
- File: compliance-operator-understanding
- - Name: Managing the Compliance Operator
- File: compliance-operator-manage
- - Name: Tailoring the Compliance Operator
- File: compliance-operator-tailor
- - Name: Retrieving Compliance Operator raw results
- File: compliance-operator-raw-results
- - Name: Managing Compliance Operator remediation
- File: compliance-operator-remediation
- - Name: Performing advanced Compliance Operator tasks
- File: compliance-operator-advanced
- - Name: Troubleshooting the Compliance Operator
- File: compliance-operator-troubleshooting
- - Name: Uninstalling the Compliance Operator
- File: compliance-operator-uninstallation
- - Name: Using the oc-compliance plugin
- File: oc-compliance-plug-in-using
- - Name: Understanding the Custom Resource Definitions
- File: compliance-operator-crd
-
+ - Name: Compliance Operator concepts
+ Dir: co-concepts
+ Topics:
+ - Name: Understanding the Compliance Operator
+ File: compliance-operator-understanding
+ - Name: Understanding the Custom Resource Definitions
+ File: compliance-operator-crd
+ - Name: Compliance Operator management
+ Dir: co-management
+ Topics:
+ - Name: Installing the Compliance Operator
+ File: compliance-operator-installation
+ - Name: Updating the Compliance Operator
+ File: compliance-operator-updating
+ - Name: Managing the Compliance Operator
+ File: compliance-operator-manage
+ - Name: Uninstalling the Compliance Operator
+ File: compliance-operator-uninstallation
+ - Name: Compliance Operator scan management
+ Dir: co-scans
+ Topics:
+ - Name: Supported compliance profiles
+ File: compliance-operator-supported-profiles
+ - Name: Compliance Operator scans
+ File: compliance-scans
+ - Name: Tailoring the Compliance Operator
+ File: compliance-operator-tailor
+ - Name: Retrieving Compliance Operator raw results
+ File: compliance-operator-raw-results
+ - Name: Managing Compliance Operator remediation
+ File: compliance-operator-remediation
+ - Name: Performing advanced Compliance Operator tasks
+ File: compliance-operator-advanced
+ - Name: Troubleshooting the Compliance Operator
+ File: compliance-operator-troubleshooting
+ - Name: Using the oc-compliance plugin
+ File: oc-compliance-plug-in-using
- Name: File Integrity Operator
Dir: file_integrity_operator
Topics:
@@ -996,6 +1057,22 @@ Topics:
File: spo-troubleshooting
- Name: Uninstalling the Security Profiles Operator
File: spo-uninstalling
+- Name: NBDE Tang Server Operator
+ Dir: nbde_tang_server_operator
+ Distros: openshift-enterprise
+ Topics:
+ - Name: NBDE Tang Server Operator overview
+ File: nbde-tang-server-operator-overview
+ - Name: NBDE Tang Server Operator release notes
+ File: nbde-tang-server-operator-release-notes
+ - Name: Understanding the NBDE Tang Server Operator
+ File: nbde-tang-server-operator-understanding
+ - Name: Installing the NBDE Tang Server Operator
+ File: nbde-tang-server-operator-installing
+ - Name: Configuring and managing Tang servers using the NBDE Tang Server Operator
+ File: nbde-tang-server-operator-configuring-managing
+ - Name: Identifying the URL of a Tang server deployed with the NBDE Tang Server Operator
+ File: nbde-tang-server-operator-identifying-url
- Name: cert-manager Operator for Red Hat OpenShift
Dir: cert_manager_operator
Distros: openshift-enterprise
@@ -1006,8 +1083,10 @@ Topics:
File: cert-manager-operator-release-notes
- Name: Installing the cert-manager Operator for Red Hat OpenShift
File: cert-manager-operator-install
- - Name: Managing certificates with an ACME issuer
+ - Name: Configuring an ACME issuer
File: cert-manager-operator-issuer-acme
+ - Name: Configuring certificates with an issuer
+ File: cert-manager-creating-certificate
- Name: Enabling monitoring for the cert-manager Operator for Red Hat OpenShift
File: cert-manager-monitoring
- Name: Configuring the egress proxy for the cert-manager Operator for Red Hat OpenShift
@@ -1135,16 +1214,14 @@ Topics:
Topics:
- Name: About the Cloud Credential Operator
File: about-cloud-credential-operator
- - Name: Using mint mode
+ - Name: Mint mode
File: cco-mode-mint
- - Name: Using passthrough mode
+ - Name: Passthrough mode
File: cco-mode-passthrough
- - Name: Using manual mode
+ - Name: Manual mode with long-term credentials for components
File: cco-mode-manual
- - Name: Using manual mode with AWS Security Token Service
- File: cco-mode-sts
- - Name: Using manual mode with GCP Workload Identity
- File: cco-mode-gcp-workload-identity
+ - Name: Manual mode with short-term credentials for components
+ File: cco-short-term-creds
---
Name: Networking
Dir: networking
@@ -1158,6 +1235,8 @@ Topics:
File: accessing-hosts
- Name: Networking Operators overview
File: networking-operators-overview
+- Name: Networking dashboards
+ File: networking-dashboards
- Name: Understanding the Cluster Network Operator
File: cluster-network-operator
Distros: openshift-enterprise,openshift-origin
@@ -1188,18 +1267,27 @@ Topics:
File: configuring-cluster-network-range
- Name: Configuring IP failover
File: configuring-ipfailover
-- Name: Configuring interface-level network sysctls
- File: setting-interface-level-network-sysctls
+- Name: Configuring system controls and interface attributes using the tuning plugin
+ File: configure-syscontrols-interface-tuning-cni
- Name: Using SCTP
File: using-sctp
Distros: openshift-enterprise,openshift-origin
- Name: Using PTP hardware
- File: using-ptp
-- Name: Developing PTP events consumer applications
- File: ptp-cloud-events-consumer-dev-reference
+ Dir: ptp
+ Topics:
+ - Name: About PTP in OpenShift clusters
+ File: about-ptp
+ - Name: Configuring PTP hardware
+ File: configuring-ptp
+ - Name: Using PTP events
+ File: using-ptp-events
+ - Name: Developing PTP events consumer applications
+ File: ptp-cloud-events-consumer-dev-reference
- Name: External DNS Operator
Dir: external_dns_operator
Topics:
+ - Name: External DNS Operator release notes
+ File: external-dns-operator-release-notes
- Name: Understanding the External DNS Operator
File: understanding-external-dns-operator
- Name: Installing the External DNS Operator
@@ -1233,6 +1321,8 @@ Topics:
File: default-network-policy
- Name: Configuring multitenant isolation with network policy
File: multitenant-network-policy
+- Name: CIDR range definitions
+ File: cidr-range-definitions
- Name: AWS Load Balancer Operator
Dir: aws_load_balancer_operator
Distros: openshift-enterprise,openshift-origin
@@ -1243,7 +1333,7 @@ Topics:
File: understanding-aws-load-balancer-operator
- Name: Installing the AWS Load Balancer Operator
File: install-aws-load-balancer-operator
- - Name: Installing the AWS Load Balancer Operator on Security Token Service cluster
+ - Name: Installing the AWS Load Balancer Operator on a Security Token Service cluster
File: installing-albo-sts-cluster
- Name: Creating an instance of the AWS Load Balancer Controller
File: create-instance-aws-load-balancer-controller
@@ -1293,7 +1383,7 @@ Topics:
File: configuring-sriov-ib-attach
- Name: Adding a pod to an SR-IOV network
File: add-pod
- - Name: Tuning sysctl settings on an SR-IOV network
+ - Name: Configuring interface-level network sysctl settings and all-multicast mode for SR-IOV networks
File: configuring-interface-sysctl-sriov-device
- Name: Using high performance multicast
File: using-sriov-multicast
@@ -1323,14 +1413,14 @@ Topics:
File: migrate-from-openshift-sdn
- Name: Rolling back to the OpenShift SDN network plugin
File: rollback-to-openshift-sdn
- - Name: Migrating from Kuryr
- File: migrate-from-kuryr-sdn
- Name: Converting to IPv4/IPv6 dual stack networking
File: converting-to-dual-stack
- Name: Logging for egress firewall and network policy rules
File: logging-network-policy
- Name: Configuring IPsec encryption
File: configuring-ipsec-ovn
+ - Name: Configuring an external gateway through a secondary network interface
+ File: configuring-secondary-external-gateway
- Name: Configuring an egress firewall for a project
File: configuring-egress-firewall-ovn
- Name: Viewing an egress firewall for a project
@@ -1343,6 +1433,8 @@ Topics:
File: configuring-egress-ips-ovn
- Name: Assigning an egress IP address
File: assigning-egress-ips-ovn
+ - Name: Configuring an egress service
+ File: configuring-egress-traffic-for-vrf-loadbalancer-services
- Name: Considerations for the use of an egress router pod
File: using-an-egress-router-ovn
- Name: Deploying an egress router pod in redirect mode
@@ -1362,7 +1454,7 @@ Topics:
File: about-openshift-sdn
- Name: Migrating to the OpenShift SDN network plugin
File: migrate-to-openshift-sdn
- - Name: Rolling back to the OpenShift SDN network plugin
+ - Name: Rolling back to the OVN-Kubernetes network plugin
File: rollback-to-ovn-kubernetes
- Name: Configuring egress IPs for a project
File: assigning-egress-ips
@@ -1436,9 +1528,7 @@ Topics:
Topics:
- Name: About the Kubernetes NMState Operator
File: k8s-nmstate-about-the-k8s-nmstate-operator
- - Name: Observing node network state
- File: k8s-nmstate-observing-node-network-state
- - Name: Updating node network configuration
+ - Name: Observing and updating node network state and configuration
File: k8s-nmstate-updating-node-network-config
- Name: Troubleshooting node network configuration
File: k8s-nmstate-troubleshooting-node-network
@@ -1471,33 +1561,12 @@ Topics:
File: metallb-configure-bfd-profiles
- Name: Configuring services to use MetalLB
File: metallb-configure-services
+ - Name: Managing symmetric routing with MetalLB
+ File: metallb-configure-return-traffic
- Name: MetalLB logging, troubleshooting, and support
File: metallb-troubleshoot-support
- Name: Associating secondary interfaces metrics to network attachments
File: associating-secondary-interfaces-metrics-to-network-attachments
-- Name: Network Observability
- Dir: network_observability
- Topics:
- - Name: Network Observability release notes
- File: network-observability-operator-release-notes
- - Name: Network Observability overview
- File: network-observability-overview
- - Name: Installing the Network Observability Operator
- File: installing-operators
- - Name: Understanding Network Observability Operator
- File: understanding-network-observability-operator
- - Name: Configuring the Network Observability Operator
- File: configuring-operator
- - Name: Observing the network traffic
- File: observing-network-traffic
- - Name: Monitoring the Network Observability Operator
- File: network-observability-operator-monitoring
- - Name: API reference
- File: flowcollector-api
- - Name: JSON flows format reference
- File: json-flows-format-reference
- - Name: Troubleshooting Network Observability
- File: troubleshooting-network-observability
---
Name: Storage
Dir: storage
@@ -1548,6 +1617,8 @@ Topics:
File: persistent-storage-hostpath
- Name: Persistent storage using LVM Storage
File: persistent-storage-using-lvms
+ - Name: Troubleshooting local persistent storage using LVMS
+ File: troubleshooting-local-persistent-storage-using-lvms
- Name: Using Container Storage Interface (CSI)
Dir: container_storage_interface
Distros: openshift-enterprise,openshift-origin
@@ -1592,8 +1663,8 @@ Topics:
File: persistent-storage-csi-cinder
- Name: OpenStack Manila CSI Driver Operator
File: persistent-storage-csi-manila
- - Name: Red Hat Virtualization CSI Driver Operator
- File: persistent-storage-csi-ovirt
+ - Name: Secrets Store CSI Driver Operator
+ File: persistent-storage-csi-secrets-store
- Name: VMware vSphere CSI Driver Operator
File: persistent-storage-csi-vsphere
- Name: Generic ephemeral volumes
@@ -1639,6 +1710,9 @@ Topics:
- Name: Configuring the registry for OpenShift Data Foundation
File: configuring-registry-storage-rhodf
Distros: openshift-enterprise,openshift-origin
+ - Name: Configuring the registry for Nutanix
+ File: configuring-registry-storage-nutanix
+ Distros: openshift-enterprise,openshift-origin
- Name: Accessing the registry
File: accessing-the-registry
- Name: Exposing the registry
@@ -1748,6 +1822,9 @@ Topics:
- Name: Managing platform Operators
File: olm-managing-po
Distros: openshift-enterprise,openshift-origin
+ - Name: Troubleshooting Operator issues
+ File: olm-troubleshooting-operator-issues
+ Distros: openshift-enterprise,openshift-origin
- Name: Developing Operators
Dir: operator_sdk
Distros: openshift-origin,openshift-enterprise
@@ -1820,6 +1897,8 @@ Topics:
File: osdk-working-bundle-images
- Name: Complying with pod security admission
File: osdk-complying-with-psa
+ - Name: Token authentication for Operators on cloud providers
+ File: osdk-token-auth
- Name: Validating Operators using the scorecard
File: osdk-scorecard
- Name: Validating Operator bundles
@@ -1830,6 +1909,8 @@ Topics:
File: osdk-monitoring-prometheus
- Name: Configuring leader election
File: osdk-leader-election
+ - Name: Configuring support for multiple platforms
+ File: osdk-multi-arch-support
- Name: Object pruning utility
File: osdk-pruning-utility
- Name: Migrating package manifest projects to bundle format
@@ -1841,6 +1922,29 @@ Topics:
Distros: openshift-origin
- Name: Cluster Operators reference
File: operator-reference
+- Name: OLM 1.0 (Technology Preview)
+ Dir: olm_v1
+ Distros: openshift-origin,openshift-enterprise
+ Topics:
+ - Name: About OLM 1.0
+ File: index
+ - Name: Components and architecture
+ Dir: arch
+ Topics:
+ - Name: Components overview
+ File: olmv1-components
+ - Name: Operator Controller
+ File: olmv1-operator-controller
+ - Name: RukPak
+ File: olmv1-rukpak
+ - Name: Dependency resolution
+ File: olmv1-dependency
+ - Name: Catalogd
+ File: olmv1-catalogd
+ - Name: Installing an Operator from a catalog
+ File: olmv1-installing-an-operator-from-a-catalog
+ - Name: Managing plain bundles
+ File: olmv1-managing-plain-bundles
---
Name: CI/CD
Dir: cicd
@@ -1848,7 +1952,13 @@ Distros: openshift-enterprise,openshift-origin,openshift-online
Topics:
- Name: CI/CD overview
File: index
-- Name: Builds
+- Name: Builds using Shipwright
+ Dir: builds_using_shipwright
+ Distros: openshift-enterprise
+ Topics:
+ - Name: Overview of Builds
+ File: overview-openshift-builds
+- Name: Builds using BuildConfig
Dir: builds
Distros: openshift-enterprise,openshift-origin,openshift-online
Topics:
@@ -1893,84 +2003,14 @@ Topics:
Dir: pipelines
Distros: openshift-enterprise
Topics:
- - Name: OpenShift Pipelines release notes
- File: op-release-notes
- - Name: Understanding OpenShift Pipelines
- File: understanding-openshift-pipelines
- - Name: Installing OpenShift Pipelines
- File: installing-pipelines
- - Name: Uninstalling OpenShift Pipelines
- File: uninstalling-pipelines
- - Name: Creating CI/CD solutions for applications using OpenShift Pipelines
- File: creating-applications-with-cicd-pipelines
- - Name: Managing non-versioned and versioned cluster tasks
- File: managing-nonversioned-and-versioned-cluster-tasks
- - Name: Using Tekton Hub with OpenShift Pipelines
- File: using-tekton-hub-with-openshift-pipelines
- - Name: Specifying remote pipelines and tasks using resolvers
- File: remote-pipelines-tasks-resolvers
- - Name: Using Pipelines as Code
- File: using-pipelines-as-code
- - Name: Working with OpenShift Pipelines using the Developer perspective
- File: working-with-pipelines-using-the-developer-perspective
- - Name: Customizing configurations in the TektonConfig custom resource
- File: customizing-configurations-in-the-tektonconfig-cr
- - Name: Reducing resource consumption of OpenShift Pipelines
- File: reducing-pipelines-resource-consumption
- - Name: Setting compute resource quota for OpenShift Pipelines
- File: setting-compute-resource-quota-for-openshift-pipelines
- - Name: Using pods in a privileged security context
- File: using-pods-in-a-privileged-security-context
- - Name: Securing webhooks with event listeners
- File: securing-webhooks-with-event-listeners
- - Name: Authenticating pipelines using git secret
- File: authenticating-pipelines-using-git-secret
- - Name: Using Tekton Chains for OpenShift Pipelines supply chain security
- File: using-tekton-chains-for-openshift-pipelines-supply-chain-security
- - Name: Viewing pipeline logs using the OpenShift Logging Operator
- File: viewing-pipeline-logs-using-the-openshift-logging-operator
- - Name: Unprivileged building of container images using Buildah
- File: unprivileged-building-of-container-images-using-buildah
+ - Name: About OpenShift Pipelines
+ File: about-pipelines
- Name: GitOps
Dir: gitops
Distros: openshift-enterprise
Topics:
- - Name: OpenShift GitOps release notes
- File: gitops-release-notes
- - Name: Understanding OpenShift GitOps
- File: understanding-openshift-gitops
- - Name: Installing OpenShift GitOps
- File: installing-openshift-gitops
- - Name: Uninstalling OpenShift GitOps
- File: uninstalling-openshift-gitops
- - Name: Setting up a new Argo CD instance
- File: setting-up-argocd-instance
- - Name: Configuring an OpenShift cluster by deploying an application with cluster configurations
- File: configuring-an-openshift-cluster-by-deploying-an-application-with-cluster-configurations
- - Name: Deploying a Spring Boot application with Argo CD
- File: deploying-a-spring-boot-application-with-argo-cd
- - Name: Argo CD custom resource properties
- File: argo-cd-custom-resource-properties
- - Name: Monitoring application health status
- File: health-information-for-resources-deployment
- - Name: Configuring SSO for Argo CD using Dex
- File: configuring-sso-on-argo-cd-using-dex
- - Name: Configuring SSO for Argo CD using Keycloak
- File: configuring-sso-for-argo-cd-using-keycloak
- - Name: Configuring Argo CD RBAC
- File: configuring-argo-cd-rbac
- - Name: Configuring Resource Quota
- File: configuring-resource-quota
- - Name: Monitoring Argo CD custom resource workloads
- File: monitoring-argo-cd-custom-resource-workloads
- - Name: Running Control Plane Workloads on Infra nodes
- File: run-gitops-control-plane-workload-on-infra-nodes
- - Name: Sizing requirements for GitOps Operator
- File: about-sizing-requirements-gitops
- - Name: Collecting debugging data for a support case
- File: collecting-debugging-data-for-support
- - Name: Troubleshooting issues in GitOps
- File: troubleshooting-issues-in-GitOps
+ - Name: About OpenShift GitOps
+ File: about-redhat-openshift-gitops
- Name: Jenkins
Dir: jenkins
Distros: openshift-enterprise
@@ -2101,7 +2141,7 @@ Topics:
- Name: Deployments
Dir: deployments
Topics:
- - Name: Understanding Deployments and DeploymentConfigs
+ - Name: Understanding deployments
File: what-deployments-are
- Name: Managing deployment processes
File: managing-deployment-processes
@@ -2141,6 +2181,16 @@ Topics:
File: red-hat-marketplace
Distros: openshift-origin,openshift-enterprise
---
+Name: Serverless
+Dir: serverless
+Distros: openshift-enterprise
+Topics:
+- Name: About Serverless
+ Dir: about
+ Topics:
+ - Name: Serverless overview
+ File: about-serverless
+---
Name: Machine management
Dir: machine_management
Distros: openshift-origin,openshift-enterprise
@@ -2169,12 +2219,6 @@ Topics:
File: creating-machineset-nutanix
- Name: Creating a compute machine set on OpenStack
File: creating-machineset-osp
- - Name: Creating a compute machine set on RHV
- File: creating-machineset-rhv
- Distros: openshift-enterprise
- - Name: Creating a compute machine set on oVirt
- File: creating-machineset-rhv
- Distros: openshift-origin
- Name: Creating a compute machine set on vSphere
File: creating-machineset-vsphere
- Name: Creating a compute machine set on bare metal
@@ -2183,6 +2227,8 @@ Topics:
File: manually-scaling-machineset
- Name: Modifying a compute machine set
File: modifying-machineset
+- Name: Machine phases and lifecycle
+ File: machine-phases-lifecycle
- Name: Deleting a machine
File: deleting-machine
- Name: Applying autoscaling to a cluster
@@ -2204,8 +2250,6 @@ Topics:
File: adding-aws-compute-user-infra
- Name: Adding compute machines to vSphere manually
File: adding-vsphere-compute-user-infra
- - Name: Adding compute machines to a cluster on RHV
- File: adding-rhv-compute-user-infra
- Name: Adding compute machines to bare metal
File: adding-bare-metal-compute-user-infra
- Name: Managing machines with the Cluster API
@@ -2242,6 +2286,8 @@ Topics:
File: hcp-managing
- Name: Backup, restore, and disaster recovery for hosted control planes
File: hcp-backup-restore-dr
+- Name: Troubleshooting hosted control planes
+ File: hcp-troubleshooting
---
Name: Nodes
Dir: nodes
@@ -2263,8 +2309,10 @@ Topics:
File: nodes-pods-autoscaling
- Name: Automatically adjust pod resource levels with the vertical pod autoscaler
File: nodes-pods-vertical-autoscaler
- - Name: Providing sensitive data to pods
+ - Name: Providing sensitive data to pods by using secrets
File: nodes-pods-secrets
+ - Name: Providing sensitive data to pods by using an external secrets store
+ File: nodes-pods-secrets-store
- Name: Creating and using config maps
File: nodes-pods-configmaps
- Name: Using Device Manager to make devices available to nodes
@@ -2300,7 +2348,7 @@ Topics:
File: nodes-cma-autoscaling-custom-install
- Name: Understanding the custom metrics autoscaler triggers
File: nodes-cma-autoscaling-custom-trigger
- - Name: Understanding the custom metrics autoscaler trigger authentications
+ - Name: Understanding custom metrics autoscaler trigger authentications
File: nodes-cma-autoscaling-custom-trigger-auth
- Name: Pausing the custom metrics autoscaler
File: nodes-cma-autoscaling-custom-pausing
@@ -2340,8 +2388,18 @@ Topics:
# File: nodes-scheduler-node-projects
# - Name: Keeping your cluster balanced using the descheduler
# File: nodes-scheduler-descheduler
- - Name: Evicting pods using the descheduler
- File: nodes-descheduler
+ - Name: Descheduler
+ Dir: descheduler
+ Distros: openshift-enterprise
+ Topics:
+ - Name: Descheduler overview
+ File: index
+ - Name: Descheduler release notes
+ File: nodes-descheduler-release-notes
+ - Name: Evicting pods using the descheduler
+ File: nodes-descheduler-configuring
+ - Name: Uninstalling the descheduler
+ File: nodes-descheduler-uninstalling
- Name: Secondary scheduler
Dir: secondary_scheduler
Distros: openshift-enterprise
@@ -2372,8 +2430,9 @@ Topics:
File: nodes-nodes-working
- Name: Managing nodes
File: nodes-nodes-managing
- - Name: Managing graceful node shutdown
- File: nodes-nodes-graceful-shutdown
+# Hiding this assembly per @rphillips: "We are trying to enable the feature, but there are cases we are running into where networking does not get enabled at boot."
+# - Name: Managing graceful node shutdown
+# File: nodes-nodes-graceful-shutdown
- Name: Managing the maximum number of pods per node
File: nodes-nodes-managing-max-pods
- Name: Using the Node Tuning Operator
@@ -2388,7 +2447,7 @@ Topics:
File: nodes-nodes-resources-configuring
- Name: Allocating specific CPUs for nodes in a cluster
File: nodes-nodes-resources-cpus
- - Name: Configuring the TLS security profile for the kubelet
+ - Name: Enabling TLS security profiles for the kubelet
File: nodes-nodes-tls
Distros: openshift-enterprise,openshift-origin
# - Name: Monitoring for problems in your nodes
@@ -2434,13 +2493,9 @@ Topics:
Distros: openshift-enterprise,openshift-origin
- Name: Configuring your cluster to place pods on overcommitted nodes
File: nodes-cluster-overcommit
- Distros: openshift-enterprise,openshift-origin
- - Name: Configuring the Linux cgroup version on your nodes
- File: nodes-cluster-cgroups-2
Distros: openshift-enterprise
- Name: Configuring the Linux cgroup version on your nodes
- File: nodes-cluster-cgroups-okd
- Distros: openshift-origin
+ File: nodes-cluster-cgroups-2
- Name: Enabling features using FeatureGates
File: nodes-cluster-enabling-features
Distros: openshift-enterprise,openshift-origin
@@ -2459,6 +2514,8 @@ Topics:
Topics:
- Name: Adding worker nodes to single-node OpenShift clusters
File: nodes-sno-worker-nodes
+- Name: Node metrics dashboard
+ File: nodes-dashboard-using
---
Name: Windows Container Support for OpenShift
Dir: windows_containers
@@ -2479,10 +2536,12 @@ Topics:
File: creating-windows-machineset-aws
- Name: Creating a Windows machine set on Azure
File: creating-windows-machineset-azure
- - Name: Creating a Windows machine set on vSphere
- File: creating-windows-machineset-vsphere
- Name: Creating a Windows machine set on GCP
File: creating-windows-machineset-gcp
+ - Name: Creating a Windows machine set on Nutanix
+ File: creating-windows-machineset-nutanix
+ - Name: Creating a Windows machine set on vSphere
+ File: creating-windows-machineset-vsphere
- Name: Scheduling Windows container workloads
File: scheduling-windows-workloads
- Name: Windows node upgrades
@@ -2506,125 +2565,112 @@ Dir: logging
Distros: openshift-enterprise,openshift-origin
Topics:
- Name: Release notes
- File: cluster-logging-release-notes
-- Name: Logging 5.7
- Dir: v5_7
- Distros: openshift-enterprise,openshift-origin
+ Dir: logging_release_notes
Topics:
- - Name: Logging 5.7 Release Notes
+ - Name: Logging 5.8
+ File: logging-5-8-release-notes
+ - Name: Logging 5.7
File: logging-5-7-release-notes
- - Name: Getting started with logging
- File: logging-5-7-getting-started
- - Name: Understanding Logging
- File: logging-5-7-architecture
- - Name: Configuring Logging
- File: logging-5-7-configuration
- - Name: Administering Logging
- File: logging-5-7-administration
-# Name: Logging Reference
-# File: logging-5-7-reference
-- Name: Logging 5.6
- Dir: v5_6
- Distros: openshift-enterprise,openshift-origin
- Topics:
- - Name: Logging 5.6 Release Notes
- File: logging-5-6-release-notes
- - Name: Getting started with logging
- File: logging-5-6-getting-started
- - Name: Understanding Logging
- File: logging-5-6-architecture
- - Name: Configuring Logging
- File: logging-5-6-configuration
- - Name: Administering Logging
- File: logging-5-6-administration
- - Name: Logging Reference
- File: logging-5-6-reference
-- Name: Logging 5.5
- Dir: v5_5
- Distros: openshift-enterprise,openshift-origin
+- Name: Support
+ File: cluster-logging-support
+- Name: Troubleshooting logging
+ Dir: troubleshooting
Topics:
- - Name: Logging 5.5 Release Notes
- File: logging-5-5-release-notes
- - Name: Getting started with logging
- File: logging-5-5-getting-started
- - Name: Understanding Logging
- File: logging-5-5-architecture
- - Name: Administering Logging
- File: logging-5-5-administration
-# - Name: Configuring Logging
-# File: logging-5-5-configuration
-# - Name: Logging Reference
-# File: logging-5-5-reference
+ - Name: Viewing Logging status
+ File: cluster-logging-cluster-status
+ - Name: Troubleshooting log forwarding
+ File: log-forwarding-troubleshooting
+ - Name: Troubleshooting logging alerts
+ File: troubleshooting-logging-alerts
+ - Name: Viewing the status of the Elasticsearch log store
+ File: cluster-logging-log-store-status
- Name: About Logging
File: cluster-logging
- Name: Installing Logging
File: cluster-logging-deploying
+- Name: Updating Logging
+ File: cluster-logging-upgrading
Distros: openshift-enterprise,openshift-origin
+- Name: Visualizing logs
+ Dir: log_visualization
+ Topics:
+ - Name: About log visualization
+ File: log-visualization
+ - Name: Log visualization with the web console
+ File: log-visualization-ocp-console
+ - Name: Viewing cluster dashboards
+ File: cluster-logging-dashboards
+ - Name: Log visualization with Kibana
+ File: logging-kibana
- Name: Configuring your Logging deployment
Dir: config
Distros: openshift-enterprise,openshift-origin
Topics:
- - Name: About the Cluster Logging custom resource
- File: cluster-logging-configuring-cr
- - Name: Configuring the logging collector
- File: cluster-logging-collector
- - Name: Configuring the log store
- File: cluster-logging-log-store
- - Name: Configuring the log visualizer
- File: cluster-logging-visualizer
- - Name: Configuring Logging storage
- File: cluster-logging-storage-considerations
- Name: Configuring CPU and memory limits for Logging components
File: cluster-logging-memory
- - Name: Using tolerations to control Logging pod placement
- File: cluster-logging-tolerations
- - Name: Moving the Logging resources with node selectors
- File: cluster-logging-moving-nodes
- Name: Configuring systemd-journald for Logging
File: cluster-logging-systemd
- - Name: Maintenance and support
- File: cluster-logging-maintenance-support
-- Name: Logging with the LokiStack
- File: cluster-logging-loki
-- Name: Viewing logs for a specific resource
- File: viewing-resource-logs
-- Name: Viewing cluster logs in Kibana
- File: cluster-logging-visualizer
- Distros: openshift-enterprise,openshift-origin
-- Name: Forwarding logs to third party systems
- File: cluster-logging-external
- Distros: openshift-enterprise,openshift-origin
-- Name: Enabling JSON logging
- File: cluster-logging-enabling-json-logging
-- Name: Collecting and storing Kubernetes events
- File: cluster-logging-eventrouter
- Distros: openshift-enterprise,openshift-origin
-# - Name: Forwarding logs using ConfigMaps
-# File: cluster-logging-external-configmap
-# Distros: openshift-enterprise,openshift-origin
-- Name: Updating Logging
- File: cluster-logging-upgrading
-- Name: Viewing cluster dashboards
- File: cluster-logging-dashboards
-- Name: Troubleshooting Logging
- Dir: troubleshooting
- Distros: openshift-enterprise,openshift-origin
- Topics:
- - Name: Viewing Logging status
- File: cluster-logging-cluster-status
- - Name: Viewing the status of the log store
- File: cluster-logging-log-store-status
- - Name: Understanding Logging alerts
- File: cluster-logging-alerts
- - Name: Collecting logging data for Red Hat Support
- File: cluster-logging-must-gather
- - Name: Troubleshooting for Critical Alerts
- File: cluster-logging-troubleshooting-for-critical-alerts
+- Name: Log collection and forwarding
+ Dir: log_collection_forwarding
+ Topics:
+ - Name: About log collection and forwarding
+ File: log-forwarding
+ - Name: Log output types
+ File: logging-output-types
+ - Name: Enabling JSON log forwarding
+ File: cluster-logging-enabling-json-logging
+ - Name: Configuring log forwarding
+ File: configuring-log-forwarding
+ - Name: Configuring the logging collector
+ File: cluster-logging-collector
+ - Name: Collecting and storing Kubernetes events
+ File: cluster-logging-eventrouter
+- Name: Log storage
+ Dir: log_storage
+ Topics:
+ - Name: About log storage
+ File: about-log-storage
+ - Name: Installing log storage
+ File: installing-log-storage
+ - Name: Configuring the LokiStack log store
+ File: cluster-logging-loki
+ - Name: Configuring the Elasticsearch log store
+ File: logging-config-es-store
+- Name: Logging alerts
+ Dir: logging_alerts
+ Topics:
+ - Name: Default logging alerts
+ File: default-logging-alerts
+ - Name: Custom logging alerts
+ File: custom-logging-alerts
+- Name: Performance and reliability tuning
+ Dir: performance_reliability
+ Topics:
+ - Name: Flow control mechanisms
+ File: logging-flow-control-mechanisms
+- Name: Scheduling resources
+ Dir: scheduling_resources
+ Topics:
+ - Name: Using node selectors to move logging resources
+ File: logging-node-selectors
+ - Name: Using tolerations to control logging pod placement
+ File: logging-taints-tolerations
- Name: Uninstalling Logging
File: cluster-logging-uninstall
- Name: Exported fields
File: cluster-logging-exported-fields
Distros: openshift-enterprise,openshift-origin
+- Name: API reference
+ Dir: api_reference
+ Topics:
+# - Name: 5.8 Logging API reference
+# File: logging-5-8-reference
+# - Name: 5.7 Logging API reference
+# File: logging-5-7-reference
+ - Name: 5.6 Logging API reference
+ File: logging-5-6-reference
+- Name: Glossary
+ File: logging-common-terms
---
Name: Monitoring
Dir: monitoring
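(Aside on the structure these hunks edit: each `---`-separated document in `_topic_map.yml` is one top-level book with a `Name`, a source `Dir`, an optional comma-separated `Distros` filter, and a nested `Topics` list whose entries either point at a `File` or open a subdirectory with their own `Dir` and `Topics`. Below is a minimal sketch of walking that structure for one distro; it assumes PyYAML and assumes an entry with no `Distros` key is visible to every distro — the repository's actual build tooling may apply different rules.)

```python
# Minimal sketch: print the nav tree of _topic_map.yml for one distro.
# Assumption: an entry with no "Distros" key is visible everywhere;
# this mirrors how the map reads, not the build tool's own logic.
import yaml

def visible(entry, distro):
    distros = entry.get("Distros")
    return distros is None or distro in [d.strip() for d in distros.split(",")]

def walk(topics, distro, depth=0):
    for entry in topics:
        if not visible(entry, distro):
            continue
        print("  " * depth + entry["Name"])
        walk(entry.get("Topics", []), distro, depth + 1)

with open("_topic_maps/_topic_map.yml") as f:
    for book in yaml.safe_load_all(f):   # one document per ----separated book
        if book and visible(book, "openshift-enterprise"):
            print(book["Name"])
            walk(book.get("Topics", []), "openshift-enterprise", 1)
```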
@@ -2640,24 +2686,155 @@ Topics:
File: enabling-alert-routing-for-user-defined-projects
- Name: Managing metrics
File: managing-metrics
-- Name: Querying metrics
- File: querying-metrics
-- Name: Managing metrics targets
- File: managing-metrics-targets
- Name: Managing alerts
File: managing-alerts
- Name: Reviewing monitoring dashboards
File: reviewing-monitoring-dashboards
-- Name: The NVIDIA GPU administration dashboard
- File: nvidia-gpu-admin-dashboard
-- Name: Monitoring bare-metal events
- File: using-rfhe
- Name: Accessing third-party monitoring APIs
File: accessing-third-party-monitoring-apis
- Name: Troubleshooting monitoring issues
File: troubleshooting-monitoring-issues
- Name: Config map reference for the Cluster Monitoring Operator
File: config-map-reference-for-the-cluster-monitoring-operator
+- Name: Cluster Observability Operator
+ Dir: cluster_observability_operator
+ Topics:
+ - Name: Cluster Observability Operator release notes
+ File: cluster-observability-operator-release-notes
+ - Name: Cluster Observability Operator overview
+ File: cluster-observability-operator-overview
+ - Name: Installing the Cluster Observability Operator
+ File: installing-the-cluster-observability-operator
+ - Name: Configuring the Cluster Observability Operator to monitor a service
+ File: configuring-the-cluster-observability-operator-to-monitor-a-service
+---
+Name: Power monitoring
+Dir: power_monitoring
+Distros: openshift-enterprise,openshift-origin
+Topics:
+- Name: Power monitoring release notes
+ File: power-monitoring-release-notes
+- Name: Power monitoring overview
+ File: power-monitoring-overview
+- Name: Installing power monitoring
+ File: installing-power-monitoring
+- Name: Configuring power monitoring
+ File: configuring-power-monitoring
+- Name: Visualizing power monitoring metrics
+ File: visualizing-power-monitoring-metrics
+- Name: Uninstalling power monitoring
+ File: uninstalling-power-monitoring
+---
+Name: Distributed tracing
+Dir: distr_tracing
+Distros: openshift-enterprise
+Topics:
+- Name: Distributed tracing release notes
+ Dir: distr_tracing_rn
+ Topics:
+ - Name: Distributed tracing 3.0
+ File: distr-tracing-rn-3-0
+ - Name: Distributed tracing 2.9.2
+ File: distr-tracing-rn-2-9-2
+ - Name: Distributed tracing 2.9.1
+ File: distr-tracing-rn-2-9-1
+ - Name: Distributed tracing 2.9
+ File: distr-tracing-rn-2-9
+ - Name: Distributed tracing 2.8
+ File: distr-tracing-rn-2-8
+ - Name: Distributed tracing 2.7
+ File: distr-tracing-rn-2-7
+ - Name: Distributed tracing 2.6
+ File: distr-tracing-rn-2-6
+ - Name: Distributed tracing 2.5
+ File: distr-tracing-rn-2-5
+ - Name: Distributed tracing 2.4
+ File: distr-tracing-rn-2-4
+ - Name: Distributed tracing 2.3
+ File: distr-tracing-rn-2-3
+ - Name: Distributed tracing 2.2
+ File: distr-tracing-rn-2-2
+ - Name: Distributed tracing 2.1
+ File: distr-tracing-rn-2-1
+ - Name: Distributed tracing 2.0
+ File: distr-tracing-rn-2-0
+- Name: Distributed tracing architecture
+ Dir: distr_tracing_arch
+ Topics:
+ - Name: Distributed tracing architecture
+ File: distr-tracing-architecture
+- Name: Distributed tracing platform (Jaeger)
+ Dir: distr_tracing_jaeger
+ Topics:
+ - Name: Installation
+ File: distr-tracing-jaeger-installing
+ - Name: Configuration
+ File: distr-tracing-jaeger-configuring
+ - Name: Updating
+ File: distr-tracing-jaeger-updating
+ - Name: Removal
+ File: distr-tracing-jaeger-removing
+- Name: Distributed tracing platform (Tempo)
+ Dir: distr_tracing_tempo
+ Topics:
+ - Name: Installation
+ File: distr-tracing-tempo-installing
+ - Name: Configuration
+ File: distr-tracing-tempo-configuring
+ - Name: Updating
+ File: distr-tracing-tempo-updating
+ - Name: Removal
+ File: distr-tracing-tempo-removing
+---
+Name: Red Hat build of OpenTelemetry
+Dir: otel
+Distros: openshift-enterprise
+Topics:
+- Name: Release notes
+ File: otel-release-notes
+- Name: Installation
+ File: otel-installing
+- Name: Collector configuration
+ File: otel-configuring
+- Name: Instrumentation
+ File: otel-instrumentation
+- Name: Use
+ File: otel-using
+- Name: Troubleshooting
+ File: otel-troubleshooting
+- Name: Migration
+ File: otel-migrating
+- Name: Updating
+ File: otel-updating
+- Name: Removal
+ File: otel-removing
+---
+Name: Network Observability
+Dir: network_observability
+Distros: openshift-enterprise,openshift-origin
+Topics:
+- Name: Network Observability release notes
+ File: network-observability-operator-release-notes
+- Name: Network Observability overview
+ File: network-observability-overview
+- Name: Installing the Network Observability Operator
+ File: installing-operators
+- Name: Understanding Network Observability Operator
+ File: understanding-network-observability-operator
+- Name: Configuring the Network Observability Operator
+ File: configuring-operator
+- Name: Network Policy
+ File: network-observability-network-policy
+- Name: Observing the network traffic
+ File: observing-network-traffic
+- Name: Monitoring the Network Observability Operator
+ File: network-observability-operator-monitoring
+- Name: API reference
+ File: flowcollector-api
+- Name: JSON flows format reference
+ File: json-flows-format-reference
+- Name: Troubleshooting Network Observability
+ File: troubleshooting-network-observability
---
Name: Scalability and performance
Dir: scalability_and_performance
@@ -2692,17 +2869,19 @@ Topics:
Dir: optimization
Distros: openshift-origin,openshift-enterprise
Topics:
- - Name: Optimizing storage
- File: optimizing-storage
- - Name: Optimizing routing
- File: routing-optimization
- - Name: Optimizing networking
- File: optimizing-networking
- - Name: Optimizing CPU usage
- File: optimizing-cpu-usage
+ - Name: Optimizing storage
+ File: optimizing-storage
+ - Name: Optimizing routing
+ File: routing-optimization
+ - Name: Optimizing networking
+ File: optimizing-networking
+ - Name: Optimizing CPU usage
+ File: optimizing-cpu-usage
- Name: Managing bare metal hosts
File: managing-bare-metal-hosts
Distros: openshift-origin,openshift-enterprise
+- Name: Monitoring bare-metal events
+ File: using-rfhe
- Name: What huge pages do and how they are consumed by apps
File: what-huge-pages-do-and-how-they-are-consumed-by-apps
Distros: openshift-origin,openshift-enterprise
@@ -2713,48 +2892,79 @@ Topics:
File: cnf-performing-platform-verification-latency-tests
- Name: Improving cluster stability in high latency environments using worker latency profiles
File: scaling-worker-latency-profiles
-- Name: Topology Aware Lifecycle Manager for cluster updates
- File: cnf-talm-for-cluster-upgrades
- Distros: openshift-origin,openshift-enterprise
- Name: Creating a performance profile
File: cnf-create-performance-profiles
Distros: openshift-origin,openshift-enterprise
- Name: Workload partitioning
File: enabling-workload-partitioning
Distros: openshift-origin,openshift-enterprise
-- Name: Requesting CRI-O and Kubelet profiling data by using the Node Observability Operator
+- Name: Using the Node Observability Operator
File: node-observability-operator
Distros: openshift-origin,openshift-enterprise
- Name: Clusters at the network far edge
Dir: ztp_far_edge
Distros: openshift-origin,openshift-enterprise
Topics:
- - Name: Challenges of the network far edge
- File: ztp-deploying-far-edge-clusters-at-scale
- - Name: Preparing the hub cluster for ZTP
- File: ztp-preparing-the-hub-cluster
- - Name: Installing managed clusters with RHACM and SiteConfig resources
- File: ztp-deploying-far-edge-sites
- - Name: Configuring managed clusters with policies and PolicyGenTemplate resources
- File: ztp-configuring-managed-clusters-policies
- - Name: Manually installing a single-node OpenShift cluster with ZTP
- File: ztp-manual-install
- - Name: Recommended single-node OpenShift cluster configuration for vDU application workloads
- File: ztp-reference-cluster-configuration-for-vdu
- - Name: Validating cluster tuning for vDU application workloads
- File: ztp-vdu-validating-cluster-tuning
- - Name: Advanced managed cluster configuration with SiteConfig resources
- File: ztp-advanced-install-ztp
- - Name: Advanced managed cluster configuration with PolicyGenTemplate resources
- File: ztp-advanced-policy-config
- - Name: Updating managed clusters with the Topology Aware Lifecycle Manager
- File: ztp-talm-updating-managed-policies
- - Name: Updating GitOps ZTP
- File: ztp-updating-gitops
- - Name: Expanding single-node OpenShift clusters with GitOps ZTP
- File: ztp-sno-additional-worker-node
- - Name: Pre-caching images for single-node OpenShift deployments
- File: ztp-precaching-tool
+ - Name: Challenges of the network far edge
+ File: ztp-deploying-far-edge-clusters-at-scale
+ - Name: Preparing the hub cluster for ZTP
+ File: ztp-preparing-the-hub-cluster
+ - Name: Installing managed clusters with RHACM and SiteConfig resources
+ File: ztp-deploying-far-edge-sites
+ - Name: Configuring managed clusters with policies and PolicyGenTemplate resources
+ File: ztp-configuring-managed-clusters-policies
+ - Name: Manually installing a single-node OpenShift cluster with ZTP
+ File: ztp-manual-install
+ - Name: Recommended single-node OpenShift cluster configuration for vDU application workloads
+ File: ztp-reference-cluster-configuration-for-vdu
+ - Name: Validating cluster tuning for vDU application workloads
+ File: ztp-vdu-validating-cluster-tuning
+ - Name: Advanced managed cluster configuration with SiteConfig resources
+ File: ztp-advanced-install-ztp
+ - Name: Advanced managed cluster configuration with PolicyGenTemplate resources
+ File: ztp-advanced-policy-config
+ - Name: Updating managed clusters with the Topology Aware Lifecycle Manager
+ File: cnf-talm-for-cluster-upgrades
+ Distros: openshift-origin,openshift-enterprise
+ - Name: Updating managed clusters in a disconnected environment with the Topology Aware Lifecycle Manager
+ File: ztp-talm-updating-managed-policies
+ - Name: Updating GitOps ZTP
+ File: ztp-updating-gitops
+ - Name: Expanding single-node OpenShift clusters with GitOps ZTP
+ File: ztp-sno-additional-worker-node
+ - Name: Pre-caching images for single-node OpenShift deployments
+ File: ztp-precaching-tool
+---
+Name: Reference design specifications
+Dir: telco_ref_design_specs
+Distros: openshift-telco
+Topics:
+- Name: Telco reference design specifications
+ File: telco-ref-design-specs-overview
+- Name: Telco RAN DU reference design specification
+ Dir: ran
+ Topics:
+ - Name: Telco RAN DU reference design overview
+ File: telco-ran-ref-design-spec
+ - Name: Telco RAN DU use model overview
+ File: telco-ran-du-overview
+ - Name: RAN DU reference design components
+ File: telco-ran-ref-du-components
+ - Name: RAN DU reference design configuration CRs
+ File: telco-ran-ref-du-crs
+ - Name: Telco RAN DU software specifications
+ File: telco-ran-ref-software-artifacts
+- Name: Telco core reference design specification
+ Dir: core
+ Topics:
+ - Name: Telco core reference design overview
+ File: telco-core-rds-overview
+ - Name: Telco core use model overview
+ File: telco-core-rds-use-cases
+ - Name: Core reference design components
+ File: telco-core-ref-design-components
+ - Name: Core reference design configuration CRs
+ File: telco-core-ref-crs
---
Name: Specialized hardware and driver enablement
Dir: hardware_enablement
@@ -2779,11 +2989,20 @@ Topics:
File: graceful-cluster-shutdown
- Name: Restarting a cluster gracefully
File: graceful-cluster-restart
-- Name: Application backup and restore
+- Name: OADP Application backup and restore
Dir: application_backup_and_restore
Topics:
+ - Name: Introduction to OpenShift API for Data Protection
+ File: oadp-intro
- Name: OADP release notes
- File: oadp-release-notes
+ Dir: release-notes
+ Topics:
+ - Name: OADP 1.3 release notes
+ File: oadp-release-notes-1-3
+ - Name: OADP 1.2 release notes
+ File: oadp-release-notes-1-2
+ - Name: OADP 1.1 release notes
+ File: oadp-release-notes-1-1
- Name: OADP features and plugins
File: oadp-features-plugins
- Name: Installing and configuring OADP
@@ -2791,25 +3010,70 @@ Topics:
Topics:
- Name: About installing OADP
File: about-installing-oadp
- - Name: Installing and configuring OADP with AWS
+ - Name: Installing the OADP Operator
+ File: oadp-installing-operator
+ - Name: Configuring OADP with AWS
File: installing-oadp-aws
- - Name: Installing and configuring OADP with Azure
+ - Name: Configuring OADP with Azure
File: installing-oadp-azure
- - Name: Installing and configuring OADP with GCP
+ - Name: Configuring OADP with GCP
File: installing-oadp-gcp
- - Name: Installing and configuring OADP with MCG
+ - Name: Configuring OADP with MCG
File: installing-oadp-mcg
- - Name: Installing and configuring OADP with ODF
+ - Name: Configuring OADP with ODF
File: installing-oadp-ocs
+ - Name: Uninstalling OADP
+ Dir: installing
+ Topics:
- Name: Uninstalling OADP
File: uninstalling-oadp
- - Name: Backing up and restoring
+ - Name: OADP backing up
Dir: backing_up_and_restoring
Topics:
- Name: Backing up applications
File: backing-up-applications
+ - Name: Creating a Backup CR
+ File: oadp-creating-backup-cr
+ - Name: Backing up persistent volumes with CSI snapshots
+ File: oadp-backing-up-pvs-csi-doc
+ - Name: Backing up applications with File System Backup
+ File: oadp-backing-up-applications-restic-doc
+ - Name: Creating backup hooks
+ File: oadp-creating-backup-hooks-doc
+ - Name: Scheduling backups using Schedule CR
+ File: oadp-scheduling-backups-doc
+ - Name: Deleting backups
+ File: oadp-deleting-backups-doc
+ - Name: About Kopia
+ File: oadp-about-kopia
+ - Name: OADP restoring
+ Dir: backing_up_and_restoring
+ Topics:
- Name: Restoring applications
File: restoring-applications
+ - Name: OADP and ROSA
+ Dir: oadp-rosa
+ Topics:
+ - Name: Backing up applications on ROSA STS using OADP
+ File: oadp-rosa-backing-up-applications
+ - Name: OADP Data Mover
+ Dir: installing
+ Topics:
+ - Name: Introduction to OADP Data Mover
+ File: data-mover-intro
+ - Name: Using Data Mover for CSI snapshots
+ File: oadp-using-data-mover-for-csi-snapshots-doc
+ - Name: Using OADP 1.2 Data Mover with Ceph storage
+ File: oadp-12-data-mover-ceph-doc
+ - Name: Cleaning up after a backup using OADP 1.1 Data Mover
+ File: oadp-cleaning-up-after-data-mover-1-1-backup-doc
+ - Name: OADP 1.3 Data Mover
+ Dir: installing
+ Topics:
+ - Name: About the OADP 1.3 Data Mover
+ File: about-oadp-1-3-data-mover
+ - Name: Backing up and restoring volumes by using CSI snapshots
+ File: oadp-backup-restore-csi-snapshots
- Name: Troubleshooting
File: troubleshooting
- Name: OADP API
@@ -2929,6 +3193,8 @@ Topics:
File: subjectaccessreview-authorization-openshift-io-v1
- Name: 'SubjectRulesReview [authorization.openshift.io/v1]'
File: subjectrulesreview-authorization-openshift-io-v1
+ - Name: 'SelfSubjectReview [authentication.k8s.io/v1]'
+ File: selfsubjectreview-authentication-k8s-io-v1
- Name: 'TokenRequest [authentication.k8s.io/v1]'
File: tokenrequest-authentication-k8s-io-v1
- Name: 'TokenReview [authentication.k8s.io/v1]'
@@ -3022,6 +3288,8 @@ Topics:
File: consoleplugin-console-openshift-io-v1
- Name: 'ConsoleQuickStart [console.openshift.io/v1]'
File: consolequickstart-console-openshift-io-v1
+ - Name: 'ConsoleSample [console.openshift.io/v1]'
+ File: consolesample-console-openshift-io-v1
- Name: 'ConsoleYAMLSample [console.openshift.io/v1]'
File: consoleyamlsample-console-openshift-io-v1
- Name: Extension APIs
@@ -3075,10 +3343,12 @@ Topics:
File: controlplanemachineset-machine-openshift-io-v1
- Name: 'KubeletConfig [machineconfiguration.openshift.io/v1]'
File: kubeletconfig-machineconfiguration-openshift-io-v1
- - Name: 'MachineConfigPool [machineconfiguration.openshift.io/v1]'
- File: machineconfigpool-machineconfiguration-openshift-io-v1
- Name: 'MachineConfig [machineconfiguration.openshift.io/v1]'
File: machineconfig-machineconfiguration-openshift-io-v1
+ - Name: 'MachineConfigNode [machineconfiguration.openshift.io/v1alpha1]'
+ File: machineconfignode-machineconfiguration-openshift-io-v1alpha1
+ - Name: 'MachineConfigPool [machineconfiguration.openshift.io/v1]'
+ File: machineconfigpool-machineconfiguration-openshift-io-v1
- Name: 'MachineHealthCheck [machine.openshift.io/v1beta1]'
File: machinehealthcheck-machine-openshift-io-v1beta1
- Name: 'Machine [machine.openshift.io/v1beta1]'
@@ -3117,6 +3387,10 @@ Topics:
File: alertmanager-monitoring-coreos-com-v1
- Name: 'AlertmanagerConfig [monitoring.coreos.com/v1beta1]'
File: alertmanagerconfig-monitoring-coreos-com-v1beta1
+ - Name: 'AlertRelabelConfig [monitoring.openshift.io/v1]'
+ File: alertrelabelconfig-monitoring-openshift-io-v1
+ - Name: 'AlertingRule [monitoring.openshift.io/v1]'
+ File: alertingrule-monitoring-openshift-io-v1
- Name: 'PodMonitor [monitoring.coreos.com/v1]'
File: podmonitor-monitoring-coreos-com-v1
- Name: 'Probe [monitoring.coreos.com/v1]'
@@ -3134,6 +3408,8 @@ Topics:
Topics:
- Name: About Network APIs
File: network-apis-index
+ - Name: 'AdminPolicyBasedExternalRoute [k8s.ovn.org/v1]'
+ File: adminpolicybasedexternalroute-k8s-ovn-org-v1
- Name: 'CloudPrivateIPConfig [cloud.network.openshift.io/v1]'
File: cloudprivateipconfig-cloud-network-openshift-io-v1
- Name: 'EgressFirewall [k8s.ovn.org/v1]'
@@ -3142,6 +3418,8 @@ Topics:
File: egressip-k8s-ovn-org-v1
- Name: 'EgressQoS [k8s.ovn.org/v1]'
File: egressqos-k8s-ovn-org-v1
+ - Name: 'EgressService [k8s.ovn.org/v1]'
+ File: egressservice-k8s-ovn-org-v1
- Name: 'Endpoints [undefined/v1]'
File: endpoints-v1
- Name: 'EndpointSlice [discovery.k8s.io/v1]'
@@ -3239,6 +3517,8 @@ Topics:
File: kubescheduler-operator-openshift-io-v1
- Name: 'KubeStorageVersionMigrator [operator.openshift.io/v1]'
File: kubestorageversionmigrator-operator-openshift-io-v1
+ - Name: 'MachineConfiguration [operator.openshift.io/v1]'
+ File: machineconfiguration-operator-openshift-io-v1
- Name: 'Network [operator.openshift.io/v1]'
File: network-operator-openshift-io-v1
- Name: 'OpenShiftAPIServer [operator.openshift.io/v1]'
@@ -3352,14 +3632,14 @@ Topics:
File: appliedclusterresourcequota-quota-openshift-io-v1
- Name: 'ClusterResourceQuota [quota.openshift.io/v1]'
File: clusterresourcequota-quota-openshift-io-v1
- - Name: 'FlowSchema [flowcontrol.apiserver.k8s.io/v1beta1]'
- File: flowschema-flowcontrol-apiserver-k8s-io-v1beta1
+ - Name: 'FlowSchema [flowcontrol.apiserver.k8s.io/v1beta3]'
+ File: flowschema-flowcontrol-apiserver-k8s-io-v1beta3
- Name: 'LimitRange [undefined/v1]'
File: limitrange-v1
- Name: 'PriorityClass [scheduling.k8s.io/v1]'
File: priorityclass-scheduling-k8s-io-v1
- - Name: 'PriorityLevelConfiguration [flowcontrol.apiserver.k8s.io/v1beta1]'
- File: prioritylevelconfiguration-flowcontrol-apiserver-k8s-io-v1beta1
+ - Name: 'PriorityLevelConfiguration [flowcontrol.apiserver.k8s.io/v1beta3]'
+ File: prioritylevelconfiguration-flowcontrol-apiserver-k8s-io-v1beta3
- Name: 'ResourceQuota [undefined/v1]'
File: resourcequota-v1
- Name: Security APIs
@@ -3501,7 +3781,7 @@ Topics:
File: installing-ossm
- Name: Creating the ServiceMeshControlPlane
File: ossm-create-smcp
- - Name: Adding workloads to a service mesh
+ - Name: Adding services to a service mesh
File: ossm-create-mesh
- Name: Enabling sidecar injection
File: prepare-to-deploy-applications-ossm
@@ -3565,31 +3845,6 @@ Topics:
- Name: Removing Service Mesh
File: removing-ossm
---
-Name: Distributed tracing
-Dir: distr_tracing
-Distros: openshift-enterprise
-Topics:
-- Name: Distributed tracing release notes
- File: distributed-tracing-release-notes
-- Name: Distributed tracing architecture
- Dir: distr_tracing_arch
- Topics:
- - Name: Distributed tracing architecture
- File: distr-tracing-architecture
-- Name: Distributed tracing installation
- Dir: distr_tracing_install
- Topics:
- - Name: Installing distributed tracing
- File: distr-tracing-installing
- - Name: Configuring the distributed tracing platform
- File: distr-tracing-deploying-jaeger
- - Name: Configuring distributed tracing data collection
- File: distr-tracing-deploying-otel
- - Name: Upgrading distributed tracing
- File: distr-tracing-updating
- - Name: Removing distributed tracing
- File: distr-tracing-removing
----
Name: Virtualization
Dir: virt
Distros: openshift-enterprise,openshift-origin
@@ -3626,7 +3881,7 @@ Topics:
- Name: Getting started with OKD Virtualization
File: virt-getting-started
Distros: openshift-origin
- - Name: Using the virtctl and libguestfs CLI tools
+ - Name: virtctl and libguestfs
File: virt-using-the-cli-tools
- Name: Web console overview
File: virt-web-console-overview
@@ -3642,18 +3897,23 @@ Topics:
- Name: Installing OKD Virtualization
File: installing-virt
Distros: openshift-origin
- - Name: Specifying nodes for OpenShift Virtualization components
- File: virt-specifying-nodes-for-virtualization-components
- Distros: openshift-enterprise
- - Name: Specifying nodes for OKD Virtualization components
- File: virt-specifying-nodes-for-virtualization-components
- Distros: openshift-origin
- Name: Uninstalling OpenShift Virtualization
File: uninstalling-virt
Distros: openshift-enterprise
- Name: Uninstalling OKD Virtualization
File: uninstalling-virt
Distros: openshift-origin
+- Name: Postinstallation configuration
+ Dir: post_installation_configuration
+ Topics:
+ - Name: Postinstallation configuration
+ File: virt-post-install-config
+ - Name: Node placement rules
+ File: virt-node-placement-virt-components
+ - Name: Network configuration
+ File: virt-post-install-network-config
+ - Name: Storage configuration
+ File: virt-post-install-storage-config
- Name: Updating
Dir: updating
Topics:
@@ -3666,9 +3926,36 @@ Topics:
- Name: Virtual machines
Dir: virtual_machines
Topics:
-###VIRTUAL MACHINE CHESS SALAD (silly name to highlight that the commented out assemblies need to be checked against merged filenams)
- - Name: Creating virtual machines
- File: virt-create-vms
+ - Name: Creating VMs from Red Hat images
+ Dir: creating_vms_rh
+ Topics:
+ - Name: Creating VMs from Red Hat images overview
+ File: virt-creating-vms-from-rh-images-overview
+ - Name: Creating VMs from instance types
+ File: virt-creating-vms-from-instance-types
+ - Name: Creating VMs from templates
+ File: virt-creating-vms-from-templates
+ - Name: Creating VMs from the CLI
+ File: virt-creating-vms-from-cli
+ - Name: Creating VMs from custom images
+ Dir: creating_vms_custom
+ Topics:
+ - Name: Creating VMs from custom images overview
+ File: virt-creating-vms-from-custom-images-overview
+ - Name: Creating VMs by using container disks
+ File: virt-creating-vms-from-container-disks
+ - Name: Creating VMs by importing images from web pages
+ File: virt-creating-vms-from-web-images
+ - Name: Creating VMs by uploading images
+ File: virt-creating-vms-uploading-images
+ - Name: Creating VMs by cloning PVCs
+ File: virt-creating-vms-by-cloning-pvcs
+ - Name: Installing the QEMU guest agent and VirtIO drivers
+ File: virt-installing-qemu-guest-agent
+ - Name: Connecting to VM consoles
+ File: virt-accessing-vm-consoles
+ - Name: Configuring SSH access to VMs
+ File: virt-accessing-vm-ssh
- Name: Editing virtual machines
File: virt-edit-vms
- Name: Editing boot order
@@ -3681,16 +3968,6 @@ Topics:
File: virt-manage-vmis
- Name: Controlling virtual machine states
File: virt-controlling-vm-states
- - Name: Accessing virtual machine consoles
- File: virt-accessing-vm-consoles
- - Name: Automating Windows installation with sysprep
- File: virt-automating-windows-sysprep
- - Name: Triggering virtual machine failover by resolving a failed node
- File: virt-triggering-vm-failover-resolving-failed-node
- - Name: Installing the QEMU guest agent and VirtIO drivers
- File: virt-installing-qemu-guest-agent
- - Name: Viewing the QEMU guest agent information for virtual machines
- File: virt-viewing-qemu-guest-agent-web
- Name: Using virtual Trusted Platform Module devices
File: virt-using-vtpm-devices
- Name: Managing virtual machines with OpenShift Pipelines
@@ -3719,153 +3996,103 @@ Topics:
File: virt-schedule-vms
- Name: Configuring PCI passthrough
File: virt-configuring-pci-passthrough
- - Name: Configuring vGPU passthrough
- File: virt-configuring-vgpu-passthrough
- - Name: Configuring mediated devices
- File: virt-configuring-mediated-devices
+ - Name: Configuring virtual GPUs
+ File: virt-configuring-virtual-gpus
- Name: Enabling descheduler evictions on virtual machines
File: virt-enabling-descheduler-evictions
-# Importing virtual machines
- - Name: Importing virtual machines
- Dir: importing_vms
- Topics:
- - Name: TLS certificates for data volume imports
- File: virt-tls-certificates-for-dv-imports
- - Name: Importing virtual machine images with data volumes
- File: virt-importing-virtual-machine-images-datavolumes
-# Cloning virtual machines
- - Name: Cloning virtual machines
- Dir: cloning_vms
- Topics:
- - Name: Enabling user permissions to clone data volumes across namespaces
- File: virt-enabling-user-permissions-to-clone-datavolumes
- - Name: Cloning a virtual machine disk into a new data volume
- File: virt-cloning-vm-disk-into-new-datavolume
- - Name: Cloning a virtual machine by using a data volume template
- File: virt-cloning-vm-using-datavolumetemplate
- - Name: Cloning a virtual machine disk into a new block storage persistent volume claim
- File: virt-cloning-vm-disk-into-new-block-storage-pvc
-# Virtual machine networking
- - Name: Virtual machine networking
- Dir: vm_networking
- Topics:
- - Name: Configuring a virtual machine for the default pod network
- File: virt-using-the-default-pod-network-with-virt
- Distros: openshift-enterprise
- - Name: Configuring a virtual machine for the default pod network with OKD Virtualization
- File: virt-using-the-default-pod-network-with-virt
- Distros: openshift-origin
- - Name: Creating a service to expose a virtual machine
- File: virt-creating-service-vm
- - Name: Connecting a virtual machine to a Linux bridge network
- File: virt-attaching-vm-multiple-networks
- - Name: Connecting a virtual machine to an SR-IOV network
- File: virt-attaching-vm-to-sriov-network
- - Name: Connecting a virtual machine to a service mesh
- File: virt-connecting-vm-to-service-mesh
- - Name: Configuring IP addresses for virtual machines
- File: virt-configuring-ip-for-vms
- - Name: Viewing the IP address of NICs on a virtual machine
- File: virt-viewing-ip-of-vm-nic
- - Name: Accessing a virtual machine on a secondary network by using the cluster domain name
- File: virt-accessing-vm-secondary-network-fqdn
- - Name: Using a MAC address pool for virtual machines
- File: virt-using-mac-address-pool-for-vms
-#A BETTER NAME THAN 'STORAGE 4 U'
- - Name: Virtual machine disks
+ - Name: About high availability for virtual machines
+ File: virt-high-availability-for-vms
+ - Name: Control plane tuning
+ File: virt-vm-control-plane-tuning
+ - Name: VM disks
Dir: virtual_disks
Topics:
- - Name: Configuring local storage for virtual machines
- File: virt-configuring-local-storage-for-vms
- - Name: Creating data volumes
- File: virt-creating-data-volumes
- - Name: Reserving PVC space for file system overhead
- File: virt-reserving-pvc-space-fs-overhead
- - Name: Configuring CDI to work with namespaces that have a compute resource quota
- File: virt-configuring-cdi-for-namespace-resourcequota
- - Name: Managing data volume annotations
- File: virt-managing-data-volume-annotations
- - Name: Using preallocation for data volumes
- File: virt-using-preallocation-for-datavolumes
- - Name: Uploading local disk images by using the web console
- File: virt-uploading-local-disk-images-web
- - Name: Uploading local disk images by using the virtctl tool
- File: virt-uploading-local-disk-images-virtctl
- - Name: Uploading a local disk image to a block storage persistent volume claim
- File: virt-uploading-local-disk-images-block
- - Name: Managing virtual machine snapshots
- File: virt-managing-vm-snapshots
- - Name: Moving a local virtual machine disk to a different node
- File: virt-moving-local-vm-disk-to-different-node
- - Name: Expanding virtual storage by adding blank disk images
- File: virt-expanding-virtual-storage-with-blank-disk-images
- - Name: Cloning a data volume using smart-cloning
- File: virt-cloning-a-datavolume-using-smart-cloning
- - Name: Hot plugging virtual disks
+ - Name: Hot-plugging VM disks
File: virt-hot-plugging-virtual-disks
- - Name: Using container disks with virtual machines
- File: virt-using-container-disks-with-vms
- - Name: Preparing CDI scratch space
- File: virt-preparing-cdi-scratch-space
- - Name: Re-using statically provisioned persistent volumes
- File: virt-reusing-statically-provisioned-persistent-volumes
- - Name: Expanding a virtual machine disk
- File: virt-expanding-vm-disk
-# Templates
-- Name: Virtual machine templates
- Dir: vm_templates
- Topics:
- - Name: Creating virtual machine templates
- File: virt-creating-vm-template
- - Name: Editing virtual machine templates
- File: virt-editing-vm-template
- - Name: Enabling dedicated resources for a virtual machine template
- File: virt-dedicated-resources-vm-template
- - Name: Deploying a virtual machine template to a custom namespace
- File: virt-deploying-vm-template-to-custom-namespace
- - Name: Deleting a virtual machine template
- File: virt-deleting-vm-template
- - Name: Creating and using boot sources
- File: virt-creating-and-using-boot-sources
+ - Name: Expanding VM disks
+ File: virt-expanding-vm-disks
+ - Name: Configuring shared volumes
+ File: virt-configuring-shared-volumes-for-vms
+- Name: Networking
+ Dir: vm_networking
+ Topics:
+ - Name: Networking configuration overview
+ File: virt-networking-overview
+ - Name: Connecting a VM to the default pod network
+ File: virt-connecting-vm-to-default-pod-network
+ - Name: Exposing a VM by using a service
+ File: virt-exposing-vm-with-service
+ - Name: Connecting a VM to a Linux bridge network
+ File: virt-connecting-vm-to-linux-bridge
+ - Name: Connecting a VM to an SR-IOV network
+ File: virt-connecting-vm-to-sriov
+ - Name: Using DPDK with SR-IOV
+ File: virt-using-dpdk-with-sriov
+ - Name: Connecting a VM to an OVN-Kubernetes secondary network
+ File: virt-connecting-vm-to-ovn-secondary-network
+ - Name: Hot plugging secondary network interfaces
+ File: virt-hot-plugging-network-interfaces
+ - Name: Connecting a VM to a service mesh
+ File: virt-connecting-vm-to-service-mesh
+ - Name: Configuring a dedicated network for live migration
+ File: virt-dedicated-network-live-migration
+ - Name: Configuring and viewing IP addresses
+ File: virt-configuring-viewing-ips-for-vms
+ - Name: Accessing a VM by using the cluster FQDN
+ File: virt-accessing-vm-secondary-network-fqdn
+ - Name: Managing MAC address pools for network interfaces
+ File: virt-using-mac-address-pool-for-vms
+- Name: Storage
+ Dir: storage
+ Topics:
+ - Name: Storage configuration overview
+ File: virt-storage-config-overview
+ - Name: Configuring storage profiles
+ File: virt-configuring-storage-profile
- Name: Managing automatic boot source updates
File: virt-automatic-bootsource-updates
- Distros: openshift-enterprise
+ - Name: Reserving PVC space for file system overhead
+ File: virt-reserving-pvc-space-fs-overhead
+ - Name: Configuring local storage by using HPP
+ File: virt-configuring-local-storage-with-hpp
+ - Name: Enabling user permissions to clone data volumes across namespaces
+ File: virt-enabling-user-permissions-to-clone-datavolumes
+ - Name: Configuring CDI to override CPU and memory quotas
+ File: virt-configuring-cdi-for-namespace-resourcequota
+ - Name: Preparing CDI scratch space
+ File: virt-preparing-cdi-scratch-space
+ - Name: Using preallocation for data volumes
+ File: virt-using-preallocation-for-datavolumes
+ - Name: Managing data volume annotations
+ File: virt-managing-data-volume-annotations
# Virtual machine live migration
- Name: Live migration
Dir: live_migration
Topics:
- - Name: Virtual machine live migration
- File: virt-live-migration
- - Name: Live migration limits and timeouts
- File: virt-live-migration-limits
- - Name: Migrating a virtual machine instance to another node
- File: virt-migrate-vmi
- - Name: Migrating a virtual machine over a dedicated additional network
- File: virt-migrating-vm-on-secondary-network
- - Name: Cancelling the live migration of a virtual machine instance
- File: virt-cancel-vmi-migration
- - Name: Configuring virtual machine eviction strategy
- File: virt-configuring-vmi-eviction-strategy
- - Name: Configuring live migration policies
- File: virt-configuring-live-migration-policies
+ - Name: About live migration
+ File: virt-about-live-migration
+ - Name: Configuring live migration
+ File: virt-configuring-live-migration
+ - Name: Initiating and canceling live migration
+ File: virt-initiating-live-migration
# Node maintenance mode
-- Name: Node maintenance
- Dir: node_maintenance
+- Name: Nodes
+ Dir: nodes
Topics:
- - Name: About node maintenance
- File: virt-about-node-maintenance
- - Name: Automatic renewal of TLS certificates
- File: virt-automatic-certificates
+ - Name: Node maintenance
+ File: virt-node-maintenance
- Name: Managing node labeling for obsolete CPU models
File: virt-managing-node-labeling-obsolete-cpu-models
- Name: Preventing node reconciliation
File: virt-preventing-node-reconciliation
+ - Name: Deleting a failed node to trigger VM failover
+ File: virt-triggering-vm-failover-resolving-failed-node
- Name: Monitoring
Dir: monitoring
Topics:
- Name: Monitoring overview
File: virt-monitoring-overview
- - Name: OpenShift cluster checkup framework
+ - Name: Cluster checkup framework
File: virt-running-cluster-checkups
- Name: Prometheus queries for virtual resources
File: virt-prometheus-queries
@@ -3888,6 +4115,8 @@ Topics:
- Name: Backup and restore
Dir: backup_restore
Topics:
+ - Name: Backup and restore by using VM snapshots
+ File: virt-backup-restore-snapshots
- Name: Installing and configuring OADP
File: virt-installing-configuring-oadp
- Name: Backing up and restoring virtual machines
@@ -3899,13 +4128,3 @@ Topics:
# - Name: Collecting OKD Virtualization data for community report
# File: virt-collecting-virt-data
# Distros: openshift-origin
----
-Name: Serverless
-Dir: serverless
-Distros: openshift-enterprise
-Topics:
-- Name: About Serverless
- Dir: about
- Topics:
- - Name: Serverless overview
- File: about-serverless
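(Aside: throughout the hunks above, nested `Dir` values compose with each leaf `File` to locate the underlying source page. Below is a minimal sketch of that composition, using the Power monitoring book added earlier in this map; the `.adoc` extension is an assumption about the source tree, since the topic map itself does not state one.)

```python
# Minimal sketch: flatten one parsed topic-map book into the source
# paths its entries appear to point at. The ".adoc" suffix is an
# assumption; groups always carry Dir, leaves always carry File.
def source_paths(book):
    paths = []
    def walk(entries, prefix):
        for entry in entries:
            if "File" in entry:
                paths.append(f"{prefix}/{entry['File']}.adoc")
            if "Topics" in entry:
                walk(entry["Topics"], f"{prefix}/{entry['Dir']}")
    walk(book["Topics"], book["Dir"])
    return paths

book = {
    "Name": "Power monitoring",
    "Dir": "power_monitoring",
    "Topics": [
        {"Name": "Power monitoring overview", "File": "power-monitoring-overview"},
    ],
}
print(source_paths(book))
# -> ['power_monitoring/power-monitoring-overview.adoc']
```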
diff --git a/_topic_maps/_topic_map_ms.yml b/_topic_maps/_topic_map_ms.yml
index 89ecb5beec1d..23c25eff3588 100644
--- a/_topic_maps/_topic_map_ms.yml
+++ b/_topic_maps/_topic_map_ms.yml
@@ -23,26 +23,24 @@
---
Name: About
-Dir: welcome
+Dir: microshift_welcome
Distros: microshift
Topics:
- Name: Welcome
File: index
-- Name: Legal notice
- File: legal-notice
---
Name: Release notes
Dir: microshift_release_notes
Distros: microshift
Topics:
-- Name: MicroShift 4.14 release notes
- File: microshift-4-14-release-notes
+- Name: Red Hat build of MicroShift 4.15 release notes
+ File: microshift-4-15-release-notes
---
Name: Getting started
Dir: microshift_getting_started
Distros: microshift
Topics:
-- Name: Understanding MicroShift
+- Name: Understanding Red Hat build of MicroShift
File: microshift-understanding
- Name: Architecture
File: microshift-architecture
@@ -51,28 +49,44 @@ Name: Installing
Dir: microshift_install
Distros: microshift
Topics:
-- Name: Installing from RPM
+- Name: Installing from an RPM package
File: microshift-install-rpm
+- Name: Using FIPS mode
+ File: microshift-fips
+- Name: Mirroring container images for disconnected installations
+ File: microshift-deploy-with-mirror-registry
- Name: Embedding in a RHEL for Edge image
File: microshift-embed-in-rpm-ostree
-- Name: Greenboot health check
+- Name: Embedding in a RHEL for Edge image for offline use
+ File: microshift-embed-in-rpm-ostree-offline-use
+- Name: Understanding system health checks
File: microshift-greenboot
+- Name: Troubleshooting installation issues
+ File: microshift-installing-troubleshooting
---
-Name: Updating clusters
+Name: Updating
Dir: microshift_updating
Distros: microshift
Topics:
-- Name: About MicroShift updates
+- Name: About updates
File: microshift-about-updates
+- Name: Update options
+ File: microshift-update-options
+- Name: Updates with rpm-ostree systems
+ File: microshift-update-rpms-ostree
+- Name: Manual updates with RPMs
+ File: microshift-update-rpms-manually
---
Name: Support
Dir: microshift_support
Distros: microshift
Topics:
-- Name: MicroShift etcd
+- Name: The etcd service
File: microshift-etcd
-- Name: MicroShift sos report
+- Name: The sos report tool
File: microshift-sos-report
+- Name: Getting support
+ File: microshift-getting-support
---
Name: API reference
Dir: microshift_rest_api
@@ -82,16 +96,275 @@ Topics:
File: understanding-api-support-tiers
- Name: API compatibility guidelines
File: understanding-compatibility-guidelines
+- Name: API index
+ File: index
+- Name: API object reference
+ Dir: objects
+ Topics:
+ - Name: API objects reference
+ File: index
+- Name: Extension APIs
+ Dir: api_extensions_apis
+ Topics:
+ - Name: Extension APIs
+ File: api-extensions-apis-index
+ - Name: CustomResourceDefinition [apiextensions.k8s.io/v1]
+ File: customresourcedefinition-apiextensions-k8s-io-v1
+- Name: Registration APIs
+ Dir: api_registration_apis
+ Topics:
+ - Name: Registration APIs
+ File: api-registration-apis-index
+ - Name: APIService [apiregistration.k8s.io/v1]
+ File: apiservice-apiregistration-k8s-io-v1
+- Name: Apps APIs
+ Dir: apps_apis
+ Topics:
+ - Name: Apps APIs
+ File: apps-apis-index
+ - Name: ControllerRevision [apps/v1]
+ File: controllerrevision-apps-v1
+ - Name: DaemonSet [apps/v1]
+ File: daemonset-apps-v1
+ - Name: Deployment [apps/v1]
+ File: deployment-apps-v1
+ - Name: ReplicaSet [apps/v1]
+ File: replicaset-apps-v1
+ - Name: StatefulSet [apps/v1]
+ File: statefulset-apps-v1
+- Name: Authentication APIs
+ Dir: authentication_apis
+ Topics:
+ - Name: Authentication APIs
+ File: authentication-apis-index
+ - Name: TokenRequest [authentication.k8s.io/v1]
+ File: tokenrequest-authentication-k8s-io-v1
+ - Name: TokenReview [authentication.k8s.io/v1]
+ File: tokenreview-authentication-k8s-io-v1
+- Name: Authorization APIs
+ Dir: authorization_apis
+ Topics:
+ - Name: Authorization APIs
+ File: authorization-apis-index
+ - Name: LocalSubjectAccessReview [authorization.k8s.io/v1]
+ File: localsubjectaccessreview-authorization-k8s-io-v1
+ - Name: SelfSubjectAccessReview [authorization.k8s.io/v1]
+ File: selfsubjectaccessreview-authorization-k8s-io-v1
+ - Name: SelfSubjectRulesReview [authorization.k8s.io/v1]
+ File: selfsubjectrulesreview-authorization-k8s-io-v1
+ - Name: SubjectAccessReview [authorization.k8s.io/v1]
+ File: subjectaccessreview-authorization-k8s-io-v1
+- Name: Autoscaling APIs
+ Dir: autoscaling_apis
+ Topics:
+ - Name: Autoscaling APIs
+ File: autoscaling-apis-index
+ - Name: HorizontalPodAutoscaler [autoscaling/v2]
+ File: horizontalpodautoscaler-autoscaling-v2
+ - Name: Scale [autoscaling/v1]
+ File: scale-autoscaling-v1
+- Name: Batch APIs
+ Dir: batch_apis
+ Topics:
+ - Name: Batch APIs
+ File: batch-apis-index
+ - Name: CronJob [batch/v1]
+ File: cronjob-batch-v1
+ - Name: Job [batch/v1]
+ File: job-batch-v1
+- Name: Certificates APIs
+ Dir: certificates_apis
+ Topics:
+ - Name: Certificates APIs
+ File: certificates-apis-index
+ - Name: CertificateSigningRequest [certificates.k8s.io/v1]
+ File: certificatesigningrequest-certificates-k8s-io-v1
+- Name: Coordination APIs
+ Dir: coordination_apis
+ Topics:
+ - Name: Coordination APIs
+ File: coordination-apis-index
+ - Name: Lease [coordination.k8s.io/v1]
+ File: lease-coordination-k8s-io-v1
+- Name: Core APIs
+ Dir: core_apis
+ Topics:
+ - Name: Core APIs
+ File: core-apis-index
+ - Name: Binding [v1]
+ File: binding-v1
+ - Name: ComponentStatus [v1]
+ File: componentstatus-v1
+ - Name: ConfigMap [v1]
+ File: configmap-v1
+ - Name: Endpoints [v1]
+ File: endpoints-v1
+ - Name: Event [v1]
+ File: event-v1
+ - Name: LimitRange [v1]
+ File: limitrange-v1
+ - Name: Namespace [v1]
+ File: namespace-v1
+ - Name: Node [v1]
+ File: node-v1
+ - Name: PersistentVolume [v1]
+ File: persistentvolume-v1
+ - Name: PersistentVolumeClaim [v1]
+ File: persistentvolumeclaim-v1
+ - Name: Pod [v1]
+ File: pod-v1
+ - Name: PodTemplate [v1]
+ File: podtemplate-v1
+ - Name: ReplicationController [v1]
+ File: replicationcontroller-v1
+ - Name: ResourceQuota [v1]
+ File: resourcequota-v1
+ - Name: Secret [v1]
+ File: secret-v1
+ - Name: Service [v1]
+ File: service-v1
+ - Name: ServiceAccount [v1]
+ File: serviceaccount-v1
+- Name: Discovery APIs
+ Dir: discovery_apis
+ Topics:
+ - Name: Discovery APIs
+ File: discovery-apis-index
+ - Name: EndpointSlice [discovery.k8s.io/v1]
+ File: endpointslice-discovery-k8s-io-v1
+- Name: Events APIs
+ Dir: events_apis
+ Topics:
+ - Name: Events APIs
+ File: events-apis-index
+ - Name: Event [events.k8s.io/v1]
+ File: event-events-k8s-io-v1
+- Name: Flow Control APIs
+ Dir: flow_control_apis
+ Topics:
+ - Name: Flow Control APIs
+ File: flow-control-apis-index
+ - Name: FlowSchema [flowcontrol.apiserver.k8s.io/v1beta3]
+ File: flowschema-flowcontrol-apiserver-k8s-io-v1beta3
+ - Name: PriorityLevelConfiguration [flowcontrol.apiserver.k8s.io/v1beta3]
+ File: prioritylevelconfiguration-flowcontrol-apiserver-k8s-io-v1beta3
+- Name: Networking APIs
+ Dir: networking_apis
+ Topics:
+ - Name: Networking APIs
+ File: networking-apis-index
+ - Name: Ingress [networking.k8s.io/v1]
+ File: ingress-networking-k8s-io-v1
+ - Name: IngressClass [networking.k8s.io/v1]
+ File: ingressclass-networking-k8s-io-v1
+ - Name: NetworkPolicy [networking.k8s.io/v1]
+ File: networkpolicy-networking-k8s-io-v1
+- Name: Node APIs
+ Dir: node_apis
+ Topics:
+ - Name: Node APIs
+ File: node-apis-index
+ - Name: RuntimeClass [node.k8s.io/v1]
+ File: runtimeclass-node-k8s-io-v1
+- Name: Policy APIs
+ Dir: policy_apis
+ Topics:
+ - Name: Policy APIs
+ File: policy-apis-index
+ - Name: Eviction [policy/v1]
+ File: eviction-policy-v1
+ - Name: PodDisruptionBudget [policy/v1]
+ File: poddisruptionbudget-policy-v1
+- Name: RBAC APIs
+ Dir: rbac_apis
+ Topics:
+ - Name: RBAC APIs
+ File: rbac-apis-index
+ - Name: ClusterRole [rbac.authorization.k8s.io/v1]
+ File: clusterrole-rbac-authorization-k8s-io-v1
+ - Name: ClusterRoleBinding [rbac.authorization.k8s.io/v1]
+ File: clusterrolebinding-rbac-authorization-k8s-io-v1
+ - Name: Role [rbac.authorization.k8s.io/v1]
+ File: role-rbac-authorization-k8s-io-v1
+ - Name: RoleBinding [rbac.authorization.k8s.io/v1]
+ File: rolebinding-rbac-authorization-k8s-io-v1
- Name: Network APIs
Dir: network_apis
Topics:
+ - Name: Network APIs
+ File: network-apis-index
- Name: Route [route.openshift.io/v1]
File: route-route-openshift-io-v1
+- Name: Scheduling APIs
+ Dir: scheduling_apis
+ Topics:
+ - Name: Scheduling APIs
+ File: scheduling-apis-index
+ - Name: PriorityClass [scheduling.k8s.io/v1]
+ File: priorityclass-scheduling-k8s-io-v1
- Name: Security APIs
Dir: security_apis
Topics:
+ - Name: Security APIs
+ File: security-apis-index
- Name: SecurityContextConstraints [security.openshift.io/v1]
File: securitycontextconstraints-security-openshift-io-v1
+- Name: Security-Internal APIs
+ Dir: security_internal_apis
+ Topics:
+ - Name: Security Internal APIs
+ File: security-internal-apis-index
+ - Name: RangeAllocation [security.internal.openshift.io/v1]
+ File: rangeallocation-security-internal-openshift-io-v1
+- Name: Snapshot APIs
+ Dir: snapshot_apis
+ Topics:
+ - Name: CSI Snapshot APIs
+ File: snapshot-apis-index
+ - Name: VolumeSnapshot [snapshot.storage.k8s.io/v1]
+ File: volumesnapshot-snapshot-storage-k8s-io-v1
+ - Name: VolumeSnapshotClass [snapshot.storage.k8s.io/v1]
+ File: volumesnapshotclass-snapshot-storage-k8s-io-v1
+ - Name: VolumeSnapshotContent [snapshot.storage.k8s.io/v1]
+ File: volumesnapshotcontent-snapshot-storage-k8s-io-v1
+- Name: Storage APIs
+ Dir: storage_apis
+ Topics:
+ - Name: Storage APIs
+ File: storage-apis-index
+ - Name: CSIDriver [storage.k8s.io/v1]
+ File: csidriver-storage-k8s-io-v1
+ - Name: CSINode [storage.k8s.io/v1]
+ File: csinode-storage-k8s-io-v1
+ - Name: CSIStorageCapacity [storage.k8s.io/v1]
+ File: csistoragecapacity-storage-k8s-io-v1
+ - Name: StorageClass [storage.k8s.io/v1]
+ File: storageclass-storage-k8s-io-v1
+ - Name: VolumeAttachment [storage.k8s.io/v1]
+ File: volumeattachment-storage-k8s-io-v1
+- Name: Storage Version Migration APIs
+ Dir: storage_version_migration_apis
+ Topics:
+ - Name: Storage Version Migration APIs
+ File: storage-version-migration-apis-index
+ - Name: StorageVersionMigration [migration.k8s.io/v1alpha1]
+ File: storageversionmigration-migration-k8s-io-v1alpha1
+- Name: TopoLVM APIs
+ Dir: topolvm_apis
+ Topics:
+ - Name: TopoLVM APIs
+ File: topolvm-apis-index
+ - Name: LogicalVolume [topolvm.io/v1]
+ File: logicalvolume-topolvm-io-v1
+- Name: Webhook APIs
+ Dir: webhook_apis
+ Topics:
+ - Name: Webhook APIs
+ File: webhook-apis-index
+ - Name: MutatingWebhookConfiguration [admissionregistration.k8s.io/v1]
+ File: mutatingwebhookconfiguration-admissionregistration-k8s-io-v1
+ - Name: ValidatingWebhookConfiguration [admissionregistration.k8s.io/v1]
+ File: validatingwebhookconfiguration-admissionregistration-k8s-io-v1
---
Name: CLI tools
Dir: microshift_cli_ref
@@ -118,47 +391,82 @@ Topics:
File: microshift-using-config-tools
- Name: Cluster access with kubeconfig
File: microshift-cluster-access-kubeconfig
+- Name: Checking the status of Greenboot health checks
+ File: microshift-greenboot-checking-status
---
Name: Networking
Dir: microshift_networking
Distros: microshift
Topics:
-- Name: Networking settings
- File: microshift-networking
+- Name: About the networking plugin
+ File: microshift-cni
+- Name: Using networking settings
+ File: microshift-networking-settings
- Name: Firewall configuration
File: microshift-firewall
+- Name: Networking settings for fully disconnected hosts
+ File: microshift-disconnected-network-config
---
Name: Storage
Dir: microshift_storage
Distros: microshift
Topics:
-- Name: MicroShift storage overview
+- Name: About storage
File: index
-- Name: Understanding ephemeral storage for MicroShift
+- Name: Understanding ephemeral storage
File: understanding-ephemeral-storage-microshift
-- Name: Generic ephemeral volumes for MicroShift
+- Name: Generic ephemeral volumes
File: generic-ephemeral-volumes-microshift
-- Name: Understanding persistent storage for MicroShift
+- Name: Understanding persistent storage
File: understanding-persistent-storage-microshift
-- Name: Expanding persistent volumes for MicroShift
+- Name: Expanding persistent volumes
File: expanding-persistent-volumes-microshift
- Name: Dynamic storage using the LVMS plugin
File: microshift-storage-plugin-overview
+- Name: Working with volume snapshots
+ File: volume-snapshots-microshift
+- Name: Understanding storage migration
+ File: microshift-storage-migration
---
Name: Running applications
Dir: microshift_running_apps
Distros: microshift
Topics:
-- Name: Application deployment
+- Name: Using Kustomize to deploy applications
File: microshift-applications
-- Name: Operators
+- Name: Embedding applications on RHEL for Edge
+ File: microshift-embedded-apps-on-rhel-edge
+- Name: Embedding applications for offline use
+ File: microshift-embed-apps-offline-use
+- Name: Embedding applications tutorial
+ File: microshift-embedding-apps-tutorial
+- Name: Creating application or workload health check scripts
+ File: microshift-greenboot-workload-scripts
+- Name: Pod security authentication and authorization
+ File: microshift-authentication
+- Name: Using Operators
File: microshift-operators
---
+Name: Backup and restore
+Dir: microshift_backup_and_restore
+Distros: microshift
+Topics:
+- Name: Backing up and restoring data
+ File: microshift-backup-and-restore
+---
Name: Troubleshooting
Dir: microshift_troubleshooting
Distros: microshift
Topics:
- Name: Checking your version
File: microshift-version
+- Name: Troubleshooting backup and restore
+ File: microshift-troubleshoot-backup-restore
+- Name: Troubleshooting the cluster
+  File: microshift-troubleshoot-cluster
+- Name: Troubleshooting updates
+  File: microshift-troubleshoot-updates
+- Name: Checking audit logs
+ File: microshift-audit-logs
- Name: Additional information
File: microshift-things-to-know
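(Aside: the API reference entries added above follow a visible naming pattern — `Kind [group/version]` becomes a lowercase, hyphen-separated `File` slug. Below is a minimal sketch of that inferred rule; it is read off the listed pairs, not a documented convention, and core-group entries such as `Endpoints [undefined/v1]` → `endpoints-v1` deviate from it.)

```python
# Minimal sketch: derive the File slug the API reference entries above
# appear to follow, e.g. 'DaemonSet [apps/v1]' -> 'daemonset-apps-v1'.
# Inferred from the listed pairs; core-group ("undefined") entries in
# the map use a shorter form and are not covered by this rule.
import re

def api_slug(name):
    kind, rest = name.split(" [", 1)
    group_version = rest.rstrip("]")
    return re.sub(r"[./]", "-", f"{kind}-{group_version}").lower()

assert api_slug("DaemonSet [apps/v1]") == "daemonset-apps-v1"
assert (api_slug("FlowSchema [flowcontrol.apiserver.k8s.io/v1beta3]")
        == "flowschema-flowcontrol-apiserver-k8s-io-v1beta3")
```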
diff --git a/_topic_maps/_topic_map_osd.yml b/_topic_maps/_topic_map_osd.yml
index d780de27de5e..4fcdf01dea29 100644
--- a/_topic_maps/_topic_map_osd.yml
+++ b/_topic_maps/_topic_map_osd.yml
@@ -44,20 +44,28 @@ Topics:
Dir: osd_policy
Distros: openshift-dedicated
Topics:
- - Name: OpenShift Dedicated service definition
- File: osd-service-definition
- - Name: Responsibility assignment matrix
- File: policy-responsibility-matrix
- - Name: Understanding process and security for OpenShift Dedicated
- File: policy-process-security
- - Name: About availability for OpenShift Dedicated
- File: policy-understand-availability
- - Name: Update life cycle
- File: osd-life-cycle
-- Name: Support for OpenShift Dedicated
- File: osd-support
+ - Name: OpenShift Dedicated service definition
+ File: osd-service-definition
+ - Name: Responsibility assignment matrix
+ File: policy-responsibility-matrix
+ - Name: Understanding process and security for OpenShift Dedicated
+ File: policy-process-security
+ - Name: SRE and service account access
+ File: osd-sre-access
+ - Name: About availability for OpenShift Dedicated
+ File: policy-understand-availability
+ - Name: Update life cycle
+ File: osd-life-cycle
+# Created a new assembly in ROSA/OSD. In OCP, the assembly is in a book that is not in ROSA/OSD
+- Name: About admission plugins
+ File: osd-admission-plug-ins
Distros: openshift-dedicated
---
+#Name: Tutorials
+#Dir: cloud_experts_tutorials
+#Distros: openshift-dedicated
+#Topics:
+#---
Name: Red Hat OpenShift Cluster Manager
Dir: ocm
Distros: openshift-dedicated
@@ -100,30 +108,239 @@ Topics:
- Name: Deleting an OpenShift Dedicated cluster
File: osd-deleting-a-cluster
---
+Name: Support
+Dir: support
+Distros: openshift-dedicated
+Topics:
+- Name: Support overview
+ File: index
+- Name: Managing your cluster resources
+ File: managing-cluster-resources
+- Name: Getting support
+ File: getting-support
+ Distros: openshift-dedicated
+- Name: Remote health monitoring with connected clusters
+ Dir: remote_health_monitoring
+ Distros: openshift-dedicated
+ Topics:
+ - Name: About remote health monitoring
+ File: about-remote-health-monitoring
+ - Name: Showing data collected by remote health monitoring
+ File: showing-data-collected-by-remote-health-monitoring
+# cannot get resource "secrets" in API group "" in the namespace "openshift-config"
+# - Name: Opting out of remote health reporting
+# File: opting-out-of-remote-health-reporting
+# cannot get resource "secrets" in API group "" in the namespace "openshift-config"
+# - Name: Enabling remote health reporting
+# File: enabling-remote-health-reporting
+ - Name: Using Insights to identify issues with your cluster
+ File: using-insights-to-identify-issues-with-your-cluster
+ - Name: Using Insights Operator
+ File: using-insights-operator
+# Not supported per Michael McNeill
+# - Name: Using remote health reporting in a restricted network
+# File: remote-health-reporting-from-restricted-network
+# cannot list resource "secrets" in API group "" in the namespace "openshift-config"
+# - Name: Importing simple content access entitlements with Insights Operator
+# File: insights-operator-simple-access
+# must-gather not supported for customers, per Dustin Row, cannot create resource "namespaces"
+# - Name: Gathering data about your cluster
+# File: gathering-cluster-data
+# Distros: openshift-dedicated
+- Name: Summarizing cluster specifications
+ File: summarizing-cluster-specifications
+ Distros: openshift-dedicated
+- Name: Troubleshooting
+ Dir: troubleshooting
+ Distros: openshift-dedicated
+ Topics:
+# - Name: Troubleshooting installations
+# File: troubleshooting-installations
+ - Name: Verifying node health
+ File: verifying-node-health
+# cannot create resource "namespaces", cannot patch resource "nodes"
+# - Name: Troubleshooting CRI-O container runtime issues
+# File: troubleshooting-crio-issues
+# requires ostree, butane, and other plug-ins
+# - Name: Troubleshooting operating system issues
+# File: troubleshooting-operating-system-issues
+# Distros: openshift-dedicated
+# cannot patch resource "nodes", "nodes/proxy", "namespaces"
+# - Name: Troubleshooting network issues
+# File: troubleshooting-network-issues
+# Distros: openshift-dedicated
+ - Name: Troubleshooting Operator issues
+ File: troubleshooting-operator-issues
+ - Name: Investigating pod issues
+ File: investigating-pod-issues
+# Hiding from ROSA and OSD until it is decided who should port the Build book
+# - Name: Troubleshooting the Source-to-Image process
+# File: troubleshooting-s2i
+ - Name: Troubleshooting storage issues
+ File: troubleshooting-storage-issues
+# Not supported per WINC team
+# - Name: Troubleshooting Windows container workload issues
+# File: troubleshooting-windows-container-workload-issues
+ - Name: Investigating monitoring issues
+ File: investigating-monitoring-issues
+ - Name: Diagnosing OpenShift CLI (oc) issues
+ File: diagnosing-oc-issues
+ - Name: OpenShift Dedicated managed resources
+ File: osd-managed-resources
+ Distros: openshift-dedicated
+---
+Name: Web console
+Dir: web_console
+Distros: openshift-dedicated
+Topics:
+- Name: Web console overview
+ File: web-console-overview
+- Name: Accessing the web console
+ File: web-console
+- Name: Viewing cluster information
+ File: using-dashboard-to-get-cluster-information
+- Name: Adding user preferences
+ File: adding-user-preferences
+ Distros: openshift-enterprise,openshift-origin
+# cannot patch resource "consoles", insufficient permissions to read any Cluster configuration
+#- Name: Configuring the web console
+# File: configuring-web-console
+# Distros: openshift-rosa
+- Name: Customizing the web console
+ File: customizing-the-web-console
+ Distros: openshift-dedicated
+- Name: Dynamic plugins
+ Dir: dynamic-plugin
+ Distros: openshift-dedicated
+ Topics:
+ - Name: Overview of dynamic plugins
+ File: overview-dynamic-plugin
+ - Name: Getting started with dynamic plugins
+ File: dynamic-plugins-get-started
+ - Name: Deploy your plugin on a cluster
+ File: deploy-plugin-cluster
+ - Name: Dynamic plugin example
+ File: dynamic-plugin-example
+ - Name: Dynamic plugin reference
+ File: dynamic-plugins-reference
+- Name: Web terminal
+ Dir: web_terminal
+ Distros: openshift-dedicated
+ Topics:
+ - Name: Installing the web terminal
+ File: installing-web-terminal
+ # Do not have sufficient permissions to read any cluster configuration.
+ # - Name: Configuring the web terminal
+ # File: configuring-web-terminal
+ - Name: Using the web terminal
+ File: odc-using-web-terminal
+ - Name: Troubleshooting the web terminal
+ File: troubleshooting-web-terminal
+ - Name: Uninstalling the web terminal
+ File: uninstalling-web-terminal
+- Name: Disabling the web console
+ File: disabling-web-console
+ Distros: openshift-dedicated
+- Name: About quick start tutorials
+ File: creating-quick-start-tutorials
+ Distros: openshift-dedicated
+---
+Name: CLI tools
+Dir: cli_reference
+Distros: openshift-dedicated
+Topics:
+- Name: CLI tools overview
+ File: index
+- Name: OpenShift CLI (oc)
+ Dir: openshift_cli
+ Topics:
+ - Name: Getting started with the OpenShift CLI
+ File: getting-started-cli
+ - Name: Configuring the OpenShift CLI
+ File: configuring-cli
+ - Name: Usage of oc and kubectl commands
+ File: usage-oc-kubectl
+ - Name: Managing CLI profiles
+ File: managing-cli-profiles
+ - Name: Extending the OpenShift CLI with plugins
+ File: extending-cli-plugins
+ # - Name: Managing CLI plugins with Krew
+ # File: managing-cli-plugins-krew
+ # Distros: openshift-dedicated
+ - Name: OpenShift CLI developer command reference
+ File: developer-cli-commands
+ - Name: OpenShift CLI administrator command reference
+ File: administrator-cli-commands
+ Distros: openshift-dedicated
+- Name: Developer CLI (odo)
+ File: odo-important-update
+ # Dir: developer_cli_odo
+ Distros: openshift-dedicated
+ # Topics:
+ # - Name: odo release notes
+ # File: odo-release-notes
+ # - Name: Understanding odo
+ # File: understanding-odo
+ # - Name: Installing odo
+ # File: installing-odo
+ # - Name: Configuring the odo CLI
+ # File: configuring-the-odo-cli
+ # - Name: odo CLI reference
+ # File: odo-cli-reference
+- Name: Knative CLI (kn) for use with OpenShift Serverless
+ File: kn-cli-tools
+ Distros: openshift-dedicated
+- Name: Pipelines CLI (tkn)
+ Dir: tkn_cli
+ Distros: openshift-dedicated
+ Topics:
+ - Name: Installing tkn
+ File: installing-tkn
+ - Name: Configuring tkn
+ File: op-configuring-tkn
+ - Name: Basic tkn commands
+ File: op-tkn-reference
+- Name: opm CLI
+ Dir: opm
+ Distros: openshift-dedicated
+ Topics:
+ - Name: Installing the opm CLI
+ File: cli-opm-install
+ - Name: opm CLI reference
+ File: cli-opm-ref
+- Name: Operator SDK
+ Dir: osdk
+ Distros: openshift-dedicated
+ Topics:
+ - Name: Installing the Operator SDK CLI
+ File: cli-osdk-install
+ - Name: Operator SDK CLI reference
+ File: cli-osdk-ref
+---
Name: Cluster administration
Dir: osd_cluster_admin
Distros: openshift-dedicated
Topics:
-- Name: Managing administration roles and users
- File: osd-admin-roles
- Name: Configuring private connections
Dir: osd_private_connections
Distros: openshift-dedicated
Topics:
- - Name: Configuring private connections for AWS
- File: aws-private-connections
- - Name: Configuring a private cluster
- File: private-cluster
+ - Name: Configuring private connections for AWS
+ File: aws-private-connections
+ - Name: Configuring a private cluster
+ File: private-cluster
+- Name: Cluster autoscaling
+ File: osd-cluster-autoscaling
- Name: Nodes
Dir: osd_nodes
Distros: openshift-dedicated
Topics:
- - Name: About machine pools
- File: osd-nodes-machinepools-about
- - Name: Managing compute nodes
- File: osd-managing-worker-nodes
- - Name: About autoscaling nodes on a cluster
- File: osd-nodes-about-autoscaling-nodes
+ - Name: About machine pools
+ File: osd-nodes-machinepools-about
+ - Name: Managing compute nodes
+ File: osd-managing-worker-nodes
+ - Name: About autoscaling nodes on a cluster
+ File: osd-nodes-about-autoscaling-nodes
- Name: Logging
Dir: osd_logging
Distros: openshift-dedicated
@@ -131,19 +348,91 @@ Topics:
- Name: Accessing the service logs
File: osd-accessing-the-service-logs
---
-# Name: Security and compliance
-# Dir: security
-# Distros: openshift-dedicated
-# Topics:
-# - Name: Viewing audit logs
-# File: audit-log-view
-# ---
+Name: Security and compliance
+Dir: security
+Distros: openshift-dedicated
+Topics:
+- Name: Audit logs
+ File: audit-log-view
+---
Name: Authentication and authorization
Dir: authentication
Distros: openshift-dedicated
Topics:
+- Name: Authentication and authorization overview
+ File: index
+- Name: Understanding authentication
+ File: understanding-authentication
+# - Name: Configuring the internal OAuth server
+# File: configuring-internal-oauth
+# - Name: Configuring OAuth clients
+# File: configuring-oauth-clients
+- Name: Managing user-owned OAuth access tokens
+ File: managing-oauth-access-tokens
+# - Name: Understanding identity provider configuration
+# File: understanding-identity-provider
+- Name: Configuring identity providers
+ File: sd-configuring-identity-providers
+# - Name: Configuring identity providers
+# Dir: identity_providers
+# Topics:
+# - Name: Configuring an htpasswd identity provider
+# File: configuring-htpasswd-identity-provider
+# - Name: Configuring a Keystone identity provider
+# File: configuring-keystone-identity-provider
+# - Name: Configuring an LDAP identity provider
+# File: configuring-ldap-identity-provider
+# - Name: Configuring a basic authentication identity provider
+# File: configuring-basic-authentication-identity-provider
+# - Name: Configuring a request header identity provider
+# File: configuring-request-header-identity-provider
+# - Name: Configuring a GitHub or GitHub Enterprise identity provider
+# File: configuring-github-identity-provider
+# - Name: Configuring a GitLab identity provider
+# File: configuring-gitlab-identity-provider
+# - Name: Configuring a Google identity provider
+# File: configuring-google-identity-provider
+# - Name: Configuring an OpenID Connect identity provider
+# File: configuring-oidc-identity-provider
+- Name: Managing administration roles and users
+ File: osd-admin-roles
+- Name: Using RBAC to define and apply permissions
+ File: using-rbac
+# - Name: Removing the kubeadmin user
+# File: remove-kubeadmin
+#- Name: Configuring LDAP failover
+# File: configuring-ldap-failover
+- Name: Understanding and creating service accounts
+ File: understanding-and-creating-service-accounts
+- Name: Using service accounts in applications
+ File: using-service-accounts-in-applications
+- Name: Using a service account as an OAuth client
+ File: using-service-accounts-as-oauth-client
+- Name: Scoping tokens
+ File: tokens-scoping
+- Name: Using bound service account tokens
+ File: bound-service-account-tokens
- Name: Managing security context constraints
File: managing-security-context-constraints
+- Name: Understanding and managing pod security admission
+ File: understanding-and-managing-pod-security-admission
+# - Name: Impersonating the system:admin user
+# File: impersonating-system-admin
+- Name: Syncing LDAP groups
+ File: ldap-syncing
+# - Name: Managing cloud provider credentials
+# Dir: managing_cloud_provider_credentials
+# Topics:
+# - Name: About the Cloud Credential Operator
+# File: about-cloud-credential-operator
+# - Name: Mint mode
+# File: cco-mode-mint
+# - Name: Passthrough mode
+# File: cco-mode-passthrough
+# - Name: Manual mode with long-term credentials for components
+# File: cco-mode-manual
+# - Name: Manual mode with short-term credentials for components
+# File: cco-short-term-creds
---
Name: Upgrading
Dir: upgrading
@@ -168,6 +457,59 @@ Topics:
File: setting-up-trusted-ca
Distros: openshift-dedicated
---
+Name: Images
+Dir: openshift_images
+Distros: openshift-dedicated
+Topics:
+- Name: Overview of images
+ File: index
+# renamed from "Configuring the Cluster Samples Operator"; customers cannot configure the operator
+- Name: Overview of the Cluster Samples Operator
+ File: configuring-samples-operator
+ Distros: openshift-dedicated
+- Name: Using the Cluster Samples Operator with an alternate registry
+ File: samples-operator-alt-registry
+ Distros: openshift-dedicated
+- Name: Creating images
+ File: create-images
+- Name: Managing images
+ Dir: managing_images
+ Topics:
+ - Name: Managing images overview
+ File: managing-images-overview
+ - Name: Tagging images
+ File: tagging-images
+ - Name: Image pull policy
+ File: image-pull-policy
+ - Name: Using image pull secrets
+ File: using-image-pull-secrets
+- Name: Managing image streams
+ File: image-streams-manage
+ Distros: openshift-dedicated
+- Name: Using image streams with Kubernetes resources
+ File: using-imagestreams-with-kube-resources
+ Distros: openshift-dedicated
+- Name: Triggering updates on image stream changes
+ File: triggering-updates-on-imagestream-changes
+ Distros: openshift-dedicated
+- Name: Image configuration resources
+ File: image-configuration
+ Distros: openshift-dedicated
+- Name: Using templates
+ File: using-templates
+- Name: Using Ruby on Rails
+ File: templates-using-ruby-on-rails
+- Name: Using images
+ Dir: using_images
+ Distros: openshift-dedicated
+ Topics:
+ - Name: Using images overview
+ File: using-images-overview
+ - Name: Source-to-image
+ File: using-s21-images
+ - Name: Customizing source-to-image images
+ File: customizing-s2i-images
+---
Name: Add-on services
Dir: adding_service_cluster
Distros: openshift-dedicated
@@ -225,8 +567,179 @@ Topics:
File: configuring-registry-operator
- Name: Accessing the registry
File: accessing-the-registry
-- Name: Exposing the registry
- File: securing-exposing-registry
+# - Name: Exposing the registry
+# File: securing-exposing-registry
+---
+Name: Operators
+Dir: operators
+Distros: openshift-dedicated
+Topics:
+- Name: Operators overview
+ File: index
+- Name: Understanding Operators
+ Dir: understanding
+ Topics:
+ - Name: What are Operators?
+ File: olm-what-operators-are
+ - Name: Packaging format
+ File: olm-packaging-format
+ - Name: Common terms
+ File: olm-common-terms
+ - Name: Operator Lifecycle Manager (OLM)
+ Dir: olm
+ Topics:
+ - Name: Concepts and resources
+ File: olm-understanding-olm
+ - Name: Architecture
+ File: olm-arch
+ - Name: Workflow
+ File: olm-workflow
+ - Name: Dependency resolution
+ File: olm-understanding-dependency-resolution
+ - Name: Operator groups
+ File: olm-understanding-operatorgroups
+ - Name: Multitenancy and Operator colocation
+ File: olm-colocation
+ - Name: Operator conditions
+ File: olm-operatorconditions
+ - Name: Metrics
+ File: olm-understanding-metrics
+ - Name: Webhooks
+ File: olm-webhooks
+ - Name: OperatorHub
+ File: olm-understanding-operatorhub
+ - Name: Red Hat-provided Operator catalogs
+ File: olm-rh-catalogs
+ - Name: Operators in multitenant clusters
+ File: olm-multitenancy
+ - Name: CRDs
+ Dir: crds
+ Topics:
+ - Name: Managing resources from CRDs
+ File: crd-managing-resources-from-crds
+- Name: User tasks
+ Dir: user
+ Topics:
+ - Name: Creating applications from installed Operators
+ File: olm-creating-apps-from-installed-operators
+- Name: Administrator tasks
+ Dir: admin
+ Topics:
+ - Name: Adding Operators to a cluster
+ File: olm-adding-operators-to-cluster
+ - Name: Updating installed Operators
+ File: olm-upgrading-operators
+ - Name: Deleting Operators from a cluster
+ File: olm-deleting-operators-from-cluster
+ - Name: Configuring proxy support
+ File: olm-configuring-proxy-support
+ - Name: Viewing Operator status
+ File: olm-status
+ - Name: Managing Operator conditions
+ File: olm-managing-operatorconditions
+ - Name: Managing custom catalogs
+ File: olm-managing-custom-catalogs
+ - Name: Catalog source pod scheduling
+ File: olm-cs-podsched
+# - Name: Managing platform Operators <= Tech Preview
+# File: olm-managing-po
+ - Name: Troubleshooting Operator issues
+ File: olm-troubleshooting-operator-issues
+- Name: Developing Operators
+ Dir: operator_sdk
+ Topics:
+ - Name: About the Operator SDK
+ File: osdk-about
+ - Name: Installing the Operator SDK CLI
+ File: osdk-installing-cli
+ - Name: Go-based Operators
+ Dir: golang
+ Topics:
+# Quick start excluded, because it requires cluster-admin permissions.
+# - Name: Getting started
+# File: osdk-golang-quickstart
+ - Name: Tutorial
+ File: osdk-golang-tutorial
+ - Name: Project layout
+ File: osdk-golang-project-layout
+ - Name: Updating Go-based projects
+ File: osdk-golang-updating-projects
+ - Name: Ansible-based Operators
+ Dir: ansible
+ Topics:
+# Quick start excluded, because it requires cluster-admin permissions.
+# - Name: Getting started
+# File: osdk-ansible-quickstart
+ - Name: Tutorial
+ File: osdk-ansible-tutorial
+ - Name: Project layout
+ File: osdk-ansible-project-layout
+ - Name: Updating Ansible-based projects
+ File: osdk-ansible-updating-projects
+ - Name: Ansible support
+ File: osdk-ansible-support
+ - Name: Kubernetes Collection for Ansible
+ File: osdk-ansible-k8s-collection
+ - Name: Using Ansible inside an Operator
+ File: osdk-ansible-inside-operator
+ - Name: Custom resource status management
+ File: osdk-ansible-cr-status
+ - Name: Helm-based Operators
+ Dir: helm
+ Topics:
+# Quick start excluded, because it requires cluster-admin permissions.
+# - Name: Getting started
+# File: osdk-helm-quickstart
+ - Name: Tutorial
+ File: osdk-helm-tutorial
+ - Name: Project layout
+ File: osdk-helm-project-layout
+ - Name: Updating Helm-based projects
+ File: osdk-helm-updating-projects
+ - Name: Helm support
+ File: osdk-helm-support
+# - Name: Hybrid Helm Operator <= Tech Preview
+# File: osdk-hybrid-helm
+# - Name: Updating Hybrid Helm-based projects <= Tech Preview
+# File: osdk-hybrid-helm-updating-projects
+# - Name: Java-based Operators <= Tech Preview
+# Dir: java
+# Topics:
+# - Name: Getting started
+# File: osdk-java-quickstart
+# - Name: Tutorial
+# File: osdk-java-tutorial
+# - Name: Project layout
+# File: osdk-java-project-layout
+# - Name: Updating Java-based projects
+# File: osdk-java-updating-projects
+ - Name: Defining cluster service versions (CSVs)
+ File: osdk-generating-csvs
+ - Name: Working with bundle images
+ File: osdk-working-bundle-images
+ - Name: Complying with pod security admission
+ File: osdk-complying-with-psa
+ - Name: Validating Operators using the scorecard
+ File: osdk-scorecard
+ - Name: Validating Operator bundles
+ File: osdk-bundle-validate
+ - Name: High-availability or single-node cluster detection and support
+ File: osdk-ha-sno
+ - Name: Configuring built-in monitoring with Prometheus
+ File: osdk-monitoring-prometheus
+ - Name: Configuring leader election
+ File: osdk-leader-election
+ - Name: Object pruning utility
+ File: osdk-pruning-utility
+ - Name: Migrating package manifest projects to bundle format
+ File: osdk-pkgman-to-bundle
+ - Name: Operator SDK CLI reference
+ File: osdk-cli-ref
+ - Name: Migrating to Operator SDK v0.1.0
+ File: osdk-migrating-to-v0-1-0
+# ROSA customers can't configure/edit the cluster Operators
+# - Name: Cluster Operators reference
+# File: operator-reference
---
Name: Networking
Dir: networking
@@ -276,102 +789,383 @@ Topics:
Dir: deployments
Distros: openshift-dedicated
Topics:
- - Name: Custom domains for applications
- File: osd-config-custom-domains-applications
+ - Name: Custom domains for applications
+ File: osd-config-custom-domains-applications
+---
+Name: Nodes
+Dir: nodes
+Distros: openshift-dedicated
+Topics:
+- Name: Overview of nodes
+ File: index
+- Name: Working with pods
+ Dir: pods
+ Topics:
+ - Name: About pods
+ File: nodes-pods-using
+ - Name: Viewing pods
+ File: nodes-pods-viewing
+ - Name: Configuring a cluster for pods
+ File: nodes-pods-configuring
+ Distros: openshift-dedicated
+# Cannot create namespace to install VPA; revisit after Operator book converted
+# - Name: Automatically adjust pod resource levels with the vertical pod autoscaler
+# File: nodes-pods-vertical-autoscaler
+ - Name: Providing sensitive data to pods
+ File: nodes-pods-secrets
+ - Name: Creating and using config maps
+ File: nodes-pods-configmaps
+# Cannot create required "kubeletconfigs"
+# - Name: Using Device Manager to make devices available to nodes
+# File: nodes-pods-plugins
+# Distros: openshift-dedicated
+ - Name: Including pod priority in pod scheduling decisions
+ File: nodes-pods-priority
+ Distros: openshift-dedicated
+ - Name: Placing pods on specific nodes using node selectors
+ File: nodes-pods-node-selectors
+ Distros: openshift-dedicated
+# Cannot create namespace to install Run Once; revisit after Operator book converted
+# - Name: Run Once Duration Override Operator
+# Dir: run_once_duration_override
+# Distros: openshift-dedicated
+# Topics:
+# - Name: Run Once Duration Override Operator overview
+# File: index
+# - Name: Run Once Duration Override Operator release notes
+# File: run-once-duration-override-release-notes
+# - Name: Overriding the active deadline for run-once pods
+# File: run-once-duration-override-install
+# - Name: Uninstalling the Run Once Duration Override Operator
+# File: run-once-duration-override-uninstall
+- Name: Automatically scaling pods with the Custom Metrics Autoscaler Operator
+ Dir: cma
+ Distros: openshift-dedicated
+ Topics:
+ - Name: Custom Metrics Autoscaler Operator overview
+ File: nodes-cma-autoscaling-custom
+ - Name: Custom Metrics Autoscaler Operator release notes
+ File: nodes-cma-autoscaling-custom-rn
+ - Name: Installing the custom metrics autoscaler
+ File: nodes-cma-autoscaling-custom-install
+ - Name: Understanding the custom metrics autoscaler triggers
+ File: nodes-cma-autoscaling-custom-trigger
+ - Name: Understanding the custom metrics autoscaler trigger authentications
+ File: nodes-cma-autoscaling-custom-trigger-auth
+ - Name: Pausing the custom metrics autoscaler
+ File: nodes-cma-autoscaling-custom-pausing
+ - Name: Gathering audit logs
+ File: nodes-cma-autoscaling-custom-audit-log
+ - Name: Gathering debugging data
+ File: nodes-cma-autoscaling-custom-debugging
+ - Name: Viewing Operator metrics
+ File: nodes-cma-autoscaling-custom-metrics
+ - Name: Understanding how to add custom metrics autoscalers
+ File: nodes-cma-autoscaling-custom-adding
+ - Name: Removing the Custom Metrics Autoscaler Operator
+ File: nodes-cma-autoscaling-custom-removing
+- Name: Controlling pod placement onto nodes (scheduling)
+ Dir: scheduling
+ Distros: openshift-dedicated
+ Topics:
+ - Name: About pod placement using the scheduler
+ File: nodes-scheduler-about
+ - Name: Placing pods relative to other pods using pod affinity and anti-affinity rules
+ File: nodes-scheduler-pod-affinity
+ - Name: Controlling pod placement on nodes using node affinity rules
+ File: nodes-scheduler-node-affinity
+    - Name: Placing pods onto overcommitted nodes
+ File: nodes-scheduler-overcommit
+ - Name: Controlling pod placement using node taints
+ File: nodes-scheduler-taints-tolerations
+ - Name: Placing pods on specific nodes using node selectors
+ File: nodes-scheduler-node-selectors
+ - Name: Controlling pod placement using pod topology spread constraints
+ File: nodes-scheduler-pod-topology-spread-constraints
+# - Name: Placing a pod on a specific node by name
+# File: nodes-scheduler-node-names
+# - Name: Placing a pod in a specific project
+# File: nodes-scheduler-node-projects
+# - Name: Keeping your cluster balanced using the descheduler
+# File: nodes-scheduler-descheduler
+ - Name: Descheduler
+ Dir: descheduler
+ Topics:
+ - Name: Descheduler overview
+ File: index
+ - Name: Descheduler release notes
+ File: nodes-descheduler-release-notes
+ - Name: Evicting pods using the descheduler
+ File: nodes-descheduler-configuring
+ - Name: Uninstalling the descheduler
+ File: nodes-descheduler-uninstalling
+ - Name: Secondary scheduler
+ Dir: secondary_scheduler
+ Distros: openshift-enterprise
+ Topics:
+ - Name: Secondary scheduler overview
+ File: index
+ - Name: Secondary Scheduler Operator release notes
+ File: nodes-secondary-scheduler-release-notes
+ - Name: Scheduling pods using a secondary scheduler
+ File: nodes-secondary-scheduler-configuring
+ - Name: Uninstalling the Secondary Scheduler Operator
+ File: nodes-secondary-scheduler-uninstalling
+- Name: Using Jobs and DaemonSets
+ Dir: jobs
+ Topics:
+ - Name: Running background tasks on nodes automatically with daemonsets
+ File: nodes-pods-daemonsets
+ Distros: openshift-dedicated
+ - Name: Running tasks in pods using jobs
+ File: nodes-nodes-jobs
+- Name: Working with nodes
+ Dir: nodes
+ Distros: openshift-dedicated
+ Topics:
+ - Name: Viewing and listing the nodes in your cluster
+ File: nodes-nodes-viewing
+# cannot use oc adm cordon; cannot patch resource "machinesets"; cannot patch resource "nodes"
+# - Name: Working with nodes
+# File: nodes-nodes-working
+# cannot create resource "kubeletconfigs", "schedulers", "machineconfigs", "kubeletconfigs"
+# - Name: Managing nodes
+# File: nodes-nodes-managing
+# cannot create resource "kubeletconfigs"
+# - Name: Managing graceful node shutdown
+# File: nodes-nodes-graceful-shutdown
+# cannot create resource "kubeletconfigs"
+# - Name: Managing the maximum number of pods per node
+# File: nodes-nodes-managing-max-pods
+ - Name: Using the Node Tuning Operator
+ File: nodes-node-tuning-operator
+ - Name: Remediating, fencing, and maintaining nodes
+ File: nodes-remediating-fencing-maintaining-rhwa
+# Cannot create namespace needed to oc debug and reboot; revisit after Operator book converted
+# - Name: Understanding node rebooting
+# File: nodes-nodes-rebooting
+# cannot create resource "kubeletconfigs"
+# - Name: Freeing node resources using garbage collection
+# File: nodes-nodes-garbage-collection
+# cannot create resource "kubeletconfigs"
+# - Name: Allocating resources for nodes
+# File: nodes-nodes-resources-configuring
+# cannot create resource "kubeletconfigs"
+# - Name: Allocating specific CPUs for nodes in a cluster
+# File: nodes-nodes-resources-cpus
+# cannot create resource "kubeletconfigs"
+# - Name: Configuring the TLS security profile for the kubelet
+# File: nodes-nodes-tls
+# Distros: openshift-dedicated
+# - Name: Monitoring for problems in your nodes
+# File: nodes-nodes-problem-detector
+ - Name: Machine Config Daemon metrics
+ File: nodes-nodes-machine-config-daemon-metrics
+# cannot patch resource "nodes"
+# - Name: Creating infrastructure nodes
+# File: nodes-nodes-creating-infrastructure-nodes
+- Name: Working with containers
+ Dir: containers
+ Topics:
+ - Name: Understanding containers
+ File: nodes-containers-using
+ - Name: Using Init Containers to perform tasks before a pod is deployed
+ File: nodes-containers-init
+ Distros: openshift-dedicated
+ - Name: Using volumes to persist container data
+ File: nodes-containers-volumes
+ - Name: Mapping volumes using projected volumes
+ File: nodes-containers-projected-volumes
+ - Name: Allowing containers to consume API objects
+ File: nodes-containers-downward-api
+ - Name: Copying files to or from a container
+ File: nodes-containers-copying-files
+ - Name: Executing remote commands in a container
+ File: nodes-containers-remote-commands
+ - Name: Using port forwarding to access applications in a container
+ File: nodes-containers-port-forwarding
+# cannot patch resource "configmaps"
+# - Name: Using sysctls in containers
+# File: nodes-containers-sysctls
+- Name: Working with clusters
+ Dir: clusters
+ Topics:
+ - Name: Viewing system event information in a cluster
+ File: nodes-containers-events
+ - Name: Analyzing cluster resource levels
+ File: nodes-cluster-resource-levels
+ Distros: openshift-dedicated
+ - Name: Setting limit ranges
+ File: nodes-cluster-limit-ranges
+ - Name: Configuring cluster memory to meet container memory and risk requirements
+ File: nodes-cluster-resource-configure
+ Distros: openshift-dedicated
+  - Name: Configuring your cluster to place pods on overcommitted nodes
+ File: nodes-cluster-overcommit
+ Distros: openshift-dedicated
+ - Name: Configuring the Linux cgroup version on your nodes
+ File: nodes-cluster-cgroups-2
+ Distros: openshift-enterprise
+ - Name: Configuring the Linux cgroup version on your nodes
+ File: nodes-cluster-cgroups-okd
+ Distros: openshift-origin
+# The TechPreviewNoUpgrade Feature Gate is not allowed
+# - Name: Enabling features using FeatureGates
+# File: nodes-cluster-enabling-features
+# Distros: openshift-rosa
+# Error: nodes.config.openshift.io "cluster" could not be patched
+# - Name: Improving cluster stability in high latency environments using worker latency profiles
+# File: nodes-cluster-worker-latency-profiles
+# Not supported per Michael McNeill
+#- Name: Remote worker nodes on the network edge
+# Dir: edge
+# Topics:
+# - Name: Using remote worker node at the network edge
+# File: nodes-edge-remote-workers
+# Not supported per Michael McNeill
+#- Name: Worker nodes for single-node OpenShift clusters
+# Dir: nodes
+# Topics:
+# - Name: Adding worker nodes to single-node OpenShift clusters
+# File: nodes-sno-worker-nodes
---
Name: Logging
Dir: logging
Distros: openshift-dedicated
Topics:
- Name: Release notes
- File: cluster-logging-release-notes
+ Dir: logging_release_notes
+ Topics:
+ - Name: Logging 5.8
+ File: logging-5-8-release-notes
+ - Name: Logging 5.7
+ File: logging-5-7-release-notes
+- Name: Support
+ File: cluster-logging-support
+- Name: Troubleshooting logging
+ Dir: troubleshooting
+ Topics:
+ - Name: Viewing Logging status
+ File: cluster-logging-cluster-status
+ - Name: Troubleshooting log forwarding
+ File: log-forwarding-troubleshooting
+ - Name: Troubleshooting logging alerts
+ File: troubleshooting-logging-alerts
+ - Name: Viewing the status of the Elasticsearch log store
+ File: cluster-logging-log-store-status
- Name: About Logging
File: cluster-logging
- Name: Installing Logging
File: cluster-logging-deploying
+- Name: Updating Logging
+ File: cluster-logging-upgrading
+- Name: Visualizing logs
+ Dir: log_visualization
+ Topics:
+ - Name: About log visualization
+ File: log-visualization
+ - Name: Log visualization with the web console
+ File: log-visualization-ocp-console
+ - Name: Viewing cluster dashboards
+ File: cluster-logging-dashboards
+ - Name: Log visualization with Kibana
+ File: logging-kibana
- Name: Accessing the service logs
File: sd-accessing-the-service-logs
- Name: Configuring your Logging deployment
Dir: config
Topics:
- - Name: About the Cluster Logging custom resource
- File: cluster-logging-configuring-cr
- - Name: Configuring the logging collector
- File: cluster-logging-collector
- - Name: Configuring the log store
- File: cluster-logging-log-store
- - Name: Configuring the log visualizer
- File: cluster-logging-visualizer
- - Name: Configuring Logging storage
- File: cluster-logging-storage-considerations
- Name: Configuring CPU and memory limits for Logging components
File: cluster-logging-memory
- - Name: Using tolerations to control Logging pod placement
- File: cluster-logging-tolerations
- - Name: Moving the Logging resources with node selectors
- File: cluster-logging-moving-nodes
#- Name: Configuring systemd-journald and Fluentd
# File: cluster-logging-systemd
- - Name: Maintenance and support
- File: cluster-logging-maintenance-support
-- Name: Logging with the LokiStack
- File: cluster-logging-loki
-- Name: Viewing logs for a specific resource
- File: viewing-resource-logs
-- Name: Viewing cluster logs in Kibana
- File: cluster-logging-visualizer
- Distros: openshift-dedicated
-- Name: Forwarding logs to third party systems
- File: cluster-logging-external
-- Name: Enabling JSON logging
- File: cluster-logging-enabling-json-logging
-- Name: Collecting and storing Kubernetes events
- File: cluster-logging-eventrouter
-# - Name: Forwarding logs using ConfigMaps
-# File: cluster-logging-external-configmap
-# Distros: openshift-dedicated
-- Name: Updating Logging
- File: cluster-logging-upgrading
-- Name: Viewing cluster dashboards
- File: cluster-logging-dashboards
-- Name: Troubleshooting Logging
- Dir: troubleshooting
+- Name: Log collection and forwarding
+ Dir: log_collection_forwarding
Topics:
- - Name: Viewing Logging status
- File: cluster-logging-cluster-status
- - Name: Viewing the status of the log store
- File: cluster-logging-log-store-status
- - Name: Understanding Logging alerts
- File: cluster-logging-alerts
- - Name: Collecting logging data for Red Hat Support
- File: cluster-logging-must-gather
- - Name: Troubleshooting for Critical Alerts
- File: cluster-logging-troubleshooting-for-critical-alerts
+ - Name: About log collection and forwarding
+ File: log-forwarding
+ - Name: Log output types
+ File: logging-output-types
+ - Name: Enabling JSON log forwarding
+ File: cluster-logging-enabling-json-logging
+ - Name: Configuring log forwarding
+ File: configuring-log-forwarding
+ - Name: Configuring the logging collector
+ File: cluster-logging-collector
+ - Name: Collecting and storing Kubernetes events
+ File: cluster-logging-eventrouter
+- Name: Log storage
+ Dir: log_storage
+ Topics:
+ - Name: About log storage
+ File: about-log-storage
+ - Name: Installing log storage
+ File: installing-log-storage
+ - Name: Configuring the LokiStack log store
+ File: cluster-logging-loki
+ - Name: Configuring the Elasticsearch log store
+ File: logging-config-es-store
+- Name: Logging alerts
+ Dir: logging_alerts
+ Topics:
+ - Name: Default logging alerts
+ File: default-logging-alerts
+ - Name: Custom logging alerts
+ File: custom-logging-alerts
+- Name: Performance and reliability tuning
+ Dir: performance_reliability
+ Topics:
+ - Name: Flow control mechanisms
+ File: logging-flow-control-mechanisms
+- Name: Scheduling resources
+ Dir: scheduling_resources
+ Topics:
+ - Name: Using node selectors to move logging resources
+ File: logging-node-selectors
+ - Name: Using tolerations to control logging pod placement
+ File: logging-taints-tolerations
- Name: Uninstalling Logging
File: cluster-logging-uninstall
- Name: Exported fields
File: cluster-logging-exported-fields
+- Name: API reference
+ Dir: api_reference
+ Topics:
+# - Name: 5.8 Logging API reference
+# File: logging-5-8-reference
+# - Name: 5.7 Logging API reference
+# File: logging-5-7-reference
+ - Name: 5.6 Logging API reference
+ File: logging-5-6-reference
+- Name: Glossary
+ File: logging-common-terms
---
-Name: Monitoring user-defined projects
+Name: Monitoring
Dir: monitoring
Distros: openshift-dedicated
Topics:
-- Name: Understanding the monitoring stack
- File: osd-understanding-the-monitoring-stack
+- Name: Monitoring overview
+ File: monitoring-overview
- Name: Accessing monitoring for user-defined projects
- File: osd-accessing-monitoring-for-user-defined-projects
+ File: sd-accessing-monitoring-for-user-defined-projects
- Name: Configuring the monitoring stack
- File: osd-configuring-the-monitoring-stack
+ File: configuring-the-monitoring-stack
+- Name: Disabling monitoring for user-defined projects
+ File: sd-disabling-monitoring-for-user-defined-projects
- Name: Enabling alert routing for user-defined projects
- File: osd-enabling-alert-routing-for-user-defined-projects
+ File: enabling-alert-routing-for-user-defined-projects
- Name: Managing metrics
- File: osd-managing-metrics
+ File: managing-metrics
- Name: Managing alerts
File: managing-alerts
- Name: Reviewing monitoring dashboards
- File: osd-reviewing-monitoring-dashboards
+ File: reviewing-monitoring-dashboards
+- Name: Accessing third-party monitoring APIs
+ File: accessing-third-party-monitoring-apis
- Name: Troubleshooting monitoring issues
- File: osd-troubleshooting-monitoring-issues
+ File: troubleshooting-monitoring-issues
+- Name: Config map reference for the Cluster Monitoring Operator
+ File: config-map-reference-for-the-cluster-monitoring-operator
---
Name: Serverless
Dir: serverless
@@ -382,24 +1176,3 @@ Topics:
Topics:
- Name: Serverless overview
File: about-serverless
----
-Name: Troubleshooting
-Dir: sd_support
-Distros: openshift-dedicated
-Topics:
-- Name: Remote health monitoring with connected clusters
- Dir: remote_health_monitoring
- Distros: openshift-dedicated
- Topics:
- - Name: About remote health monitoring
- File: about-remote-health-monitoring
- - Name: Showing data collected by remote health monitoring
- File: showing-data-collected-by-remote-health-monitoring
- - Name: Using Insights to identify issues with your cluster
- File: using-insights-to-identify-issues-with-your-cluster
-- Name: Summarizing cluster specifications
- File: osd-summarizing-cluster-specifications
- Distros: openshift-dedicated
-- Name: OpenShift Dedicated managed resources
- File: osd-managed-resources
- Distros: openshift-dedicated
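
For orientation before the next file: the topic-map files patched above and below share one schema. Each `---`-separated YAML document defines a book in the site navigation, and entries nest through `Dir`/`Topics`. A minimal sketch of that shape follows; only the keys and the per-entry `Distros` override are taken from this patch, while the book, directory, and file names are hypothetical placeholders:

    # One YAML document per book; documents are separated by ---
    Name: Example book            # title shown in the docs navigation
    Dir: example_dir              # directory containing the book's source files
    Distros: openshift-rosa       # comma-separated distros that build this book
    Topics:
    - Name: Example topic         # leaf entry, rendered from its File inside Dir
      File: example-topic
    - Name: Example subsection    # nested entry with its own Dir and Topics
      Dir: example_subdir
      Topics:
      - Name: Nested topic
        File: nested-topic
        Distros: openshift-rosa  # Distros can also be set per entry to narrow the book-level value
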
diff --git a/_topic_maps/_topic_map_rosa.yml b/_topic_maps/_topic_map_rosa.yml
index 39aa73cfd331..9120756b6281 100644
--- a/_topic_maps/_topic_map_rosa.yml
+++ b/_topic_maps/_topic_map_rosa.yml
@@ -36,7 +36,7 @@ Name: What's new
Dir: rosa_release_notes
Distros: openshift-rosa
Topics:
-- Name: What's new with ROSA
+- Name: What's new with Red Hat OpenShift Service on AWS
File: rosa-release-notes
---
Name: Introduction to ROSA
@@ -65,17 +65,111 @@ Topics:
File: rosa-service-definition
- Name: ROSA update life cycle
File: rosa-life-cycle
- - Name: Understanding process and security for ROSA
+ - Name: ROSA with HCP service definition
+ File: rosa-hcp-service-definition
+ - Name: ROSA with HCP update life cycle
+ File: rosa-hcp-life-cycle
+ - Name: Understanding security for ROSA
File: rosa-policy-process-security
+ - Name: SRE and service account access
+ File: rosa-sre-access
+# Created a new assembly in ROSA/OSD. In OCP, the assembly is in a book that is not in ROSA/OSD
+- Name: About admission plugins
+ File: rosa-admission-plug-ins
+ Distros: openshift-rosa
- Name: About IAM resources for ROSA with STS
File: rosa-sts-about-iam-resources
- Name: OpenID Connect Overview
File: rosa-oidc-overview
-- Name: Support for ROSA
- File: rosa-getting-support
# - Name: Training for ROSA
# File: rosa-training
---
+Name: Tutorials
+Dir: cloud_experts_tutorials
+Distros: openshift-rosa
+Topics:
+- Name: Tutorials overview
+ File: index
+#- Name: ROSA prerequisites
+# File: rosa-mobb-prerequisites-tutorial
+- Name: Verifying permissions for a ROSA STS deployment
+ File: rosa-mobb-verify-permissions-sts-deployment
+- Name: Configuring log forwarding for CloudWatch logs and STS
+ File: cloud-experts-rosa-cloudwatch-sts
+- Name: Using AWS WAF and Amazon CloudFront to protect ROSA workloads
+ File: cloud-experts-using-cloudfront-and-waf
+- Name: Using AWS WAF and AWS ALBs to protect ROSA workloads
+ File: cloud-experts-using-alb-and-waf
+- Name: Deploying OpenShift API for Data Protection on a ROSA cluster
+ File: cloud-experts-deploy-api-data-protection
+- Name: AWS Load Balancer Operator on ROSA
+ File: cloud-experts-aws-load-balancer-operator
+- Name: Configuring ROSA/OSD to use custom TLS ciphers on the ingress controllers
+ File: cloud-experts-configure-custom-tls-ciphers
+- Name: Configuring Microsoft Entra ID (formerly Azure Active Directory) as an identity provider
+ File: cloud-experts-entra-id-idp
+- Name: Using AWS Secrets Manager CSI on ROSA with STS
+ File: cloud-experts-aws-secret-manager
+- Name: Using AWS Controllers for Kubernetes on ROSA
+ File: cloud-experts-using-aws-ack
+- Name: Deploying the External DNS Operator on ROSA
+ File: cloud-experts-external-dns
+- Name: Dynamically issuing certificates using the cert-manager Operator on ROSA
+ File: cloud-experts-dynamic-certificate-custom-domain
+- Name: Changing the Console, OAuth, and Downloads domains and TLS certificate
+ File: cloud-experts-rosa-osd-change-default-domain
+- Name: Assigning consistent egress IP for external traffic
+ File: cloud-experts-consistent-egress-ip
+- Name: Getting started with ROSA
+ Dir: cloud-experts-getting-started
+ Distros: openshift-rosa
+ Topics:
+  - Name: What is ROSA?
+ File: cloud-experts-getting-started-what-is-rosa
+ - Name: Deploying a cluster
+ Dir: cloud-experts-getting-started-deploying
+ Topics:
+ - Name: Choosing a deployment method
+ File: cloud-experts-getting-started-choose-deployment-method
+ - Name: Simple CLI guide
+ File: cloud-experts-getting-started-simple-cli-guide
+ - Name: Detailed CLI guide
+ File: cloud-experts-getting-started-detailed-cli-guide
+ - Name: Hosted Control Planes guide
+ File: cloud-experts-getting-started-hcp
+ - Name: Simple UI guide
+ File: cloud-experts-getting-started-simple-ui-guide
+ - Name: Detailed UI guide
+ File: cloud-experts-getting-started-detailed-ui
+ - Name: Creating an admin user
+ File: cloud-experts-getting-started-admin
+ - Name: Setting up an identity provider
+ File: cloud-experts-getting-started-idp
+ - Name: Granting admin rights
+ File: cloud-experts-getting-started-admin-rights
+ - Name: Accessing your cluster
+ File: cloud-experts-getting-started-accessing
+ - Name: Managing worker nodes
+ File: cloud-experts-getting-started-managing-worker-nodes
+ - Name: Autoscaling
+ File: cloud-experts-getting-started-autoscaling
+ - Name: Upgrading your cluster
+ File: cloud-experts-getting-started-upgrading
+ - Name: Deleting your cluster
+ File: cloud-experts-getting-started-deleting
+ - Name: Obtaining support
+ File: cloud-experts-getting-started-support
+- Name: Deploying an application
+ Dir: cloud-experts-deploying-application
+ Distros: openshift-rosa
+ Topics:
+ - Name: Introduction
+ File: cloud-experts-deploying-application-intro
+ - Name: Prerequisites
+ File: cloud-experts-deploying-application-prerequisites
+  - Name: Lab overview
+ File: cloud-experts-deploying-application-lab-overview
+---
Name: Getting started
Dir: rosa_getting_started
Distros: openshift-rosa
@@ -91,7 +185,9 @@ Name: Prepare your environment
Dir: rosa_planning
Distros: openshift-rosa
Topics:
-- Name: AWS prerequisites for ROSA with STS
+- Name: Prerequisites checklist for deploying ROSA using STS
+ File: rosa-cloud-expert-prereq-checklist
+- Name: Detailed requirements for deploying ROSA using STS
File: rosa-sts-aws-prereqs
- Name: ROSA IAM role resources
File: rosa-sts-ocm-role
@@ -103,6 +199,8 @@ Topics:
File: rosa-sts-required-aws-service-quotas
- Name: Setting up your environment
File: rosa-sts-setting-up-environment
+- Name: Preparing Terraform to install ROSA clusters
+ File: rosa-understanding-terraform
---
Name: Install ROSA with HCP clusters
Dir: rosa_hcp
@@ -110,6 +208,8 @@ Distros: openshift-rosa
Topics:
- Name: Creating ROSA with HCP clusters using the default options
File: rosa-hcp-sts-creating-a-cluster-quickly
+- Name: Creating ROSA with HCP clusters using a custom AWS KMS encryption key
+ File: rosa-hcp-creating-cluster-with-aws-kms-key
- Name: Using the Node Tuning Operator on ROSA with HCP
File: rosa-tuning-config
---
@@ -121,10 +221,20 @@ Topics:
File: rosa-sts-creating-a-cluster-quickly
- Name: Creating a ROSA cluster with STS using customizations
File: rosa-sts-creating-a-cluster-with-customizations
+- Name: Creating a ROSA cluster with STS using Terraform
+ Dir: terraform
+ Distros: openshift-rosa
+ Topics:
+ - Name: Creating a default ROSA Classic cluster using Terraform
+ File: rosa-sts-creating-a-cluster-quickly-terraform
+# - Name: Customizing a ROSA cluster with Terraform
+# File: rosa-sts-creating-a-cluster-with-customizations-terraform
- Name: Interactive cluster creation mode reference
File: rosa-sts-interactive-mode-reference
- Name: Creating an AWS PrivateLink cluster on ROSA
File: rosa-aws-privatelink-creating-cluster
+- Name: Configuring a shared virtual private cloud for ROSA clusters
+ File: rosa-shared-vpc-config
- Name: Accessing a ROSA cluster
File: rosa-sts-accessing-cluster
- Name: Configuring identity providers using Red Hat OpenShift Cluster Manager
@@ -164,20 +274,242 @@ Topics:
- Name: Command quick reference for creating clusters and users
File: rosa-quickstart
---
-Name: ROSA CLI
-Dir: rosa_cli
+Name: Support
+Dir: support
Distros: openshift-rosa
Topics:
-# - Name: CLI and web console
-# File: rosa-cli-openshift-console
-- Name: Getting started with the ROSA CLI
- File: rosa-get-started-cli
-- Name: Managing objects with the ROSA CLI
- File: rosa-manage-objects-cli
-- Name: Checking account and version information with the ROSA CLI
- File: rosa-checking-acct-version-cli
-- Name: Checking logs with the ROSA CLI
- File: rosa-checking-logs-cli
+- Name: Support overview
+ File: index
+- Name: Managing your cluster resources
+ File: managing-cluster-resources
+- Name: Getting support
+ File: getting-support
+ Distros: openshift-rosa
+- Name: Remote health monitoring with connected clusters
+ Dir: remote_health_monitoring
+ Distros: openshift-rosa
+ Topics:
+ - Name: About remote health monitoring
+ File: about-remote-health-monitoring
+ - Name: Showing data collected by remote health monitoring
+ File: showing-data-collected-by-remote-health-monitoring
+# cannot get resource "secrets" in API group "" in the namespace "openshift-config"
+# - Name: Opting out of remote health reporting
+# File: opting-out-of-remote-health-reporting
+# cannot get resource "secrets" in API group "" in the namespace "openshift-config"
+# - Name: Enabling remote health reporting
+# File: enabling-remote-health-reporting
+ - Name: Using Insights to identify issues with your cluster
+ File: using-insights-to-identify-issues-with-your-cluster
+ - Name: Using Insights Operator
+ File: using-insights-operator
+# Not supported per Michael McNeill
+# - Name: Using remote health reporting in a restricted network
+# File: remote-health-reporting-from-restricted-network
+# cannot list resource "secrets" in API group "" in the namespace "openshift-config"
+# - Name: Importing simple content access entitlements with Insights Operator
+# File: insights-operator-simple-access
+# must-gather not supported for customers per Dustin Row; cannot create resource "namespaces"
+# - Name: Gathering data about your cluster
+# File: gathering-cluster-data
+# Distros: openshift-rosa
+- Name: Summarizing cluster specifications
+ File: summarizing-cluster-specifications
+ Distros: openshift-rosa
+- Name: Troubleshooting
+ Dir: troubleshooting
+ Distros: openshift-rosa
+ Topics:
+# ROSA has its own installation troubleshooting content
+# - Name: Troubleshooting installations
+# File: troubleshooting-installations
+ - Name: Troubleshooting ROSA installations
+ File: rosa-troubleshooting-installations
+ - Name: Troubleshooting networking
+ File: rosa-troubleshooting-networking
+ - Name: Verifying node health
+ File: verifying-node-health
+# cannot create resource "namespaces", cannot patch resource "nodes"
+# - Name: Troubleshooting CRI-O container runtime issues
+# File: troubleshooting-crio-issues
+# requires ostree, butane, and other plug-ins
+# - Name: Troubleshooting operating system issues
+# File: troubleshooting-operating-system-issues
+# Distros: openshift-rosa
+# cannot patch resource "nodes", "nodes/proxy", "namespaces"
+# - Name: Troubleshooting network issues
+# File: troubleshooting-network-issues
+# Distros: openshift-rosa
+ - Name: Troubleshooting Operator issues
+ File: troubleshooting-operator-issues
+ - Name: Investigating pod issues
+ File: investigating-pod-issues
+# Hiding from ROSA and OSD until it is decided who should port the Build book
+# - Name: Troubleshooting the Source-to-Image process
+# File: troubleshooting-s2i
+ - Name: Troubleshooting storage issues
+ File: troubleshooting-storage-issues
+# Not supported per WINC team
+# - Name: Troubleshooting Windows container workload issues
+# File: troubleshooting-windows-container-workload-issues
+ - Name: Investigating monitoring issues
+ File: investigating-monitoring-issues
+ - Name: Diagnosing OpenShift CLI (oc) issues
+ File: diagnosing-oc-issues
+ - Name: Troubleshooting expired offline access tokens
+ File: rosa-troubleshooting-expired-tokens
+ Distros: openshift-rosa
+ - Name: Troubleshooting IAM roles
+ File: rosa-troubleshooting-iam-resources
+ Distros: openshift-rosa
+ - Name: Troubleshooting cluster deployments
+ File: rosa-troubleshooting-deployments
+ Distros: openshift-rosa
+ - Name: Red Hat OpenShift Service on AWS managed resources
+ File: rosa-managed-resources
+ Distros: openshift-rosa
+---
+Name: Web console
+Dir: web_console
+Distros: openshift-rosa
+Topics:
+- Name: Web console overview
+ File: web-console-overview
+- Name: Accessing the web console
+ File: web-console
+- Name: Viewing cluster information
+ File: using-dashboard-to-get-cluster-information
+- Name: Adding user preferences
+ File: adding-user-preferences
+ Distros: openshift-enterprise,openshift-origin
+# cannot patch resource "consoles", insufficient permissions to read any Cluster configuration
+#- Name: Configuring the web console
+# File: configuring-web-console
+# Distros: openshift-rosa
+- Name: Customizing the web console
+ File: customizing-the-web-console
+ Distros: openshift-rosa
+- Name: Dynamic plugins
+ Dir: dynamic-plugin
+ Distros: openshift-rosa
+ Topics:
+ - Name: Overview of dynamic plugins
+ File: overview-dynamic-plugin
+ - Name: Getting started with dynamic plugins
+ File: dynamic-plugins-get-started
+ - Name: Deploy your plugin on a cluster
+ File: deploy-plugin-cluster
+ - Name: Dynamic plugin example
+ File: dynamic-plugin-example
+ - Name: Dynamic plugin reference
+ File: dynamic-plugins-reference
+- Name: Web terminal
+ Dir: web_terminal
+ Distros: openshift-rosa
+ Topics:
+ - Name: Installing the web terminal
+ File: installing-web-terminal
+# Do not have sufficient permissions to read any cluster configuration.
+# - Name: Configuring the web terminal
+# File: configuring-web-terminal
+ - Name: Using the web terminal
+ File: odc-using-web-terminal
+ - Name: Troubleshooting the web terminal
+ File: troubleshooting-web-terminal
+ - Name: Uninstalling the web terminal
+ File: uninstalling-web-terminal
+- Name: Disabling the web console
+ File: disabling-web-console
+ Distros: openshift-rosa
+- Name: About quick start tutorials
+ File: creating-quick-start-tutorials
+ Distros: openshift-rosa
+---
+Name: CLI tools
+Dir: cli_reference
+Distros: openshift-rosa
+Topics:
+- Name: CLI tools overview
+ File: index
+- Name: OpenShift CLI (oc)
+ Dir: openshift_cli
+ Topics:
+ - Name: Getting started with the OpenShift CLI
+ File: getting-started-cli
+ - Name: Configuring the OpenShift CLI
+ File: configuring-cli
+ - Name: Usage of oc and kubectl commands
+ File: usage-oc-kubectl
+ - Name: Managing CLI profiles
+ File: managing-cli-profiles
+ - Name: Extending the OpenShift CLI with plugins
+ File: extending-cli-plugins
+ # - Name: Managing CLI plugins with Krew
+ # File: managing-cli-plugins-krew
+ # Distros: openshift-rosa
+ - Name: OpenShift CLI developer command reference
+ File: developer-cli-commands
+ - Name: OpenShift CLI administrator command reference
+ File: administrator-cli-commands
+ Distros: openshift-rosa
+- Name: Developer CLI (odo)
+ File: odo-important-update
+ # Dir: developer_cli_odo
+ Distros: openshift-rosa
+ # Topics:
+ # - Name: odo release notes
+ # File: odo-release-notes
+ # - Name: Understanding odo
+ # File: understanding-odo
+ # - Name: Installing odo
+ # File: installing-odo
+ # - Name: Configuring the odo CLI
+ # File: configuring-the-odo-cli
+ # - Name: odo CLI reference
+ # File: odo-cli-reference
+- Name: Knative CLI (kn) for use with OpenShift Serverless
+ File: kn-cli-tools
+ Distros: openshift-rosa
+- Name: Pipelines CLI (tkn)
+ Dir: tkn_cli
+ Distros: openshift-rosa
+ Topics:
+ - Name: Installing tkn
+ File: installing-tkn
+ - Name: Configuring tkn
+ File: op-configuring-tkn
+ - Name: Basic tkn commands
+ File: op-tkn-reference
+- Name: opm CLI
+ Dir: opm
+ Distros: openshift-rosa
+ Topics:
+ - Name: Installing the opm CLI
+ File: cli-opm-install
+ - Name: opm CLI reference
+ File: cli-opm-ref
+- Name: Operator SDK
+ Dir: osdk
+ Distros: openshift-rosa
+ Topics:
+ - Name: Installing the Operator SDK CLI
+ File: cli-osdk-install
+ - Name: Operator SDK CLI reference
+ File: cli-osdk-ref
+- Name: ROSA CLI
+ Dir: rosa_cli
+ Distros: openshift-rosa
+ Topics:
+ # - Name: CLI and web console
+ # File: rosa-cli-openshift-console
+ - Name: Getting started with the ROSA CLI
+ File: rosa-get-started-cli
+ - Name: Managing objects with the ROSA CLI
+ File: rosa-manage-objects-cli
+ - Name: Checking account and version information with the ROSA CLI
+ File: rosa-checking-acct-version-cli
+ - Name: Checking logs with the ROSA CLI
+ File: rosa-checking-logs-cli
---
Name: Red Hat OpenShift Cluster Manager
Dir: ocm
@@ -212,7 +544,9 @@ Topics:
File: dedicated-aws-vpn
- Name: Configuring AWS Direct Connect
File: dedicated-aws-dc
-- Name: Nodes
+- Name: Cluster autoscaling
+ File: rosa-cluster-autoscaling
+- Name: Manage nodes using machine pools
Dir: rosa_nodes
Distros: openshift-rosa
Topics:
@@ -225,26 +559,102 @@ Topics:
Distros: openshift-rosa
- Name: About autoscaling nodes on a cluster
File: rosa-nodes-about-autoscaling-nodes
+ - Name: Configuring cluster memory to meet container memory and risk requirements
+ File: nodes-cluster-resource-configure
+- Name: Configuring PID limits
+ File: rosa-configuring-pid-limits
+---
+Name: Security and compliance
+Dir: security
+Distros: openshift-rosa
+Topics:
+- Name: Audit logs
+ File: audit-log-view
+- Name: Adding additional constraints for IP-based AWS role assumption
+ File: rosa-adding-additional-constraints-for-ip-based-aws-role-assumption
+#- Name: Security
+# File: rosa-security
+#- Name: Application and cluster compliance
+# File: rosa-app-security-compliance
---
-# Name: Security and compliance
-# Dir: security
-# Distros: openshift-rosa
-# Topics:
-# - Name: Viewing audit logs
-# File: audit-log-view
-# # - Name: Security
-# # File: rosa-security
-# # - Name: Application and cluster compliance
-# # File: rosa-app-security-compliance
-# ---
Name: Authentication and authorization
Dir: authentication
Distros: openshift-rosa
Topics:
+- Name: Authentication and authorization overview
+ File: index
+- Name: Understanding authentication
+ File: understanding-authentication
+# - Name: Configuring the internal OAuth server
+# File: configuring-internal-oauth
+# - Name: Configuring OAuth clients
+# File: configuring-oauth-clients
+- Name: Managing user-owned OAuth access tokens
+ File: managing-oauth-access-tokens
+# - Name: Understanding identity provider configuration
+# File: understanding-identity-provider
+- Name: Configuring identity providers
+ File: sd-configuring-identity-providers
+# - Name: Configuring identity providers
+# Dir: identity_providers
+# Topics:
+# - Name: Configuring an htpasswd identity provider
+# File: configuring-htpasswd-identity-provider
+# - Name: Configuring a Keystone identity provider
+# File: configuring-keystone-identity-provider
+# - Name: Configuring an LDAP identity provider
+# File: configuring-ldap-identity-provider
+# - Name: Configuring a basic authentication identity provider
+# File: configuring-basic-authentication-identity-provider
+# - Name: Configuring a request header identity provider
+# File: configuring-request-header-identity-provider
+# - Name: Configuring a GitHub or GitHub Enterprise identity provider
+# File: configuring-github-identity-provider
+# - Name: Configuring a GitLab identity provider
+# File: configuring-gitlab-identity-provider
+# - Name: Configuring a Google identity provider
+# File: configuring-google-identity-provider
+# - Name: Configuring an OpenID Connect identity provider
+# File: configuring-oidc-identity-provider
+- Name: Using RBAC to define and apply permissions
+ File: using-rbac
+# - Name: Removing the kubeadmin user
+# File: remove-kubeadmin
+#- Name: Configuring LDAP failover
+# File: configuring-ldap-failover
+- Name: Understanding and creating service accounts
+ File: understanding-and-creating-service-accounts
+- Name: Using service accounts in applications
+ File: using-service-accounts-in-applications
+- Name: Using a service account as an OAuth client
+ File: using-service-accounts-as-oauth-client
- Name: Assuming an AWS IAM role for a service account
File: assuming-an-aws-iam-role-for-a-service-account
+- Name: Scoping tokens
+ File: tokens-scoping
+- Name: Using bound service account tokens
+ File: bound-service-account-tokens
- Name: Managing security context constraints
File: managing-security-context-constraints
+- Name: Understanding and managing pod security admission
+ File: understanding-and-managing-pod-security-admission
+# - Name: Impersonating the system:admin user
+# File: impersonating-system-admin
+- Name: Syncing LDAP groups
+ File: ldap-syncing
+# - Name: Managing cloud provider credentials
+# Dir: managing_cloud_provider_credentials
+# Topics:
+# - Name: About the Cloud Credential Operator
+# File: about-cloud-credential-operator
+# - Name: Mint mode
+# File: cco-mode-mint
+# - Name: Passthrough mode
+# File: cco-mode-passthrough
+# - Name: Manual mode with long-term credentials for components
+# File: cco-mode-manual
+# - Name: Manual mode with short-term credentials for components
+# File: cco-short-term-creds
---
Name: Upgrading
Dir: upgrading
@@ -272,14 +682,67 @@ Topics:
File: setting-up-trusted-ca
Distros: openshift-rosa
---
- Name: Add-on services
- Dir: adding_service_cluster
+Name: Images
+Dir: openshift_images
+Distros: openshift-rosa
+Topics:
+- Name: Overview of images
+ File: index
+# renamed from "Configuring the Cluster Samples Operator"; customers cannot configure the operator
+- Name: Overview of the Cluster Samples Operator
+ File: configuring-samples-operator
Distros: openshift-rosa
+- Name: Using the Cluster Samples Operator with an alternate registry
+ File: samples-operator-alt-registry
+ Distros: openshift-rosa
+- Name: Creating images
+ File: create-images
+- Name: Managing images
+ Dir: managing_images
Topics:
- - Name: Adding services to a cluster
- File: adding-service
- - Name: Available services
- File: rosa-available-services
+ - Name: Managing images overview
+ File: managing-images-overview
+ - Name: Tagging images
+ File: tagging-images
+ - Name: Image pull policy
+ File: image-pull-policy
+ - Name: Using image pull secrets
+ File: using-image-pull-secrets
+- Name: Managing image streams
+ File: image-streams-manage
+ Distros: openshift-rosa
+- Name: Using image streams with Kubernetes resources
+ File: using-imagestreams-with-kube-resources
+ Distros: openshift-rosa
+- Name: Triggering updates on image stream changes
+ File: triggering-updates-on-imagestream-changes
+ Distros: openshift-rosa
+- Name: Image configuration resources
+ File: image-configuration
+ Distros: openshift-rosa
+- Name: Using templates
+ File: using-templates
+- Name: Using Ruby on Rails
+ File: templates-using-ruby-on-rails
+- Name: Using images
+ Dir: using_images
+ Distros: openshift-rosa
+ Topics:
+ - Name: Using images overview
+ File: using-images-overview
+ - Name: Source-to-image
+ File: using-s21-images
+ - Name: Customizing source-to-image images
+ File: customizing-s2i-images
+---
+Name: Add-on services
+Dir: adding_service_cluster
+Distros: openshift-rosa
+Topics:
+- Name: Adding services to a cluster
+ File: adding-service
+- Name: Available services
+ File: rosa-available-services
---
Name: Storage
Dir: storage
@@ -322,8 +785,179 @@ Topics:
File: configuring-registry-operator
- Name: Accessing the registry
File: accessing-the-registry
-- Name: Exposing the registry
- File: securing-exposing-registry
+# - Name: Exposing the registry
+# File: securing-exposing-registry
+---
+Name: Operators
+Dir: operators
+Distros: openshift-rosa
+Topics:
+- Name: Operators overview
+ File: index
+- Name: Understanding Operators
+ Dir: understanding
+ Topics:
+ - Name: What are Operators?
+ File: olm-what-operators-are
+ - Name: Packaging format
+ File: olm-packaging-format
+ - Name: Common terms
+ File: olm-common-terms
+ - Name: Operator Lifecycle Manager (OLM)
+ Dir: olm
+ Topics:
+ - Name: Concepts and resources
+ File: olm-understanding-olm
+ - Name: Architecture
+ File: olm-arch
+ - Name: Workflow
+ File: olm-workflow
+ - Name: Dependency resolution
+ File: olm-understanding-dependency-resolution
+ - Name: Operator groups
+ File: olm-understanding-operatorgroups
+ - Name: Multitenancy and Operator colocation
+ File: olm-colocation
+ - Name: Operator conditions
+ File: olm-operatorconditions
+ - Name: Metrics
+ File: olm-understanding-metrics
+ - Name: Webhooks
+ File: olm-webhooks
+ - Name: OperatorHub
+ File: olm-understanding-operatorhub
+ - Name: Red Hat-provided Operator catalogs
+ File: olm-rh-catalogs
+ - Name: Operators in multitenant clusters
+ File: olm-multitenancy
+ - Name: CRDs
+ Dir: crds
+ Topics:
+ - Name: Managing resources from CRDs
+ File: crd-managing-resources-from-crds
+- Name: User tasks
+ Dir: user
+ Topics:
+ - Name: Creating applications from installed Operators
+ File: olm-creating-apps-from-installed-operators
+- Name: Administrator tasks
+ Dir: admin
+ Topics:
+ - Name: Adding Operators to a cluster
+ File: olm-adding-operators-to-cluster
+ - Name: Updating installed Operators
+ File: olm-upgrading-operators
+ - Name: Deleting Operators from a cluster
+ File: olm-deleting-operators-from-cluster
+ - Name: Configuring proxy support
+ File: olm-configuring-proxy-support
+ - Name: Viewing Operator status
+ File: olm-status
+ - Name: Managing Operator conditions
+ File: olm-managing-operatorconditions
+ - Name: Managing custom catalogs
+ File: olm-managing-custom-catalogs
+ - Name: Catalog source pod scheduling
+ File: olm-cs-podsched
+# - Name: Managing platform Operators <= Tech Preview
+# File: olm-managing-po
+ - Name: Troubleshooting Operator issues
+ File: olm-troubleshooting-operator-issues
+- Name: Developing Operators
+ Dir: operator_sdk
+ Topics:
+ - Name: About the Operator SDK
+ File: osdk-about
+ - Name: Installing the Operator SDK CLI
+ File: osdk-installing-cli
+ - Name: Go-based Operators
+ Dir: golang
+ Topics:
+# Quick start excluded, because it requires cluster-admin permissions.
+# - Name: Getting started
+# File: osdk-golang-quickstart
+ - Name: Tutorial
+ File: osdk-golang-tutorial
+ - Name: Project layout
+ File: osdk-golang-project-layout
+ - Name: Updating Go-based projects
+ File: osdk-golang-updating-projects
+ - Name: Ansible-based Operators
+ Dir: ansible
+ Topics:
+# Quick start excluded, because it requires cluster-admin permissions.
+# - Name: Getting started
+# File: osdk-ansible-quickstart
+ - Name: Tutorial
+ File: osdk-ansible-tutorial
+ - Name: Project layout
+ File: osdk-ansible-project-layout
+ - Name: Updating Ansible-based projects
+ File: osdk-ansible-updating-projects
+ - Name: Ansible support
+ File: osdk-ansible-support
+ - Name: Kubernetes Collection for Ansible
+ File: osdk-ansible-k8s-collection
+ - Name: Using Ansible inside an Operator
+ File: osdk-ansible-inside-operator
+ - Name: Custom resource status management
+ File: osdk-ansible-cr-status
+ - Name: Helm-based Operators
+ Dir: helm
+ Topics:
+# Quick start excluded, because it requires cluster-admin permissions.
+# - Name: Getting started
+# File: osdk-helm-quickstart
+ - Name: Tutorial
+ File: osdk-helm-tutorial
+ - Name: Project layout
+ File: osdk-helm-project-layout
+ - Name: Updating Helm-based projects
+ File: osdk-helm-updating-projects
+ - Name: Helm support
+ File: osdk-helm-support
+# - Name: Hybrid Helm Operator <= Tech Preview
+# File: osdk-hybrid-helm
+# - Name: Updating Hybrid Helm-based projects <= Tech Preview
+# File: osdk-hybrid-helm-updating-projects
+# - Name: Java-based Operators <= Tech Preview
+# Dir: java
+# Topics:
+# - Name: Getting started
+# File: osdk-java-quickstart
+# - Name: Tutorial
+# File: osdk-java-tutorial
+# - Name: Project layout
+# File: osdk-java-project-layout
+# - Name: Updating Java-based projects
+# File: osdk-java-updating-projects
+ - Name: Defining cluster service versions (CSVs)
+ File: osdk-generating-csvs
+ - Name: Working with bundle images
+ File: osdk-working-bundle-images
+ - Name: Complying with pod security admission
+ File: osdk-complying-with-psa
+ - Name: Validating Operators using the scorecard
+ File: osdk-scorecard
+ - Name: Validating Operator bundles
+ File: osdk-bundle-validate
+ - Name: High-availability or single-node cluster detection and support
+ File: osdk-ha-sno
+ - Name: Configuring built-in monitoring with Prometheus
+ File: osdk-monitoring-prometheus
+ - Name: Configuring leader election
+ File: osdk-leader-election
+ - Name: Object pruning utility
+ File: osdk-pruning-utility
+ - Name: Migrating package manifest projects to bundle format
+ File: osdk-pkgman-to-bundle
+ - Name: Operator SDK CLI reference
+ File: osdk-cli-ref
+ - Name: Migrating to Operator SDK v0.1.0
+ File: osdk-migrating-to-v0-1-0
+# ROSA customers cannot configure or edit the cluster Operators
+# - Name: Cluster Operators reference
+# File: operator-reference
---
Name: Networking
Dir: networking
@@ -333,6 +967,8 @@ Topics:
File: dns-operator
- Name: Understanding the Ingress Operator
File: ingress-operator
+- Name: AWS Load Balancer Operator
+ File: aws-load-balancer-operator
- Name: OpenShift SDN default CNI network provider
Dir: openshift_sdn
Topics:
@@ -373,8 +1009,8 @@ Topics:
Dir: deployments
Distros: openshift-rosa
Topics:
- - Name: Custom domains for applications
- File: osd-config-custom-domains-applications
+ - Name: Custom domains for applications
+ File: osd-config-custom-domains-applications
# - Name: Application GitOps workflows
# File: rosa-app-gitops-workflows
# - Name: Application logging
@@ -395,16 +1031,278 @@ Topics:
- Name: Installing OADP on ROSA with STS
File: backing-up-applications
---
+Name: Nodes
+Dir: nodes
+Distros: openshift-rosa
+Topics:
+- Name: Overview of nodes
+ File: index
+- Name: Working with pods
+ Dir: pods
+ Topics:
+ - Name: About pods
+ File: nodes-pods-using
+ - Name: Viewing pods
+ File: nodes-pods-viewing
+ - Name: Configuring a cluster for pods
+ File: nodes-pods-configuring
+ Distros: openshift-rosa
+# Cannot create namespace to install VPA; revisit after Operator book converted
+# - Name: Automatically adjust pod resource levels with the vertical pod autoscaler
+# File: nodes-pods-vertical-autoscaler
+ - Name: Providing sensitive data to pods
+ File: nodes-pods-secrets
+ - Name: Creating and using config maps
+ File: nodes-pods-configmaps
+# Cannot create required kubeletconfigs
+# - Name: Using Device Manager to make devices available to nodes
+# File: nodes-pods-plugins
+# Distros: openshift-rosa
+ - Name: Including pod priority in pod scheduling decisions
+ File: nodes-pods-priority
+ Distros: openshift-rosa
+ - Name: Placing pods on specific nodes using node selectors
+ File: nodes-pods-node-selectors
+ Distros: openshift-rosa
+# Cannot create namespace to install Run Once; revisit after Operator book converted
+# - Name: Run Once Duration Override Operator
+# Dir: run_once_duration_override
+# Distros: openshift-rosa
+# Topics:
+# - Name: Run Once Duration Override Operator overview
+# File: index
+# - Name: Run Once Duration Override Operator release notes
+# File: run-once-duration-override-release-notes
+# - Name: Overriding the active deadline for run-once pods
+# File: run-once-duration-override-install
+# - Name: Uninstalling the Run Once Duration Override Operator
+# File: run-once-duration-override-uninstall
+- Name: Automatically scaling pods with the Custom Metrics Autoscaler Operator
+ Dir: cma
+ Distros: openshift-rosa
+ Topics:
+ - Name: Custom Metrics Autoscaler Operator overview
+ File: nodes-cma-autoscaling-custom
+ - Name: Custom Metrics Autoscaler Operator release notes
+ File: nodes-cma-autoscaling-custom-rn
+ - Name: Installing the custom metrics autoscaler
+ File: nodes-cma-autoscaling-custom-install
+ - Name: Understanding the custom metrics autoscaler triggers
+ File: nodes-cma-autoscaling-custom-trigger
+ - Name: Understanding the custom metrics autoscaler trigger authentications
+ File: nodes-cma-autoscaling-custom-trigger-auth
+ - Name: Pausing the custom metrics autoscaler
+ File: nodes-cma-autoscaling-custom-pausing
+ - Name: Gathering audit logs
+ File: nodes-cma-autoscaling-custom-audit-log
+ - Name: Gathering debugging data
+ File: nodes-cma-autoscaling-custom-debugging
+ - Name: Viewing Operator metrics
+ File: nodes-cma-autoscaling-custom-metrics
+ - Name: Understanding how to add custom metrics autoscalers
+ File: nodes-cma-autoscaling-custom-adding
+ - Name: Removing the Custom Metrics Autoscaler Operator
+ File: nodes-cma-autoscaling-custom-removing
+- Name: Controlling pod placement onto nodes (scheduling)
+ Dir: scheduling
+ Distros: openshift-rosa
+ Topics:
+ - Name: About pod placement using the scheduler
+ File: nodes-scheduler-about
+ - Name: Placing pods relative to other pods using pod affinity and anti-affinity rules
+ File: nodes-scheduler-pod-affinity
+ - Name: Controlling pod placement on nodes using node affinity rules
+ File: nodes-scheduler-node-affinity
+ - Name: Placing pods onto overcommitted nodes
+ File: nodes-scheduler-overcommit
+ - Name: Controlling pod placement using node taints
+ File: nodes-scheduler-taints-tolerations
+ - Name: Placing pods on specific nodes using node selectors
+ File: nodes-scheduler-node-selectors
+ - Name: Controlling pod placement using pod topology spread constraints
+ File: nodes-scheduler-pod-topology-spread-constraints
+# - Name: Placing a pod on a specific node by name
+# File: nodes-scheduler-node-names
+# - Name: Placing a pod in a specific project
+# File: nodes-scheduler-node-projects
+# - Name: Keeping your cluster balanced using the descheduler
+# File: nodes-scheduler-descheduler
+# Cannot create namespace to install Descheduler Operator; revisit after Operator book converted
+# - Name: Evicting pods using the descheduler
+# File: nodes-descheduler
+# Cannot create namespace to install Secondary Scheduler Operator; revisit after Operator book converted
+# - Name: Secondary scheduler
+# Dir: secondary_scheduler
+# Distros: openshift-enterprise
+# Topics:
+# - Name: Secondary scheduler overview
+# File: index
+# - Name: Secondary Scheduler Operator release notes
+# File: nodes-secondary-scheduler-release-notes
+# - Name: Scheduling pods using a secondary scheduler
+# File: nodes-secondary-scheduler-configuring
+# - Name: Uninstalling the Secondary Scheduler Operator
+# File: nodes-secondary-scheduler-uninstalling
+- Name: Using Jobs and DaemonSets
+ Dir: jobs
+ Topics:
+ - Name: Running background tasks on nodes automatically with daemonsets
+ File: nodes-pods-daemonsets
+ Distros: openshift-rosa
+ - Name: Running tasks in pods using jobs
+ File: nodes-nodes-jobs
+- Name: Working with nodes
+ Dir: nodes
+ Distros: openshift-rosa
+ Topics:
+ - Name: Viewing and listing the nodes in your cluster
+ File: nodes-nodes-viewing
+# cannot use oc adm cordon; cannot patch resource "machinesets"; cannot patch resource "nodes"
+# - Name: Working with nodes
+# File: nodes-nodes-working
+# cannot create resource "kubeletconfigs", "schedulers", "machineconfigs", "kubeletconfigs"
+# - Name: Managing nodes
+# File: nodes-nodes-managing
+# cannot create resource "kubeletconfigs"
+# - Name: Managing graceful node shutdown
+# File: nodes-nodes-graceful-shutdown
+# cannot create resource "kubeletconfigs"
+# - Name: Managing the maximum number of pods per node
+# File: nodes-nodes-managing-max-pods
+ - Name: Using the Node Tuning Operator
+ File: nodes-node-tuning-operator
+ - Name: Remediating, fencing, and maintaining nodes
+ File: nodes-remediating-fencing-maintaining-rhwa
+# Cannot create namespace needed to oc debug and reboot; revisit after Operator book converted
+# - Name: Understanding node rebooting
+# File: nodes-nodes-rebooting
+# cannot create resource "kubeletconfigs"
+# - Name: Freeing node resources using garbage collection
+# File: nodes-nodes-garbage-collection
+# cannot create resource "kubeletconfigs"
+# - Name: Allocating resources for nodes
+# File: nodes-nodes-resources-configuring
+# cannot create resource "kubeletconfigs"
+# - Name: Allocating specific CPUs for nodes in a cluster
+# File: nodes-nodes-resources-cpus
+# cannot create resource "kubeletconfigs"
+# - Name: Configuring the TLS security profile for the kubelet
+# File: nodes-nodes-tls
+# Distros: openshift-rosa
+# - Name: Monitoring for problems in your nodes
+# File: nodes-nodes-problem-detector
+ - Name: Machine Config Daemon metrics
+ File: nodes-nodes-machine-config-daemon-metrics
+# cannot patch resource "nodes"
+# - Name: Creating infrastructure nodes
+# File: nodes-nodes-creating-infrastructure-nodes
+- Name: Working with containers
+ Dir: containers
+ Topics:
+ - Name: Understanding containers
+ File: nodes-containers-using
+ - Name: Using Init Containers to perform tasks before a pod is deployed
+ File: nodes-containers-init
+ Distros: openshift-rosa
+ - Name: Using volumes to persist container data
+ File: nodes-containers-volumes
+ - Name: Mapping volumes using projected volumes
+ File: nodes-containers-projected-volumes
+ - Name: Allowing containers to consume API objects
+ File: nodes-containers-downward-api
+ - Name: Copying files to or from a container
+ File: nodes-containers-copying-files
+ - Name: Executing remote commands in a container
+ File: nodes-containers-remote-commands
+ - Name: Using port forwarding to access applications in a container
+ File: nodes-containers-port-forwarding
+# cannot patch resource "configmaps"
+# - Name: Using sysctls in containers
+# File: nodes-containers-sysctls
+- Name: Working with clusters
+ Dir: clusters
+ Topics:
+ - Name: Viewing system event information in a cluster
+ File: nodes-containers-events
+ - Name: Analyzing cluster resource levels
+ File: nodes-cluster-resource-levels
+ Distros: openshift-rosa
+ - Name: Setting limit ranges
+ File: nodes-cluster-limit-ranges
+ - Name: Configuring cluster memory to meet container memory and risk requirements
+ File: nodes-cluster-resource-configure
+ Distros: openshift-rosa
+ - Name: Configuring your cluster to place pods on overcommitted nodes
+ File: nodes-cluster-overcommit
+ Distros: openshift-rosa
+ - Name: Configuring the Linux cgroup version on your nodes
+ File: nodes-cluster-cgroups-2
+ Distros: openshift-enterprise
+ - Name: Configuring the Linux cgroup version on your nodes
+ File: nodes-cluster-cgroups-okd
+ Distros: openshift-origin
+# The TechPreviewNoUpgrade Feature Gate is not allowed
+# - Name: Enabling features using FeatureGates
+# File: nodes-cluster-enabling-features
+# Distros: openshift-rosa
+# Error: nodes.config.openshift.io "cluster" could not be patched
+# - Name: Improving cluster stability in high latency environments using worker latency profiles
+# File: nodes-cluster-worker-latency-profiles
+# Not supported per Michael McNeill
+#- Name: Remote worker nodes on the network edge
+# Dir: edge
+# Topics:
+# - Name: Using remote worker node at the network edge
+# File: nodes-edge-remote-workers
+# Not supported per Michael McNeill
+#- Name: Worker nodes for single-node OpenShift clusters
+# Dir: nodes
+# Distros: openshift-rosa
+# Topics:
+# - Name: Adding worker nodes to single-node OpenShift clusters
+# File: nodes-sno-worker-nodes
+---
Name: Logging
Dir: logging
Distros: openshift-rosa
Topics:
- Name: Release notes
- File: cluster-logging-release-notes
+ Dir: logging_release_notes
+ Topics:
+ - Name: Logging 5.8
+ File: logging-5-8-release-notes
+ - Name: Logging 5.7
+ File: logging-5-7-release-notes
+- Name: Support
+ File: cluster-logging-support
+- Name: Troubleshooting logging
+ Dir: troubleshooting
+ Topics:
+ - Name: Viewing Logging status
+ File: cluster-logging-cluster-status
+ - Name: Troubleshooting log forwarding
+ File: log-forwarding-troubleshooting
+ - Name: Troubleshooting logging alerts
+ File: troubleshooting-logging-alerts
+ - Name: Viewing the status of the Elasticsearch log store
+ File: cluster-logging-log-store-status
- Name: About Logging
File: cluster-logging
- Name: Installing Logging
File: cluster-logging-deploying
+- Name: Updating Logging
+ File: cluster-logging-upgrading
+- Name: Visualizing logs
+ Dir: log_visualization
+ Topics:
+ - Name: About log visualization
+ File: log-visualization
+ - Name: Log visualization with the web console
+ File: log-visualization-ocp-console
+ - Name: Viewing cluster dashboards
+ File: cluster-logging-dashboards
+ - Name: Log visualization with Kibana
+ File: logging-kibana
- Name: Accessing the service logs
File: sd-accessing-the-service-logs
- Name: Viewing cluster logs in the AWS Console
@@ -412,82 +1310,97 @@ Topics:
- Name: Configuring your Logging deployment
Dir: config
Topics:
- - Name: About the Cluster Logging custom resource
- File: cluster-logging-configuring-cr
- - Name: Configuring the logging collector
- File: cluster-logging-collector
- - Name: Configuring the log store
- File: cluster-logging-log-store
- - Name: Configuring the log visualizer
- File: cluster-logging-visualizer
- - Name: Configuring Logging storage
- File: cluster-logging-storage-considerations
- Name: Configuring CPU and memory limits for Logging components
File: cluster-logging-memory
- - Name: Using tolerations to control Logging pod placement
- File: cluster-logging-tolerations
- - Name: Moving the Logging resources with node selectors
- File: cluster-logging-moving-nodes
#- Name: Configuring systemd-journald and Fluentd
# File: cluster-logging-systemd
- - Name: Maintenance and support
- File: cluster-logging-maintenance-support
-- Name: Logging with the LokiStack
- File: cluster-logging-loki
-- Name: Viewing logs for a specific resource
- File: viewing-resource-logs
-- Name: Viewing cluster logs in Kibana
- File: cluster-logging-visualizer
-- Name: Forwarding logs to third party systems
- File: cluster-logging-external
-- Name: Enabling JSON logging
- File: cluster-logging-enabling-json-logging
-- Name: Collecting and storing Kubernetes events
- File: cluster-logging-eventrouter
-# - Name: Forwarding logs using ConfigMaps
-# File: cluster-logging-external-configmap
-- Name: Updating Logging
- File: cluster-logging-upgrading
-- Name: Viewing cluster dashboards
- File: cluster-logging-dashboards
-- Name: Troubleshooting Logging
- Dir: troubleshooting
+- Name: Log collection and forwarding
+ Dir: log_collection_forwarding
Topics:
- - Name: Viewing Logging status
- File: cluster-logging-cluster-status
- - Name: Viewing the status of the log store
- File: cluster-logging-log-store-status
- - Name: Understanding Logging alerts
- File: cluster-logging-alerts
- - Name: Collecting logging data for Red Hat Support
- File: cluster-logging-must-gather
- - Name: Troubleshooting for Critical Alerts
- File: cluster-logging-troubleshooting-for-critical-alerts
+ - Name: About log collection and forwarding
+ File: log-forwarding
+ - Name: Log output types
+ File: logging-output-types
+ - Name: Enabling JSON log forwarding
+ File: cluster-logging-enabling-json-logging
+ - Name: Configuring log forwarding
+ File: configuring-log-forwarding
+ - Name: Configuring the logging collector
+ File: cluster-logging-collector
+ - Name: Collecting and storing Kubernetes events
+ File: cluster-logging-eventrouter
+- Name: Log storage
+ Dir: log_storage
+ Topics:
+ - Name: About log storage
+ File: about-log-storage
+ - Name: Installing log storage
+ File: installing-log-storage
+ - Name: Configuring the LokiStack log store
+ File: cluster-logging-loki
+ - Name: Configuring the Elasticsearch log store
+ File: logging-config-es-store
+- Name: Logging alerts
+ Dir: logging_alerts
+ Topics:
+ - Name: Default logging alerts
+ File: default-logging-alerts
+ - Name: Custom logging alerts
+ File: custom-logging-alerts
+- Name: Performance and reliability tuning
+ Dir: performance_reliability
+ Topics:
+ - Name: Flow control mechanisms
+ File: logging-flow-control-mechanisms
+- Name: Scheduling resources
+ Dir: scheduling_resources
+ Topics:
+ - Name: Using node selectors to move logging resources
+ File: logging-node-selectors
+ - Name: Using tolerations to control logging pod placement
+ File: logging-taints-tolerations
- Name: Uninstalling Logging
File: cluster-logging-uninstall
- Name: Exported fields
File: cluster-logging-exported-fields
+- Name: API reference
+ Dir: api_reference
+ Topics:
+# - Name: 5.8 Logging API reference
+# File: logging-5-8-reference
+# - Name: 5.7 Logging API reference
+# File: logging-5-7-reference
+ - Name: 5.6 Logging API reference
+ File: logging-5-6-reference
+- Name: Glossary
+ File: logging-common-terms
---
-Name: Monitoring user-defined projects
+Name: Monitoring
Dir: monitoring
Distros: openshift-rosa
Topics:
-- Name: Understanding the monitoring stack
- File: rosa-understanding-the-monitoring-stack
+- Name: Monitoring overview
+ File: monitoring-overview
- Name: Accessing monitoring for user-defined projects
- File: rosa-accessing-monitoring-for-user-defined-projects
+ File: sd-accessing-monitoring-for-user-defined-projects
- Name: Configuring the monitoring stack
- File: rosa-configuring-the-monitoring-stack
+ File: configuring-the-monitoring-stack
+- Name: Disabling monitoring for user-defined projects
+ File: sd-disabling-monitoring-for-user-defined-projects
- Name: Enabling alert routing for user-defined projects
- File: rosa-enabling-alert-routing-for-user-defined-projects
+ File: enabling-alert-routing-for-user-defined-projects
- Name: Managing metrics
- File: rosa-managing-metrics
+ File: managing-metrics
- Name: Managing alerts
File: managing-alerts
- Name: Reviewing monitoring dashboards
- File: rosa-reviewing-monitoring-dashboards
+ File: reviewing-monitoring-dashboards
+- Name: Accessing third-party monitoring APIs
+ File: accessing-third-party-monitoring-apis
- Name: Troubleshooting monitoring issues
- File: rosa-troubleshooting-monitoring-issues
+ File: troubleshooting-monitoring-issues
+- Name: Config map reference for the Cluster Monitoring Operator
+ File: config-map-reference-for-the-cluster-monitoring-operator
---
Name: Service Mesh
Dir: service_mesh
@@ -585,27 +1498,3 @@ Topics:
Topics:
- Name: Serverless overview
File: about-serverless
----
-Name: Troubleshooting
-Dir: sd_support
-Distros: openshift-rosa
-Topics:
-- Name: Remote health monitoring with connected clusters
- Dir: remote_health_monitoring
- Topics:
- - Name: About remote health monitoring
- File: about-remote-health-monitoring
- - Name: Showing data collected by remote health monitoring
- File: showing-data-collected-by-remote-health-monitoring
- - Name: Using Insights to identify issues with your cluster
- File: using-insights-to-identify-issues-with-your-cluster
-- Name: Troubleshooting expired offline access tokens
- File: rosa-troubleshooting-expired-tokens
-- Name: Troubleshooting installations
- File: rosa-troubleshooting-installations
-- Name: Troubleshooting IAM roles
- File: rosa-troubleshooting-iam-resources
-- Name: Troubleshooting cluster deployments
- File: rosa-troubleshooting-deployments
-- Name: Red Hat OpenShift Service on AWS managed resources
- File: rosa-managed-resources
diff --git a/authentication/managing_cloud_provider_credentials/cco-mode-gcp-workload-identity.adoc b/_unused_topics/cco-mode-gcp-workload-identity.adoc
similarity index 93%
rename from authentication/managing_cloud_provider_credentials/cco-mode-gcp-workload-identity.adoc
rename to _unused_topics/cco-mode-gcp-workload-identity.adoc
index ced6e811400a..0df5ff50ee20 100644
--- a/authentication/managing_cloud_provider_credentials/cco-mode-gcp-workload-identity.adoc
+++ b/_unused_topics/cco-mode-gcp-workload-identity.adoc
@@ -1,4 +1,4 @@
-:_content-type: ASSEMBLY
+:_mod-docs-content-type: ASSEMBLY
[id="cco-mode-gcp-workload-identity"]
= Using manual mode with GCP Workload Identity
include::_attributes/common-attributes.adoc[]
@@ -103,7 +103,7 @@ Because the cluster is operating in manual mode when using GCP Workload Identity
[role="_additional-resources"]
.Additional resources
-* xref:../../updating/preparing-manual-creds-update.adoc#cco-ccoctl-configuring_preparing-manual-creds-update[Configuring the Cloud Credential Operator utility for a cluster update]
+* xref:../../updating/preparing_for_updates/preparing-manual-creds-update.adoc#cco-ccoctl-configuring_preparing-manual-creds-update[Configuring the Cloud Credential Operator utility for a cluster update]
//Task part 1: Configuring the Cloud Credential Operator utility
include::modules/cco-ccoctl-configuring.adoc[leveloffset=+2]
@@ -121,4 +121,4 @@ include::modules/sts-mode-installing-verifying.adoc[leveloffset=+2]
[id="additional-resources_{context}"]
== Additional resources
-* xref:../../updating/preparing-manual-creds-update.adoc#preparing-manual-creds-update[Preparing to update a cluster with manually maintained credentials]
+* xref:../../updating/preparing_for_updates/preparing-manual-creds-update.adoc#preparing-manual-creds-update[Preparing to update a cluster with manually maintained credentials]
diff --git a/authentication/managing_cloud_provider_credentials/cco-mode-sts.adoc b/_unused_topics/cco-mode-sts.adoc
similarity index 93%
rename from authentication/managing_cloud_provider_credentials/cco-mode-sts.adoc
rename to _unused_topics/cco-mode-sts.adoc
index ad0c756d57f8..7fd809f7702d 100644
--- a/authentication/managing_cloud_provider_credentials/cco-mode-sts.adoc
+++ b/_unused_topics/cco-mode-sts.adoc
@@ -1,4 +1,4 @@
-:_content-type: ASSEMBLY
+:_mod-docs-content-type: ASSEMBLY
[id="cco-mode-sts"]
= Using manual mode with Amazon Web Services Security Token Service
include::_attributes/common-attributes.adoc[]
@@ -82,7 +82,7 @@ Because the cluster is operating in manual mode when using STS, it is not able t
[role="_additional-resources"]
.Additional resources
-* xref:../../updating/preparing-manual-creds-update.adoc#cco-ccoctl-configuring_preparing-manual-creds-update[Configuring the Cloud Credential Operator utility for a cluster update]
+* xref:../../updating/preparing_for_updates/preparing-manual-creds-update.adoc#cco-ccoctl-configuring_preparing-manual-creds-update[Configuring the Cloud Credential Operator utility for a cluster update]
//[pre-4.8]Task part 1: Creating AWS resources manually
//include::modules/sts-mode-installing-manual-config.adoc[leveloffset=+2]
@@ -111,4 +111,4 @@ include::modules/sts-mode-installing-verifying.adoc[leveloffset=+2]
[id="additional-resources_{context}"]
== Additional resources
-* xref:../../updating/preparing-manual-creds-update.adoc#preparing-manual-creds-update[Preparing to update a cluster with manually maintained credentials]
+* xref:../../updating/preparing_for_updates/preparing-manual-creds-update.adoc#preparing-manual-creds-update[Preparing to update a cluster with manually maintained credentials]
diff --git a/_unused_topics/cco-short-term-creds-auth-flows.adoc b/_unused_topics/cco-short-term-creds-auth-flows.adoc
new file mode 100644
index 000000000000..52a1dca7cb54
--- /dev/null
+++ b/_unused_topics/cco-short-term-creds-auth-flows.adoc
@@ -0,0 +1,39 @@
+// Module included in the following assemblies:
+//
+// * authentication/managing_cloud_provider_credentials/cco-short-term-creds.adoc
+
+:_mod-docs-content-type: REFERENCE
+[id="cco-short-term-creds-auth-flows_{context}"]
+= Provider authentication details
+
+The authentication flow for this method is similar across the supported cloud providers.
+
+[id="cco-short-term-creds-auth-flow-aws_{context}"]
+== AWS Security Token Service
+
+In manual mode with STS, the individual {product-title} cluster components use the AWS Security Token Service (STS) to assume IAM roles that provide short-term, limited-privilege security credentials. Each component that makes AWS API calls uses an IAM role that is specific to that component.
+
+.AWS Security Token Service authentication flow
+image::347_OpenShift_credentials_with_STS_updates_0623_AWS.png[Detailed authentication flow between AWS and the cluster when using AWS STS]
+
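+Components typically consume these credentials as a secret that contains an AWS shared credentials file referencing the mounted service account token. The following is a minimal sketch, assuming hypothetical names and a hypothetical role ARN; the `credentials` key and token path follow the common convention for this configuration:
+
+[source,yaml]
+----
+apiVersion: v1
+kind: Secret
+metadata:
+  name: example-component-aws-credentials # hypothetical name
+  namespace: example-component-namespace # hypothetical namespace
+stringData:
+  credentials: |
+    [default]
+    # Hypothetical role ARN; the real ARN is specific to each component.
+    role_arn = arn:aws:iam::123456789012:role/example-component-role
+    web_identity_token_file = /var/run/secrets/openshift/serviceaccount/token
+----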
+[id="cco-short-term-creds-auth-flow-gcp_{context}"]
+== GCP Workload Identity
+
+In manual mode with GCP Workload Identity, the individual {product-title} cluster components use the GCP workload identity provider to impersonate GCP service accounts by using short-term, limited-privilege credentials.
+
+.GCP Workload Identity authentication flow
+image::347_OpenShift_credentials_with_STS_updates_0623_GCP.png[Detailed authentication flow between GCP and the cluster when using GCP Workload Identity]
+
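+A comparable minimal sketch for GCP, assuming hypothetical names and identifiers, uses an external account configuration that exchanges the mounted service account token for short-lived credentials:
+
+[source,yaml]
+----
+apiVersion: v1
+kind: Secret
+metadata:
+  name: example-component-gcp-credentials # hypothetical name
+  namespace: example-component-namespace # hypothetical namespace
+stringData:
+  service_account.json: |
+    {
+      "type": "external_account",
+      "audience": "//iam.googleapis.com/projects/123456789/locations/global/workloadIdentityPools/example-pool/providers/example-provider",
+      "subject_token_type": "urn:ietf:params:oauth:token-type:jwt",
+      "token_url": "https://sts.googleapis.com/v1/token",
+      "credential_source": {
+        "file": "/var/run/secrets/openshift/serviceaccount/token"
+      }
+    }
+----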
+////
+[id="cco-short-term-creds-auth-flow-azure_{context}"]
+== Azure AD Workload Identity
+
+//todo: work with dev and diagrams team to get a diagram for Azure
+.Azure AD Workload Identity authentication flow
+image::Azure_diagram.png[Detailed authentication flow between Azure and the cluster when using Azure AD Workload Identity]
+////
+
+[id="cco-short-term-creds-auth-flow-refresh_{context}"]
+== Automated credential refreshing
+
+Requests for new and refreshed credentials are automated by using an appropriately configured OpenID Connect (OIDC) identity provider combined with provider-specific service accounts or roles. {product-title} signs Kubernetes service account tokens that are trusted by the cloud provider. These tokens can be mounted into a pod and used for authentication. By default, tokens are refreshed after one hour.
\ No newline at end of file
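+
+A minimal sketch of mounting such a token into a pod by using a projected volume, assuming a hypothetical audience value and matching the one-hour default refresh interval:
+
+[source,yaml]
+----
+apiVersion: v1
+kind: Pod
+metadata:
+  name: example-pod # hypothetical name
+spec:
+  containers:
+  - name: example
+    image: registry.example.com/example:latest # hypothetical image
+    volumeMounts:
+    - name: bound-sa-token
+      mountPath: /var/run/secrets/openshift/serviceaccount
+      readOnly: true
+  volumes:
+  - name: bound-sa-token
+    projected:
+      sources:
+      - serviceAccountToken:
+          audience: openshift # hypothetical audience
+          expirationSeconds: 3600 # the kubelet rotates the token before it expires
+          path: token
+----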
diff --git a/_unused_topics/cluster-logging-collector-envvar.adoc b/_unused_topics/cluster-logging-collector-envvar.adoc
deleted file mode 100644
index d1a96e696399..000000000000
--- a/_unused_topics/cluster-logging-collector-envvar.adoc
+++ /dev/null
@@ -1,30 +0,0 @@
-// Module included in the following assemblies:
-//
-// * logging/cluster-logging-collector.adoc
-
-[id="cluster-logging-collector-envvar_{context}"]
-= Configuring the logging collector using environment variables
-
-You can use environment variables to modify the configuration of the Fluentd log
-collector.
-
-See the link:https://github.com/openshift/origin-aggregated-logging/blob/master/fluentd/README.md[Fluentd README] in Github for lists of the
-available environment variables.
-
-.Prerequisites
-
-* Set OpenShift Logging to the unmanaged state. Operators in an unmanaged state are unsupported and the cluster administrator assumes full control of the individual component configurations and upgrades.
-
-.Procedure
-
-Set any of the Fluentd environment variables as needed:
-
-----
-$ oc set env ds/fluentd <env-var>=<value>
-----
-
-For example:
-
-----
-$ oc set env ds/fluentd BUFFER_SIZE_LIMIT=24
-----
diff --git a/_unused_topics/cluster-logging-configuring-node-selector.adoc b/_unused_topics/cluster-logging-configuring-node-selector.adoc
deleted file mode 100644
index 05a470114490..000000000000
--- a/_unused_topics/cluster-logging-configuring-node-selector.adoc
+++ /dev/null
@@ -1,65 +0,0 @@
-// Module included in the following assemblies:
-//
-// * logging/cluster-logging-elasticsearch.adoc
-
-[id="cluster-logging-configuring-node-selector_{context}"]
-= Specifying a node for cluster logging components using node selectors
-
-Each component specification allows the component to target a specific node.
-
-.Procedure
-
-Edit the Cluster Logging Custom Resource (CR) in the `openshift-logging` project:
-
-[source,yaml]
-----
-$ oc edit ClusterLogging instance
-
-apiVersion: "logging.openshift.io/v1"
-kind: "ClusterLogging"
-metadata:
- name: "nodeselector"
-spec:
- managementState: "Managed"
- logStore:
- type: "elasticsearch"
- elasticsearch:
- nodeSelector: <1>
- logging: es
- nodeCount: 3
- resources:
- limits:
- memory: 16Gi
- requests:
- cpu: 500m
- memory: 16Gi
- storage:
- size: "20G"
- storageClassName: "gp2"
- redundancyPolicy: "ZeroRedundancy"
- visualization:
- type: "kibana"
- kibana:
- nodeSelector: <2>
- logging: kibana
- replicas: 1
- curation:
- type: "curator"
- curator:
- nodeSelector: <3>
- logging: curator
- schedule: "*/10 * * * *"
- collection:
- logs:
- type: "fluentd"
- fluentd:
- nodeSelector: <4>
- logging: fluentd
-----
-
-<1> Node selector for Elasticsearch.
-<2> Node selector for Kibana.
-<3> Node selector for Curator.
-<4> Node selector for Fluentd.
-
-
diff --git a/_unused_topics/cluster-logging-elasticsearch-admin.adoc b/_unused_topics/cluster-logging-elasticsearch-admin.adoc
deleted file mode 100644
index b1b3843deb19..000000000000
--- a/_unused_topics/cluster-logging-elasticsearch-admin.adoc
+++ /dev/null
@@ -1,43 +0,0 @@
-// Module included in the following assemblies:
-//
-// * logging/cluster-logging-elasticsearch.adoc
-
-[id="cluster-logging-elasticsearch-admin_{context}"]
-= Performing administrative Elasticsearch operations
-
-An administrator certificate, key, and CA that can be used to communicate with and perform administrative operations on Elasticsearch are provided within the *elasticsearch* secret in the `openshift-logging` project.
-
-[NOTE]
-====
-To confirm whether your OpenShift Logging installation provides these, run:
-----
-$ oc describe secret elasticsearch -n openshift-logging
-----
-====
-
-. Connect to an Elasticsearch pod that is in the cluster on which you are attempting to perform maintenance.
-
-. To find a pod in a cluster use:
-+
-----
-$ oc get pods -l component=elasticsearch -o name -n openshift-logging | head -1
-----
-
-. Connect to a pod:
-+
-----
-$ oc rsh <pod_name>
-----
-
-. Once connected to an Elasticsearch container, you can use the certificates mounted from the secret to communicate with Elasticsearch per its link:https://www.elastic.co/guide/en/elasticsearch/reference/2.3/indices.html[Indices APIs documentation].
-+
-Fluentd sends its logs to Elasticsearch using the index format *infra-00000x* or *app-00000x*.
-+
-For example, to delete all logs for the openshift-logging index, *app-000001*, we can run:
-+
-----
-$ curl --key /etc/elasticsearch/secret/admin-key \
---cert /etc/elasticsearch/secret/admin-cert \
---cacert /etc/elasticsearch/secret/admin-ca -XDELETE \
-"https://localhost:9200/app-000001"
-----
diff --git a/_unused_topics/cluster-logging-exported-fields-aushape.2021-06-04.adoc b/_unused_topics/cluster-logging-exported-fields-aushape.2021-06-04.adoc
deleted file mode 100644
index 3223ed28b26e..000000000000
--- a/_unused_topics/cluster-logging-exported-fields-aushape.2021-06-04.adoc
+++ /dev/null
@@ -1,63 +0,0 @@
-// Module included in the following assemblies:
-//
-// * logging/cluster-logging-exported-fields.adoc
-
-[id="cluster-logging-exported-fields-aushape_{context}"]
-= Aushape exported fields
-
-These are the Aushape fields exported by OpenShift Logging available for searching
-from Elasticsearch and Kibana.
-
-Audit events converted with Aushape. For more information, see
-link:https://github.com/Scribery/aushape[Aushape].
-
-[cols="3,7",options="header"]
-|===
-|Parameter
-|Description
-
-| `aushape.serial`
-|Audit event serial number.
-
-| `aushape.node`
-|Name of the host where the audit event occurred.
-
-| `aushape.error`
-|The error aushape encountered while converting the event.
-
-| `aushape.trimmed`
-|An array of JSONPath expressions relative to the event object, specifying
-objects or arrays with the content removed as the result of event size limiting.
-An empty string means the event removed the content, and an empty array means
-the trimming occurred by unspecified objects and arrays.
-
-| `aushape.text`
-|An array log record strings representing the original audit event.
-|===
-
-[discrete]
-[id="exported-fields-aushape.data_{context}"]
-=== `aushape.data` Fields
-
-Parsed audit event data related to Aushape.
-
-[cols="3,7",options="header"]
-|===
-|Parameter
-|Description
-
-| `aushape.data.avc`
-|type: nested
-
-| `aushape.data.execve`
-|type: string
-
-| `aushape.data.netfilter_cfg`
-|type: nested
-
-| `aushape.data.obj_pid`
-|type: nested
-
-| `aushape.data.path`
-|type: nested
-|===
diff --git a/_unused_topics/cluster-logging-exported-fields-collectd.adoc b/_unused_topics/cluster-logging-exported-fields-collectd.adoc
deleted file mode 100644
index 75dfb4c71428..000000000000
--- a/_unused_topics/cluster-logging-exported-fields-collectd.adoc
+++ /dev/null
@@ -1,993 +0,0 @@
-// Module included in the following assemblies:
-//
-// * logging/cluster-logging-exported-fields.adoc
-
-[id="cluster-logging-exported-fields-collectd_{context}"]
-= `collectd` exported fields
-
-These are the `collectd` and `collectd-*` fields exported by the logging system and available for searching
-from Elasticsearch and Kibana.
-
-[discrete]
-[id="exported-fields-collectd_{context}"]
-=== `collectd` Fields
-
-The following fields represent namespace metrics metadata.
-
-[cols="3,7",options="header"]
-|===
-|Parameter
-|Description
-
-| `collectd.interval`
-|type: float
-
-The `collectd` interval.
-
-| `collectd.plugin`
-|type: string
-
-The `collectd` plug-in.
-
-| `collectd.plugin_instance`
-|type: string
-
-The `collectd` plugin_instance.
-
-| `collectd.type_instance`
-|type: string
-
-The `collectd` `type_instance`.
-
-| `collectd.type`
-|type: string
-
-The `collectd` type.
-
-| `collectd.dstypes`
-|type: string
-
-The `collectd` dstypes.
-|===
-
-[discrete]
-[id="exported-fields-collectd.processes_{context}"]
-=== `collectd.processes` Fields
-
-The following field corresponds to the `collectd` processes plug-in.
-
-[cols="3,7",options="header"]
-|===
-|Parameter
-|Description
-
-| `collectd.processes.ps_state`
-|type: integer
-The `collectd ps_state` type of processes plug-in.
-|===
-
-[discrete]
-[id="exported-fields-collectd.processes.ps_disk_ops_{context}"]
-=== `collectd.processes.ps_disk_ops` Fields
-
-The `collectd` `ps_disk_ops` type of processes plug-in.
-
-[cols="3,7",options="header"]
-|===
-|Parameter
-|Description
-
-| `collectd.processes.ps_disk_ops.read`
-|type: float
-
-`TODO`
-
-| `collectd.processes.ps_disk_ops.write`
-|type: float
-
-`TODO`
-
-| `collectd.processes.ps_vm`
-|type: integer
-
-The `collectd` `ps_vm` type of processes plug-in.
-
-| `collectd.processes.ps_rss`
-|type: integer
-
-The `collectd` `ps_rss` type of processes plug-in.
-
-| `collectd.processes.ps_data`
-|type: integer
-
-The `collectd` `ps_data` type of processes plug-in.
-
-| `collectd.processes.ps_code`
-|type: integer
-
-The `collectd` `ps_code` type of processes plug-in.
-
-| `collectd.processes.ps_stacksize`
-| type: integer
-
-The `collectd` `ps_stacksize` type of processes plug-in.
-|===
-
-[discrete]
-[id="exported-fields-collectd.processes.ps_cputime_{context}"]
-=== `collectd.processes.ps_cputime` Fields
-
-The `collectd` `ps_cputime` type of processes plug-in.
-
-[cols="3,7",options="header"]
-|===
-|Parameter
-|Description
-
-| `collectd.processes.ps_cputime.user`
-|type: float
-
-`TODO`
-
-| `collectd.processes.ps_cputime.syst`
-|type: float
-
-`TODO`
-|===
-
-[discrete]
-[id="exported-fields-collectd.processes.ps_count_{context}"]
-=== `collectd.processes.ps_count` Fields
-
-The `collectd` `ps_count` type of processes plug-in.
-
-[cols="3,7",options="header"]
-|===
-|Parameter
-|Description
-
-| `collectd.processes.ps_count.processes`
-|type: integer
-
-`TODO`
-
-| `collectd.processes.ps_count.threads`
-|type: integer
-
-`TODO`
-|===
-
-[discrete]
-[id="exported-fields-collectd.processes.ps_pagefaults_{context}"]
-=== `collectd.processes.ps_pagefaults` Fields
-
-The `collectd` `ps_pagefaults` type of processes plug-in.
-
-[cols="3,7",options="header"]
-|===
-|Parameter
-|Description
-
-| `collectd.processes.ps_pagefaults.majflt`
-|type: float
-
-`TODO`
-
-| `collectd.processes.ps_pagefaults.minflt`
-|type: float
-
-`TODO`
-|===
-
-[discrete]
-[id="exported-fields-collectd.processes.ps_disk_octets_{context}"]
-=== `collectd.processes.ps_disk_octets` Fields
-
-The `collectd ps_disk_octets` type of processes plug-in.
-
-[cols="3,7",options="header"]
-|===
-|Parameter
-|Description
-
-| `collectd.processes.ps_disk_octets.read`
-|type: float
-
-`TODO`
-
-| `collectd.processes.ps_disk_octets.write`
-|type: float
-
-`TODO`
-
-| `collectd.processes.fork_rate`
-|type: float
-
-The `collectd` `fork_rate` type of processes plug-in.
-|===
-
-[discrete]
-[id="exported-fields-collectd.disk_{context}"]
-=== `collectd.disk` Fields
-
-Corresponds to `collectd` disk plug-in.
-
-[discrete]
-[id="exported-fields-collectd.disk.disk_merged_{context}"]
-=== `collectd.disk.disk_merged` Fields
-
-The `collectd` `disk_merged` type of disk plug-in.
-
-[cols="3,7",options="header"]
-|===
-|Parameter
-|Description
-
-| `collectd.disk.disk_merged.read`
-|type: float
-
-`TODO`
-
-| `collectd.disk.disk_merged.write`
-|type: float
-
-`TODO`
-|===
-
-[discrete]
-[id="exported-fields-collectd.disk.disk_octets_{context}"]
-=== `collectd.disk.disk_octets` Fields
-
-The `collectd` `disk_octets` type of disk plug-in.
-
-[cols="3,7",options="header"]
-|===
-|Parameter
-|Description
-
-| `collectd.disk.disk_octets.read`
-|type: float
-
-`TODO`
-
-| `collectd.disk.disk_octets.write`
-|type: float
-
-`TODO`
-|===
-
-[discrete]
-[id="exported-fields-collectd.disk.disk_time_{context}"]
-=== `collectd.disk.disk_time` Fields
-
-The `collectd` `disk_time` type of disk plug-in.
-
-[cols="3,7",options="header"]
-|===
-|Parameter
-|Description
-
-| `collectd.disk.disk_time.read`
-|type: float
-
-`TODO`
-
-| `collectd.disk.disk_time.write`
-|type: float
-
-`TODO`
-|===
-
-[discrete]
-[id="exported-fields-collectd.disk.disk_ops_{context}"]
-=== `collectd.disk.disk_ops` Fields
-
-The `collectd` `disk_ops` type of disk plug-in.
-
-[cols="3,7",options="header"]
-|===
-|Parameter
-|Description
-
-| `collectd.disk.disk_ops.read`
-|type: float
-
-`TODO`
-
-| `collectd.disk.disk_ops.write`
-|type: float
-
-`TODO`
-
-| `collectd.disk.pending_operations`
-|type: integer
-
-The `collectd` `pending_operations` type of disk plug-in.
-|===
-
-[discrete]
-[id="exported-fields-collectd.disk.disk_io_time_{context}"]
-=== `collectd.disk.disk_io_time` Fields
-
-The `collectd disk_io_time` type of disk plug-in.
-
-[cols="3,7",options="header"]
-|===
-|Parameter
-|Description
-
-| `collectd.disk.disk_io_time.io_time`
-|type: float
-
-`TODO`
-
-| `collectd.disk.disk_io_time.weighted_io_time`
-|type: float
-
-`TODO`
-|===
-
-[discrete]
-[id="exported-fields-collectd.interface_{context}"]
-=== `collectd.interface` Fields
-
-Corresponds to the `collectd` interface plug-in.
-
-[discrete]
-[id="exported-fields-collectd.interface.if_octets_{context}"]
-=== `collectd.interface.if_octets` Fields
-
-The `collectd` `if_octets` type of interface plug-in.
-
-[cols="3,7",options="header"]
-|===
-|Parameter
-|Description
-
-| `collectd.interface.if_octets.rx`
-|type: float
-
-`TODO`
-
-| `collectd.interface.if_octets.tx`
-|type: float
-
-`TODO`
-|===
-
-[discrete]
-[id="exported-fields-collectd.interface.if_packets_{context}"]
-=== `collectd.interface.if_packets` Fields
-
-The `collectd` `if_packets` type of interface plug-in.
-
-[cols="3,7",options="header"]
-|===
-|Parameter
-|Description
-
-| `collectd.interface.if_packets.rx`
-|type: float
-
-`TODO`
-
-| `collectd.interface.if_packets.tx`
-|type: float
-
-`TODO`
-|===
-
-[discrete]
-[id="exported-fields-collectd.interface.if_errors_{context}"]
-=== `collectd.interface.if_errors` Fields
-
-The `collectd` `if_errors` type of interface plug-in.
-
-[cols="3,7",options="header"]
-|===
-|Parameter
-|Description
-
-| `collectd.interface.if_errors.rx`
-|type: float
-
-`TODO`
-
-| `collectd.interface.if_errors.tx`
-|type: float
-
-`TODO`
-|===
-
-[discrete]
-[id="exported-fields-collectd.interface.if_dropped_{context}"]
-=== collectd.interface.if_dropped Fields
-
-The `collectd` `if_dropped` type of interface plug-in.
-
-[cols="3,7",options="header"]
-|===
-|Parameter
-|Description
-
-| `collectd.interface.if_dropped.rx`
-|type: float
-
-`TODO`
-
-| `collectd.interface.if_dropped.tx`
-|type: float
-
-`TODO`
-|===
-
-[discrete]
-[id="exported-fields-collectd.virt_{context}"]
-=== `collectd.virt` Fields
-
-Corresponds to `collectd` virt plug-in.
-
-[discrete]
-[id="exported-fields-collectd.virt.if_octets_{context}"]
-=== `collectd.virt.if_octets` Fields
-
-The `collectd if_octets` type of virt plug-in.
-
-[cols="3,7",options="header"]
-|===
-|Parameter
-|Description
-
-| `collectd.virt.if_octets.rx`
-|type: float
-
-`TODO`
-
-| `collectd.virt.if_octets.tx`
-|type: float
-
-`TODO`
-|===
-
-[discrete]
-[id="exported-fields-collectd.virt.if_packets_{context}"]
-=== `collectd.virt.if_packets` Fields
-
-The `collectd` `if_packets` type of virt plug-in.
-
-[cols="3,7",options="header"]
-|===
-|Parameter
-|Description
-
-| `collectd.virt.if_packets.rx`
-|type: float
-
-`TODO`
-
-| `collectd.virt.if_packets.tx`
-|type: float
-
-`TODO`
-|===
-
-[discrete]
-[id="exported-fields-collectd.virt.if_errors_{context}"]
-=== `collectd.virt.if_errors` Fields
-
-The `collectd` `if_errors` type of virt plug-in.
-
-[cols="3,7",options="header"]
-|===
-|Parameter
-|Description
-
-| `collectd.virt.if_errors.rx`
-|type: float
-
-`TODO`
-
-| `collectd.virt.if_errors.tx`
-|type: float
-
-`TODO`
-|===
-
-[discrete]
-[id="exported-fields-collectd.virt.if_dropped_{context}"]
-=== `collectd.virt.if_dropped` Fields
-
-The `collectd` `if_dropped` type of virt plug-in.
-
-[cols="3,7",options="header"]
-|===
-|Parameter
-|Description
-
-| `collectd.virt.if_dropped.rx`
-|type: float
-
-`TODO`
-
-| `collectd.virt.if_dropped.tx`
-|type: float
-
-`TODO`
-|===
-
-[discrete]
-[id="exported-fields-collectd.virt.disk_ops_{context}"]
-=== `collectd.virt.disk_ops` Fields
-
-The `collectd` `disk_ops` type of virt plug-in.
-
-[cols="3,7",options="header"]
-|===
-|Parameter
-|Description
-
-| collectd.virt.disk_ops.read
-|type: float
-
-`TODO`
-
-| `collectd.virt.disk_ops.write`
-|type: float
-
-`TODO`
-|===
-
-[discrete]
-[id="exported-fields-collectd.virt.disk_octets_{context}"]
-=== `collectd.virt.disk_octets` Fields
-
-The `collectd` `disk_octets` type of virt plug-in.
-
-[cols="3,7",options="header"]
-|===
-|Parameter
-|Description
-
-| `collectd.virt.disk_octets.read`
-|type: float
-
-`TODO`
-
-| `collectd.virt.disk_octets.write`
-|type: float
-
-`TODO`
-
-| `collectd.virt.memory`
-|type: float
-
-The `collectd` memory type of virt plug-in.
-
-| `collectd.virt.virt_vcpu`
-|type: float
-
-The `collectd` `virt_vcpu` type of virt plug-in.
-
-| `collectd.virt.virt_cpu_total`
-|type: float
-
-The `collectd` `virt_cpu_total` type of virt plug-in.
-|===
-
-[discrete]
-[id="exported-fields-collectd.CPU_{context}"]
-=== `collectd.CPU` Fields
-
-Corresponds to the `collectd` CPU plug-in.
-
-[cols="3,7",options="header"]
-|===
-|Parameter
-|Description
-
-| `collectd.CPU.percent`
-|type: float
-
-The `collectd` type percent of plug-in CPU.
-|===
-
-[discrete]
-[id="exported-fields-collectd.df_{context}"]
-=== collectd.df Fields
-
-Corresponds to the `collectd` `df` plug-in.
-
-[cols="3,7",options="header"]
-|===
-|Parameter
-|Description
-
-| `collectd.df.df_complex`
-|type: float
-
-The `collectd` type `df_complex` of plug-in `df`.
-
-| `collectd.df.percent_bytes`
-|type: float
-
-The `collectd` type `percent_bytes` of plug-in `df`.
-|===
-
-[discrete]
-[id="exported-fields-collectd.entropy_{context}"]
-=== `collectd.entropy` Fields
-
-Corresponds to the `collectd` entropy plug-in.
-
-[cols="3,7",options="header"]
-|===
-|Parameter
-|Description
-
-| `collectd.entropy.entropy`
-|type: integer
-
-The `collectd` entropy type of entropy plug-in.
-|===
-
-////
-[discrete]
-[id="exported-fields-collectd.nfs_{context}"]
-=== `collectd.nfs` Fields
-
-Corresponds to the `collectd` NFS plug-in.
-
-[cols="3,7",options="header"]
-|===
-|Parameter
-|Description
-
-| `collectd.nfs.nfs_procedure`
-|type: integer
-
-The `collectd` `nfs_procedure` type of nfs plug-in.
-|===
-////
-
-[discrete]
-[id="exported-fields-collectd.memory_{context}"]
-=== `collectd.memory` Fields
-
-Corresponds to the `collectd` memory plug-in.
-
-[cols="3,7",options="header"]
-|===
-|Parameter
-|Description
-
-| `collectd.memory.memory`
-|type: float
-
-The `collectd` memory type of memory plug-in.
-
-| `collectd.memory.percent`
-|type: float
-
-The `collectd` percent type of memory plug-in.
-|===
-
-[discrete]
-[id="exported-fields-collectd.swap_{context}"]
-=== `collectd.swap` Fields
-
-Corresponds to the `collectd` swap plug-in.
-
-[cols="3,7",options="header"]
-|===
-|Parameter
-|Description
-
-| `collectd.swap.swap`
-|type: integer
-
-The `collectd` swap type of swap plug-in.
-
-| `collectd.swap.swap_io`
-|type: integer
-
-The `collectd swap_io` type of swap plug-in.
-|===
-
-[discrete]
-[id="exported-fields-collectd.load_{context}"]
-=== `collectd.load` Fields
-
-Corresponds to the `collectd` load plug-in.
-
-[discrete]
-[id="exported-fields-collectd.load.load_{context}"]
-=== `collectd.load.load` Fields
-
-The `collectd` load type of load plug-in
-
-[cols="3,7",options="header"]
-|===
-|Parameter
-|Description
-
-| `collectd.load.load.shortterm`
-|type: float
-
-`TODO`
-
-| `collectd.load.load.midterm`
-|type: float
-
-`TODO`
-
-| `collectd.load.load.longterm`
-|type: float
-
-`TODO`
-|===
-
-[discrete]
-[id="exported-fields-collectd.aggregation_{context}"]
-=== `collectd.aggregation` Fields
-
-Corresponds to `collectd` aggregation plug-in.
-
-[cols="3,7",options="header"]
-|===
-|Parameter
-|Description
-
-| `collectd.aggregation.percent`
-|type: float
-
-`TODO`
-|===
-
-[discrete]
-[id="exported-fields-collectd.statsd_{context}"]
-=== `collectd.statsd` Fields
-
-Corresponds to `collectd` `statsd` plug-in.
-
-[cols="3,7",options="header"]
-|===
-|Parameter
-|Description
-
-| `collectd.statsd.host_cpu`
-|type: integer
-
-The `collectd` CPU type of `statsd` plug-in.
-
-| `collectd.statsd.host_elapsed_time`
-|type: integer
-
-The `collectd` `elapsed_time` type of `statsd` plug-in.
-
-| `collectd.statsd.host_memory`
-|type: integer
-
-The `collectd` memory type of `statsd` plug-in.
-
-| `collectd.statsd.host_nic_speed`
-|type: integer
-
-The `collectd` `nic_speed` type of `statsd` plug-in.
-
-| `collectd.statsd.host_nic_rx`
-|type: integer
-
-The `collectd` `nic_rx` type of `statsd` plug-in.
-
-| `collectd.statsd.host_nic_tx`
-|type: integer
-
-The `collectd` `nic_tx` type of `statsd` plug-in.
-
-| `collectd.statsd.host_nic_rx_dropped`
-|type: integer
-
-The `collectd` `nic_rx_dropped` type of `statsd` plug-in.
-
-| `collectd.statsd.host_nic_tx_dropped`
-|type: integer
-
-The `collectd` `nic_tx_dropped` type of `statsd` plug-in.
-
-| `collectd.statsd.host_nic_rx_errors`
-|type: integer
-
-The `collectd` `nic_rx_errors` type of `statsd` plug-in.
-
-| `collectd.statsd.host_nic_tx_errors`
-|type: integer
-
-The `collectd` `nic_tx_errors` type of `statsd` plug-in.
-
-| `collectd.statsd.host_storage`
-|type: integer
-
-The `collectd` storage type of `statsd` plug-in.
-
-| `collectd.statsd.host_swap`
-|type: integer
-
-The `collectd` swap type of `statsd` plug-in.
-
-| `collectd.statsd.host_vdsm`
-|type: integer
-
-The `collectd` VDSM type of `statsd` plug-in.
-
-| `collectd.statsd.host_vms`
-|type: integer
-
-The `collectd` VMS type of `statsd` plug-in.
-
-| `collectd.statsd.vm_nic_tx_dropped`
-|type: integer
-
-The `collectd` `nic_tx_dropped` type of `statsd` plug-in.
-
-| `collectd.statsd.vm_nic_rx_bytes`
-|type: integer
-
-The `collectd` `nic_rx_bytes` type of `statsd` plug-in.
-
-| `collectd.statsd.vm_nic_tx_bytes`
-|type: integer
-
-The `collectd` `nic_tx_bytes` type of `statsd` plug-in.
-
-| `collectd.statsd.vm_balloon_min`
-|type: integer
-
-The `collectd` `balloon_min` type of `statsd` plug-in.
-
-| `collectd.statsd.vm_balloon_max`
-|type: integer
-
-The `collectd` `balloon_max` type of `statsd` plug-in.
-
-| `collectd.statsd.vm_balloon_target`
-|type: integer
-
-The `collectd` `balloon_target` type of `statsd` plug-in.
-
-| `collectd.statsd.vm_balloon_cur`
-| type: integer
-
-The `collectd` `balloon_cur` type of `statsd` plug-in.
-
-| `collectd.statsd.vm_cpu_sys`
-|type: integer
-
-The `collectd` `cpu_sys` type of `statsd` plug-in.
-
-| `collectd.statsd.vm_cpu_usage`
-|type: integer
-
-The `collectd` `cpu_usage` type of `statsd` plug-in.
-
-| `collectd.statsd.vm_disk_read_ops`
-|type: integer
-
-The `collectd` `disk_read_ops` type of `statsd` plug-in.
-
-| `collectd.statsd.vm_disk_write_ops`
-|type: integer
-
-The collectd` `disk_write_ops` type of `statsd` plug-in.
-
-| `collectd.statsd.vm_disk_flush_latency`
-|type: integer
-
-The `collectd` `disk_flush_latency` type of `statsd` plug-in.
-
-| `collectd.statsd.vm_disk_apparent_size`
-|type: integer
-
-The `collectd` `disk_apparent_size` type of `statsd` plug-in.
-
-| `collectd.statsd.vm_disk_write_bytes`
-|type: integer
-
-The `collectd` `disk_write_bytes` type of `statsd` plug-in.
-
-| `collectd.statsd.vm_disk_write_rate`
-|type: integer
-
-The `collectd` `disk_write_rate` type of `statsd` plug-in.
-
-| `collectd.statsd.vm_disk_true_size`
-|type: integer
-
-The `collectd` `disk_true_size` type of `statsd` plug-in.
-
-| `collectd.statsd.vm_disk_read_rate`
-|type: integer
-
-The `collectd` `disk_read_rate` type of `statsd` plug-in.
-
-| `collectd.statsd.vm_disk_write_latency`
-|type: integer
-
-The `collectd` `disk_write_latency` type of `statsd` plug-in.
-
-| `collectd.statsd.vm_disk_read_latency`
-|type: integer
-
-The `collectd` `disk_read_latency` type of `statsd` plug-in.
-
-| `collectd.statsd.vm_disk_read_bytes`
-|type: integer
-
-The `collectd` `disk_read_bytes` type of `statsd` plug-in.
-
-| `collectd.statsd.vm_nic_rx_dropped`
-|type: integer
-
-The `collectd` `nic_rx_dropped` type of `statsd` plug-in.
-
-| `collectd.statsd.vm_cpu_user`
-|type: integer
-
-The `collectd` `cpu_user` type of `statsd` plug-in.
-
-| `collectd.statsd.vm_nic_rx_errors`
-|type: integer
-
-The `collectd` `nic_rx_errors` type of `statsd` plug-in.
-
-| `collectd.statsd.vm_nic_tx_errors`
-|type: integer
-
-The `collectd` `nic_tx_errors` type of `statsd` plug-in.
-
-| `collectd.statsd.vm_nic_speed`
-|type: integer
-
-The `collectd` `nic_speed` type of `statsd` plug-in.
-|===
-
-[discrete]
-[id="exported-fields-collectd.postgresql_{context}"]
-=== `collectd.postgresql Fields`
-
-Corresponds to `collectd` `postgresql` plug-in.
-
-[cols="3,7",options="header"]
-|===
-|Parameter
-|Description
-
-| `collectd.postgresql.pg_n_tup_g`
-|type: integer
-
-The `collectd` type `pg_n_tup_g` of plug-in postgresql.
-
-| `collectd.postgresql.pg_n_tup_c`
-|type: integer
-
-The `collectd` type `pg_n_tup_c` of plug-in postgresql.
-
-| `collectd.postgresql.pg_numbackends`
-|type: integer
-
-The `collectd` type `pg_numbackends` of plug-in postgresql.
-
-| `collectd.postgresql.pg_xact`
-|type: integer
-
-The `collectd` type `pg_xact` of plug-in postgresql.
-
-| `collectd.postgresql.pg_db_size`
-|type: integer
-
-The `collectd` type `pg_db_size` of plug-in postgresql.
-
-| `collectd.postgresql.pg_blks`
-|type: integer
-
-The `collectd` type `pg_blks` of plug-in postgresql.
-|===
diff --git a/_unused_topics/cluster-logging-exported-fields-container.2021-06-04.adoc b/_unused_topics/cluster-logging-exported-fields-container.2021-06-04.adoc
deleted file mode 100644
index d893b804f0cc..000000000000
--- a/_unused_topics/cluster-logging-exported-fields-container.2021-06-04.adoc
+++ /dev/null
@@ -1,89 +0,0 @@
-// Module included in the following assemblies:
-//
-// * logging/cluster-logging-exported-fields.adoc
-
-[id="cluster-logging-exported-fields-container_{context}"]
-= Container exported fields
-
-These are the Docker fields exported by OpenShift Logging available for searching from Elasticsearch and Kibana.
-Namespace for docker container-specific metadata. The docker.container_id is the Docker container ID.
-
-
-[discrete]
-[id="exported-fields-pipeline_metadata.collector_{context}"]
-=== `pipeline_metadata.collector` Fields
-
-This section contains metadata specific to the collector.
-
-[cols="3,7",options="header"]
-|===
-|Parameter
-|Description
-
-| `pipeline_metadata.collector.hostname`
-|FQDN of the collector. It might be different from the FQDN of the actual emitter
-of the logs.
-
-| `pipeline_metadata.collector.name`
-|Name of the collector.
-
-| `pipeline_metadata.collector.version`
-|Version of the collector.
-
-| `pipeline_metadata.collector.ipaddr4`
-|IP address v4 of the collector server, can be an array.
-
-| `pipeline_metadata.collector.ipaddr6`
-|IP address v6 of the collector server, can be an array.
-
-| `pipeline_metadata.collector.inputname`
-|How the log message was received by the collector whether it was TCP/UDP, or
-imjournal/imfile.
-
-| `pipeline_metadata.collector.received_at`
-|Time when the message was received by the collector.
-
-| `pipeline_metadata.collector.original_raw_message`
-|The original non-parsed log message, collected by the collector or as close to the
-source as possible.
-|===
-
-[discrete]
-[id="exported-fields-pipeline_metadata.normalizer_{context}"]
-=== `pipeline_metadata.normalizer` Fields
-
-This section contains metadata specific to the normalizer.
-
-[cols="3,7",options="header"]
-|===
-|Parameter
-|Description
-
-| `pipeline_metadata.normalizer.hostname`
-|FQDN of the normalizer.
-
-| `pipeline_metadata.normalizer.name`
-|Name of the normalizer.
-
-| `pipeline_metadata.normalizer.version`
-|Version of the normalizer.
-
-| `pipeline_metadata.normalizer.ipaddr4`
-|IP address v4 of the normalizer server, can be an array.
-
-| `pipeline_metadata.normalizer.ipaddr6`
-|IP address v6 of the normalizer server, can be an array.
-
-| `pipeline_metadata.normalizer.inputname`
-|how the log message was received by the normalizer whether it was TCP/UDP.
-
-| `pipeline_metadata.normalizer.received_at`
-|Time when the message was received by the normalizer.
-
-| `pipeline_metadata.normalizer.original_raw_message`
-|The original non-parsed log message as it is received by the normalizer.
-
-| `pipeline_metadata.trace`
-|The field records the trace of the message. Each collector and normalizer appends
-information about itself and the date and time when the message was processed.
-|===
diff --git a/_unused_topics/cluster-logging-exported-fields-default.2021-06-04.adoc b/_unused_topics/cluster-logging-exported-fields-default.2021-06-04.adoc
deleted file mode 100644
index e26b60808513..000000000000
--- a/_unused_topics/cluster-logging-exported-fields-default.2021-06-04.adoc
+++ /dev/null
@@ -1,1100 +0,0 @@
-// Module included in the following assemblies:
-//
-// * logging/cluster-logging-exported-fields.adoc
-
-[id="cluster-logging-exported-fields-default_{context}"]
-= Default exported fields
-
-These are the default fields exported by the logging system and available for searching
-from Elasticsearch and Kibana. The default fields are the top-level fields and the `collectd*` fields.
-
-[discrete]
-=== Top Level Fields
-
-The top level fields are common to every application and can be present in
-every record. For the Elasticsearch template, top level fields populate the actual
-mappings of `default` in the template's mapping section.
-
-[cols="3,7",options="header"]
-|===
-|Parameter
-|Description
-
-| `@timestamp`
-| The UTC value marking when the log payload was created, or when the log payload
-was first collected if the creation time is not known. This is the log
-processing pipeline's best effort determination of when the log payload was
-generated. Add the `@` prefix convention to note a field as being reserved for a
-particular use. With Elasticsearch, most tools look for `@timestamp` by default.
-For example, the format would be 2015-01-24 14:06:05.071000.
-
-| `geoip`
-|This is geo-ip of the machine.
-
-| `hostname`
-|The `hostname` is the fully qualified domain name (FQDN) of the entity
-generating the original payload. This field is an attempt to derive this
-context. Sometimes the entity generating the payload knows its own context;
-other times the entity has a restricted namespace that is known only by the
-collector or normalizer.
-
-| `ipaddr4`
-|The IP address V4 of the source server, which can be an array.
-
-| `ipaddr6`
-|The IP address V6 of the source server, if available.
-
-| `level`
-|The logging level as provided by rsyslog (the severitytext property) or python's
-logging module. Possible values are as listed at
-link:http://sourceware.org/git/?p=glibc.git;a=blob;f=misc/sys/syslog.h;h=ee01478c4b19a954426a96448577c5a76e6647c0;hb=HEAD#l74[`misc/sys/syslog.h`]
-plus `trace` and `unknown`. For example, _alert crit debug emerg err info notice
-trace unknown warning_. Note that `trace` is not in the `syslog.h` list but many
-applications use it.
-
-* Use `unknown` only when the logging system gets a value it does not
-understand; note that it is the highest level.
-
-* Consider `trace` as higher, or more verbose, than `debug`.
-
-* `error` is deprecated; use `err`.
-
-* Convert `panic` to `emerg`.
-
-* Convert `warn` to `warning`.
-
-Numeric values from `syslog/journal PRIORITY` can usually be mapped using the
-priority values as listed at
-link:http://sourceware.org/git/?p=glibc.git;a=blob;f=misc/sys/syslog.h;h=ee01478c4b19a954426a96448577c5a76e6647c0;hb=HEAD#l51[misc/sys/syslog.h].
-
-Log levels and priorities from other logging systems should be mapped to the
-nearest match. See
-link:https://docs.python.org/2.7/library/logging.html#logging-levels[python
-logging] for an example.
-
-| `message`
-|A typical log entry message, or payload. The message can be stripped of any
-metadata that the collector or normalizer pulls out of it, and is UTF-8 encoded.
-
-| `pid`
-|This is the process ID of the logging entity, if available.
-
-| `service`
-|The name of the service associated with the logging entity, if available. For
-example, the `syslog APP-NAME` property is mapped to
-the service field.
-
-| `tags`
-|An optional, operator-defined list of tags placed on each log by the
-collector or normalizer. The payload can be a string with whitespace-delimited
-string tokens, or a JSON list of string tokens.
-
-| `file`
-|Optional path to the file containing the log entry local to the collector `TODO`
-analyzer for file paths.
-
-| `offset`
-|The offset value can represent bytes to the start of the log line in the file
-(zero or one based), or log line numbers (zero or one based), as long as the
-values are strictly monotonically increasing in the context of a single log
-file. The values are allowed to wrap, representing a new version of the log file
-(rotation).
-
-| `namespace_name`
-|Associate this record with the `namespace` that shares its name. This value
-will not be stored, but it is used to associate the record with the appropriate
-`namespace` for access control and visualization. Normally this value will be
-given in the tag, but if the protocol does not support sending a tag, this field
-can be used. If this field is present, it will override the `namespace` given in
-the tag or in `kubernetes.namespace_name`.
-
-| `namespace_uuid`
-|This is the `uuid` associated with the `namespace_name`. This value will not be
-stored, but is used to associate the record with the appropriate namespace for
-access control and visualization. If this field is present, it will override the
-`uuid` given in `kubernetes.namespace_uuid`. This will also cause the Kubernetes
-metadata lookup to be skipped for this log record.
-|===
-
-[discrete]
-=== `collectd` Fields
-
-The following fields represent namespace metrics metadata.
-
-[cols="3,7",options="header"]
-|===
-|Parameter
-|Description
-
-| `collectd.interval`
-|type: float
-
-The `collectd` interval.
-
-| `collectd.plugin`
-|type: string
-
-The `collectd` plug-in.
-
-| `collectd.plugin_instance`
-|type: string
-
-The `collectd` plugin_instance.
-
-| `collectd.type_instance`
-|type: string
-
-The `collectd` `type_instance`.
-
-| `collectd.type`
-|type: string
-
-The `collectd` type.
-
-| `collectd.dstypes`
-|type: string
-
-The `collectd` dstypes.
-|===
-
-[discrete]
-[id="exported-fields-collectd.processes_{context}"]
-=== `collectd.processes` Fields
-
-The following field corresponds to the `collectd` processes plug-in.
-
-[cols="3,7",options="header"]
-|===
-|Parameter
-|Description
-
-| `collectd.processes.ps_state`
-|type: integer
-
-The `collectd` `ps_state` type of processes plug-in.
-|===
-
-[discrete]
-[id="exported-fields-collectd.processes.ps_disk_ops_{context}"]
-=== `collectd.processes.ps_disk_ops` Fields
-
-The `collectd` `ps_disk_ops` type of processes plug-in.
-
-[cols="3,7",options="header"]
-|===
-|Parameter
-|Description
-
-| `collectd.processes.ps_disk_ops.read`
-|type: float
-
-`TODO`
-
-| `collectd.processes.ps_disk_ops.write`
-|type: float
-
-`TODO`
-
-| `collectd.processes.ps_vm`
-|type: integer
-
-The `collectd` `ps_vm` type of processes plug-in.
-
-| `collectd.processes.ps_rss`
-|type: integer
-
-The `collectd` `ps_rss` type of processes plug-in.
-
-| `collectd.processes.ps_data`
-|type: integer
-
-The `collectd` `ps_data` type of processes plug-in.
-
-| `collectd.processes.ps_code`
-|type: integer
-
-The `collectd` `ps_code` type of processes plug-in.
-
-| `collectd.processes.ps_stacksize`
-| type: integer
-
-The `collectd` `ps_stacksize` type of processes plug-in.
-|===
-
-[discrete]
-[id="exported-fields-collectd.processes.ps_cputime_{context}"]
-=== `collectd.processes.ps_cputime` Fields
-
-The `collectd` `ps_cputime` type of processes plug-in.
-
-[cols="3,7",options="header"]
-|===
-|Parameter
-|Description
-
-| `collectd.processes.ps_cputime.user`
-|type: float
-
-`TODO`
-
-| `collectd.processes.ps_cputime.syst`
-|type: float
-
-`TODO`
-|===
-
-[discrete]
-[id="exported-fields-collectd.processes.ps_count_{context}"]
-=== `collectd.processes.ps_count` Fields
-
-The `collectd` `ps_count` type of processes plug-in.
-
-[cols="3,7",options="header"]
-|===
-|Parameter
-|Description
-
-| `collectd.processes.ps_count.processes`
-|type: integer
-
-`TODO`
-
-| `collectd.processes.ps_count.threads`
-|type: integer
-
-`TODO`
-|===
-
-[discrete]
-[id="exported-fields-collectd.processes.ps_pagefaults_{context}"]
-=== `collectd.processes.ps_pagefaults` Fields
-
-The `collectd` `ps_pagefaults` type of processes plug-in.
-
-[cols="3,7",options="header"]
-|===
-|Parameter
-|Description
-
-| `collectd.processes.ps_pagefaults.majflt`
-|type: float
-
-`TODO`
-
-| `collectd.processes.ps_pagefaults.minflt`
-|type: float
-
-`TODO`
-|===
-
-[discrete]
-[id="exported-fields-collectd.processes.ps_disk_octets_{context}"]
-=== `collectd.processes.ps_disk_octets` Fields
-
-The `collectd` `ps_disk_octets` type of processes plug-in.
-
-[cols="3,7",options="header"]
-|===
-|Parameter
-|Description
-
-| `collectd.processes.ps_disk_octets.read`
-|type: float
-
-`TODO`
-
-| `collectd.processes.ps_disk_octets.write`
-|type: float
-
-`TODO`
-
-| `collectd.processes.fork_rate`
-|type: float
-
-The `collectd` `fork_rate` type of processes plug-in.
-|===
-
-[discrete]
-[id="exported-fields-collectd.disk_{context}"]
-=== `collectd.disk` Fields
-
-Corresponds to `collectd` disk plug-in.
-
-[discrete]
-[id="exported-fields-collectd.disk.disk_merged_{context}"]
-=== `collectd.disk.disk_merged` Fields
-
-The `collectd` `disk_merged` type of disk plug-in.
-
-[cols="3,7",options="header"]
-|===
-|Parameter
-|Description
-
-| `collectd.disk.disk_merged.read`
-|type: float
-
-`TODO`
-
-| `collectd.disk.disk_merged.write`
-|type: float
-
-`TODO`
-|===
-
-[discrete]
-[id="exported-fields-collectd.disk.disk_octets_{context}"]
-=== `collectd.disk.disk_octets` Fields
-
-The `collectd` `disk_octets` type of disk plug-in.
-
-[cols="3,7",options="header"]
-|===
-|Parameter
-|Description
-
-| `collectd.disk.disk_octets.read`
-|type: float
-
-`TODO`
-
-| `collectd.disk.disk_octets.write`
-|type: float
-
-`TODO`
-|===
-
-[discrete]
-[id="exported-fields-collectd.disk.disk_time_{context}"]
-=== `collectd.disk.disk_time` Fields
-
-The `collectd` `disk_time` type of disk plug-in.
-
-[cols="3,7",options="header"]
-|===
-|Parameter
-|Description
-
-| `collectd.disk.disk_time.read`
-|type: float
-
-`TODO`
-
-| `collectd.disk.disk_time.write`
-|type: float
-
-`TODO`
-|===
-
-[discrete]
-[id="exported-fields-collectd.disk.disk_ops_{context}"]
-=== `collectd.disk.disk_ops` Fields
-
-The `collectd` `disk_ops` type of disk plug-in.
-
-[cols="3,7",options="header"]
-|===
-|Parameter
-|Description
-
-| `collectd.disk.disk_ops.read`
-|type: float
-
-`TODO`
-
-| `collectd.disk.disk_ops.write`
-|type: float
-
-`TODO`
-
-| `collectd.disk.pending_operations`
-|type: integer
-
-The `collectd` `pending_operations` type of disk plug-in.
-|===
-
-[discrete]
-[id="exported-fields-collectd.disk.disk_io_time_{context}"]
-=== `collectd.disk.disk_io_time` Fields
-
-The `collectd` `disk_io_time` type of disk plug-in.
-
-[cols="3,7",options="header"]
-|===
-|Parameter
-|Description
-
-| `collectd.disk.disk_io_time.io_time`
-|type: float
-
-`TODO`
-
-| `collectd.disk.disk_io_time.weighted_io_time`
-|type: float
-
-`TODO`
-|===
-
-[discrete]
-[id="exported-fields-collectd.interface_{context}"]
-=== `collectd.interface` Fields
-
-Corresponds to the `collectd` interface plug-in.
-
-[discrete]
-[id="exported-fields-collectd.interface.if_octets_{context}"]
-=== `collectd.interface.if_octets` Fields
-
-The `collectd` `if_octets` type of interface plug-in.
-
-[cols="3,7",options="header"]
-|===
-|Parameter
-|Description
-
-| `collectd.interface.if_octets.rx`
-|type: float
-
-`TODO`
-
-| `collectd.interface.if_octets.tx`
-|type: float
-
-`TODO`
-|===
-
-[discrete]
-[id="exported-fields-collectd.interface.if_packets_{context}"]
-=== `collectd.interface.if_packets` Fields
-
-The `collectd` `if_packets` type of interface plug-in.
-
-[cols="3,7",options="header"]
-|===
-|Parameter
-|Description
-
-| `collectd.interface.if_packets.rx`
-|type: float
-
-`TODO`
-
-| `collectd.interface.if_packets.tx`
-|type: float
-
-`TODO`
-|===
-
-[discrete]
-[id="exported-fields-collectd.interface.if_errors_{context}"]
-=== `collectd.interface.if_errors` Fields
-
-The `collectd` `if_errors` type of interface plug-in.
-
-[cols="3,7",options="header"]
-|===
-|Parameter
-|Description
-
-| `collectd.interface.if_errors.rx`
-|type: float
-
-`TODO`
-
-| `collectd.interface.if_errors.tx`
-|type: float
-
-`TODO`
-|===
-
-[discrete]
-[id="exported-fields-collectd.interface.if_dropped_{context}"]
-=== `collectd.interface.if_dropped` Fields
-
-The `collectd` `if_dropped` type of interface plug-in.
-
-[cols="3,7",options="header"]
-|===
-|Parameter
-|Description
-
-| `collectd.interface.if_dropped.rx`
-|type: float
-
-`TODO`
-
-| `collectd.interface.if_dropped.tx`
-|type: float
-
-`TODO`
-|===
-
-[discrete]
-[id="exported-fields-collectd.virt_{context}"]
-=== `collectd.virt` Fields
-
-Corresponds to `collectd` virt plug-in.
-
-[discrete]
-[id="exported-fields-collectd.virt.if_octets_{context}"]
-=== `collectd.virt.if_octets` Fields
-
-The `collectd` `if_octets` type of virt plug-in.
-
-[cols="3,7",options="header"]
-|===
-|Parameter
-|Description
-
-| `collectd.virt.if_octets.rx`
-|type: float
-
-`TODO`
-
-| `collectd.virt.if_octets.tx`
-|type: float
-
-`TODO`
-|===
-
-[discrete]
-[id="exported-fields-collectd.virt.if_packets_{context}"]
-=== `collectd.virt.if_packets` Fields
-
-The `collectd` `if_packets` type of virt plug-in.
-
-[cols="3,7",options="header"]
-|===
-|Parameter
-|Description
-
-| `collectd.virt.if_packets.rx`
-|type: float
-
-`TODO`
-
-| `collectd.virt.if_packets.tx`
-|type: float
-
-`TODO`
-|===
-
-[discrete]
-[id="exported-fields-collectd.virt.if_errors_{context}"]
-=== `collectd.virt.if_errors` Fields
-
-The `collectd` `if_errors` type of virt plug-in.
-
-[cols="3,7",options="header"]
-|===
-|Parameter
-|Description
-
-| `collectd.virt.if_errors.rx`
-|type: float
-
-`TODO`
-
-| `collectd.virt.if_errors.tx`
-|type: float
-
-`TODO`
-|===
-
-[discrete]
-[id="exported-fields-collectd.virt.if_dropped_{context}"]
-=== `collectd.virt.if_dropped` Fields
-
-The `collectd` `if_dropped` type of virt plug-in.
-
-[cols="3,7",options="header"]
-|===
-|Parameter
-|Description
-
-| `collectd.virt.if_dropped.rx`
-|type: float
-
-`TODO`
-
-| `collectd.virt.if_dropped.tx`
-|type: float
-
-`TODO`
-|===
-
-[discrete]
-[id="exported-fields-collectd.virt.disk_ops_{context}"]
-=== `collectd.virt.disk_ops` Fields
-
-The `collectd` `disk_ops` type of virt plug-in.
-
-[cols="3,7",options="header"]
-|===
-|Parameter
-|Description
-
-| `collectd.virt.disk_ops.read`
-|type: float
-
-`TODO`
-
-| `collectd.virt.disk_ops.write`
-|type: float
-
-`TODO`
-|===
-
-[discrete]
-[id="exported-fields-collectd.virt.disk_octets_{context}"]
-=== `collectd.virt.disk_octets` Fields
-
-The `collectd` `disk_octets` type of virt plug-in.
-
-[cols="3,7",options="header"]
-|===
-|Parameter
-|Description
-
-| `collectd.virt.disk_octets.read`
-|type: float
-
-`TODO`
-
-| `collectd.virt.disk_octets.write`
-|type: float
-
-`TODO`
-
-| `collectd.virt.memory`
-|type: float
-
-The `collectd` memory type of virt plug-in.
-
-| `collectd.virt.virt_vcpu`
-|type: float
-
-The `collectd` `virt_vcpu` type of virt plug-in.
-
-| `collectd.virt.virt_cpu_total`
-|type: float
-
-The `collectd` `virt_cpu_total` type of virt plug-in.
-|===
-
-[discrete]
-[id="exported-fields-collectd.CPU_{context}"]
-=== `collectd.CPU` Fields
-
-Corresponds to the `collectd` CPU plug-in.
-
-[cols="3,7",options="header"]
-|===
-|Parameter
-|Description
-
-| `collectd.CPU.percent`
-|type: float
-
-The `collectd` type percent of plug-in CPU.
-|===
-
-[discrete]
-[id="exported-fields-collectd.df_{context}"]
-=== `collectd.df` Fields
-
-Corresponds to the `collectd` `df` plug-in.
-
-[cols="3,7",options="header"]
-|===
-|Parameter
-|Description
-
-| `collectd.df.df_complex`
-|type: float
-
-The `collectd` type `df_complex` of plug-in `df`.
-
-| `collectd.df.percent_bytes`
-|type: float
-
-The `collectd` type `percent_bytes` of plug-in `df`.
-|===
-
-[discrete]
-[id="exported-fields-collectd.entropy_{context}"]
-=== `collectd.entropy` Fields
-
-Corresponds to the `collectd` entropy plug-in.
-
-[cols="3,7",options="header"]
-|===
-|Parameter
-|Description
-
-| `collectd.entropy.entropy`
-|type: integer
-
-The `collectd` entropy type of entropy plug-in.
-|===
-
-////
-[discrete]
-[id="exported-fields-collectd.nfs_{context}"]
-=== `collectd.nfs` Fields
-
-Corresponds to the `collectd` NFS plug-in.
-
-[cols="3,7",options="header"]
-|===
-|Parameter
-|Description
-
-| `collectd.nfs.nfs_procedure`
-|type: integer
-
-The `collectd` `nfs_procedure` type of nfs plug-in.
-|===
-////
-
-[discrete]
-[id="exported-fields-collectd.memory_{context}"]
-=== `collectd.memory` Fields
-
-Corresponds to the `collectd` memory plug-in.
-
-[cols="3,7",options="header"]
-|===
-|Parameter
-|Description
-
-| `collectd.memory.memory`
-|type: float
-
-The `collectd` memory type of memory plug-in.
-
-| `collectd.memory.percent`
-|type: float
-
-The `collectd` percent type of memory plug-in.
-|===
-
-[discrete]
-[id="exported-fields-collectd.swap_{context}"]
-=== `collectd.swap` Fields
-
-Corresponds to the `collectd` swap plug-in.
-
-[cols="3,7",options="header"]
-|===
-|Parameter
-|Description
-
-| `collectd.swap.swap`
-|type: integer
-
-The `collectd` swap type of swap plug-in.
-
-| `collectd.swap.swap_io`
-|type: integer
-
-The `collectd` `swap_io` type of swap plug-in.
-|===
-
-[discrete]
-[id="exported-fields-collectd.load_{context}"]
-=== `collectd.load` Fields
-
-Corresponds to the `collectd` load plug-in.
-
-[discrete]
-[id="exported-fields-collectd.load.load_{context}"]
-=== `collectd.load.load` Fields
-
-The `collectd` `load` type of the load plug-in.
-
-[cols="3,7",options="header"]
-|===
-|Parameter
-|Description
-
-| `collectd.load.load.shortterm`
-|type: float
-
-`TODO`
-
-| `collectd.load.load.midterm`
-|type: float
-
-`TODO`
-
-| `collectd.load.load.longterm`
-|type: float
-
-`TODO`
-|===
-
-[discrete]
-[id="exported-fields-collectd.aggregation_{context}"]
-=== `collectd.aggregation` Fields
-
-Corresponds to `collectd` aggregation plug-in.
-
-[cols="3,7",options="header"]
-|===
-|Parameter
-|Description
-
-| `collectd.aggregation.percent`
-|type: float
-
-`TODO`
-|===
-
-[discrete]
-[id="exported-fields-collectd.statsd_{context}"]
-=== `collectd.statsd` Fields
-
-Corresponds to `collectd` `statsd` plug-in.
-
-[cols="3,7",options="header"]
-|===
-|Parameter
-|Description
-
-| `collectd.statsd.host_cpu`
-|type: integer
-
-The `collectd` CPU type of `statsd` plug-in.
-
-| `collectd.statsd.host_elapsed_time`
-|type: integer
-
-The `collectd` `elapsed_time` type of `statsd` plug-in.
-
-| `collectd.statsd.host_memory`
-|type: integer
-
-The `collectd` memory type of `statsd` plug-in.
-
-| `collectd.statsd.host_nic_speed`
-|type: integer
-
-The `collectd` `nic_speed` type of `statsd` plug-in.
-
-| `collectd.statsd.host_nic_rx`
-|type: integer
-
-The `collectd` `nic_rx` type of `statsd` plug-in.
-
-| `collectd.statsd.host_nic_tx`
-|type: integer
-
-The `collectd` `nic_tx` type of `statsd` plug-in.
-
-| `collectd.statsd.host_nic_rx_dropped`
-|type: integer
-
-The `collectd` `nic_rx_dropped` type of `statsd` plug-in.
-
-| `collectd.statsd.host_nic_tx_dropped`
-|type: integer
-
-The `collectd` `nic_tx_dropped` type of `statsd` plug-in.
-
-| `collectd.statsd.host_nic_rx_errors`
-|type: integer
-
-The `collectd` `nic_rx_errors` type of `statsd` plug-in.
-
-| `collectd.statsd.host_nic_tx_errors`
-|type: integer
-
-The `collectd` `nic_tx_errors` type of `statsd` plug-in.
-
-| `collectd.statsd.host_storage`
-|type: integer
-
-The `collectd` storage type of `statsd` plug-in.
-
-| `collectd.statsd.host_swap`
-|type: integer
-
-The `collectd` swap type of `statsd` plug-in.
-
-| `collectd.statsd.host_vdsm`
-|type: integer
-
-The `collectd` VDSM type of `statsd` plug-in.
-
-| `collectd.statsd.host_vms`
-|type: integer
-
-The `collectd` VMS type of `statsd` plug-in.
-
-| `collectd.statsd.vm_nic_tx_dropped`
-|type: integer
-
-The `collectd` `nic_tx_dropped` type of `statsd` plug-in.
-
-| `collectd.statsd.vm_nic_rx_bytes`
-|type: integer
-
-The `collectd` `nic_rx_bytes` type of `statsd` plug-in.
-
-| `collectd.statsd.vm_nic_tx_bytes`
-|type: integer
-
-The `collectd` `nic_tx_bytes` type of `statsd` plug-in.
-
-| `collectd.statsd.vm_balloon_min`
-|type: integer
-
-The `collectd` `balloon_min` type of `statsd` plug-in.
-
-| `collectd.statsd.vm_balloon_max`
-|type: integer
-
-The `collectd` `balloon_max` type of `statsd` plug-in.
-
-| `collectd.statsd.vm_balloon_target`
-|type: integer
-
-The `collectd` `balloon_target` type of `statsd` plug-in.
-
-| `collectd.statsd.vm_balloon_cur`
-| type: integer
-
-The `collectd` `balloon_cur` type of `statsd` plug-in.
-
-| `collectd.statsd.vm_cpu_sys`
-|type: integer
-
-The `collectd` `cpu_sys` type of `statsd` plug-in.
-
-| `collectd.statsd.vm_cpu_usage`
-|type: integer
-
-The `collectd` `cpu_usage` type of `statsd` plug-in.
-
-| `collectd.statsd.vm_disk_read_ops`
-|type: integer
-
-The `collectd` `disk_read_ops` type of `statsd` plug-in.
-
-| `collectd.statsd.vm_disk_write_ops`
-|type: integer
-
-The `collectd` `disk_write_ops` type of `statsd` plug-in.
-
-| `collectd.statsd.vm_disk_flush_latency`
-|type: integer
-
-The `collectd` `disk_flush_latency` type of `statsd` plug-in.
-
-| `collectd.statsd.vm_disk_apparent_size`
-|type: integer
-
-The `collectd` `disk_apparent_size` type of `statsd` plug-in.
-
-| `collectd.statsd.vm_disk_write_bytes`
-|type: integer
-
-The `collectd` `disk_write_bytes` type of `statsd` plug-in.
-
-| `collectd.statsd.vm_disk_write_rate`
-|type: integer
-
-The `collectd` `disk_write_rate` type of `statsd` plug-in.
-
-| `collectd.statsd.vm_disk_true_size`
-|type: integer
-
-The `collectd` `disk_true_size` type of `statsd` plug-in.
-
-| `collectd.statsd.vm_disk_read_rate`
-|type: integer
-
-The `collectd` `disk_read_rate` type of `statsd` plug-in.
-
-| `collectd.statsd.vm_disk_write_latency`
-|type: integer
-
-The `collectd` `disk_write_latency` type of `statsd` plug-in.
-
-| `collectd.statsd.vm_disk_read_latency`
-|type: integer
-
-The `collectd` `disk_read_latency` type of `statsd` plug-in.
-
-| `collectd.statsd.vm_disk_read_bytes`
-|type: integer
-
-The `collectd` `disk_read_bytes` type of `statsd` plug-in.
-
-| `collectd.statsd.vm_nic_rx_dropped`
-|type: integer
-
-The `collectd` `nic_rx_dropped` type of `statsd` plug-in.
-
-| `collectd.statsd.vm_cpu_user`
-|type: integer
-
-The `collectd` `cpu_user` type of `statsd` plug-in.
-
-| `collectd.statsd.vm_nic_rx_errors`
-|type: integer
-
-The `collectd` `nic_rx_errors` type of `statsd` plug-in.
-
-| `collectd.statsd.vm_nic_tx_errors`
-|type: integer
-
-The `collectd` `nic_tx_errors` type of `statsd` plug-in.
-
-| `collectd.statsd.vm_nic_speed`
-|type: integer
-
-The `collectd` `nic_speed` type of `statsd` plug-in.
-|===
-
-[discrete]
-[id="exported-fields-collectd.postgresql_{context}"]
-=== `collectd.postgresql` Fields
-
-Corresponds to `collectd` `postgresql` plug-in.
-
-[cols="3,7",options="header"]
-|===
-|Parameter
-|Description
-
-| `collectd.postgresql.pg_n_tup_g`
-|type: integer
-
-The `collectd` type `pg_n_tup_g` of plug-in postgresql.
-
-| `collectd.postgresql.pg_n_tup_c`
-|type: integer
-
-The `collectd` type `pg_n_tup_c` of plug-in postgresql.
-
-| `collectd.postgresql.pg_numbackends`
-|type: integer
-
-The `collectd` type `pg_numbackends` of plug-in postgresql.
-
-| `collectd.postgresql.pg_xact`
-|type: integer
-
-The `collectd` type `pg_xact` of plug-in postgresql.
-
-| `collectd.postgresql.pg_db_size`
-|type: integer
-
-The `collectd` type `pg_db_size` of plug-in postgresql.
-
-| `collectd.postgresql.pg_blks`
-|type: integer
-
-The `collectd` type `pg_blks` of plug-in postgresql.
-|===
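The numeric `syslog/journal PRIORITY` mapping that the `level` field description refers to is small enough to spell out; a sketch of the conventional table from `misc/sys/syslog.h`:

[source,terminal]
----
# Conventional syslog PRIORITY-to-level mapping; lowest number = most severe.
$ printf '%s\n' 0:emerg 1:alert 2:crit 3:err 4:warning 5:notice 6:info 7:debug
----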
diff --git a/_unused_topics/cluster-logging-exported-fields-docker.adoc b/_unused_topics/cluster-logging-exported-fields-docker.adoc
deleted file mode 100644
index 26d77f062ca0..000000000000
--- a/_unused_topics/cluster-logging-exported-fields-docker.adoc
+++ /dev/null
@@ -1,89 +0,0 @@
-// Module included in the following assemblies:
-//
-// * logging/cluster-logging-exported-fields.adoc
-
-[id="cluster-logging-exported-fields-container_{context}"]
-= Container exported fields
-
-These are the Docker fields exported by OpenShift Logging that are available for searching from Elasticsearch and Kibana.
-The `docker` namespace holds Docker container-specific metadata; `docker.container_id` is the Docker container ID.
-
-
-[discrete]
-[id="pipeline_metadata.collector_{context}"]
-=== `pipeline_metadata.collector` Fields
-
-This section contains metadata specific to the collector.
-
-[cols="3,7",options="header"]
-|===
-|Parameter
-|Description
-
-| `pipeline_metadata.collector.hostname`
-|FQDN of the collector. It might be different from the FQDN of the actual emitter
-of the logs.
-
-| `pipeline_metadata.collector.name`
-|Name of the collector.
-
-| `pipeline_metadata.collector.version`
-|Version of the collector.
-
-| `pipeline_metadata.collector.ipaddr4`
-|IP address v4 of the collector server, can be an array.
-
-| `pipeline_metadata.collector.ipaddr6`
-|IP address v6 of the collector server, can be an array.
-
-| `pipeline_metadata.collector.inputname`
-|How the log message was received by the collector, whether it was TCP/UDP or
-imjournal/imfile.
-
-| `pipeline_metadata.collector.received_at`
-|Time when the message was received by the collector.
-
-| `pipeline_metadata.collector.original_raw_message`
-|The original non-parsed log message, collected by the collector or as close to the
-source as possible.
-|===
-
-[discrete]
-[id="exported-fields-pipeline_metadata.normalizer_{context}"]
-=== `pipeline_metadata.normalizer` Fields
-
-This section contains metadata specific to the normalizer.
-
-[cols="3,7",options="header"]
-|===
-|Parameter
-|Description
-
-| `pipeline_metadata.normalizer.hostname`
-|FQDN of the normalizer.
-
-| `pipeline_metadata.normalizer.name`
-|Name of the normalizer.
-
-| `pipeline_metadata.normalizer.version`
-|Version of the normalizer.
-
-| `pipeline_metadata.normalizer.ipaddr4`
-|IP address v4 of the normalizer server, can be an array.
-
-| `pipeline_metadata.normalizer.ipaddr6`
-|IP address v6 of the normalizer server, can be an array.
-
-| `pipeline_metadata.normalizer.inputname`
-|How the log message was received by the normalizer, whether it was TCP/UDP.
-
-| `pipeline_metadata.normalizer.received_at`
-|Time when the message was received by the normalizer.
-
-| `pipeline_metadata.normalizer.original_raw_message`
-|The original non-parsed log message as it is received by the normalizer.
-
-| `pipeline_metadata.trace`
-|The field records the trace of the message. Each collector and normalizer appends
-information about itself and the date and time when the message was processed.
-|===
diff --git a/_unused_topics/cluster-logging-exported-fields-kubernetes.2021-06-04.adoc b/_unused_topics/cluster-logging-exported-fields-kubernetes.2021-06-04.adoc
deleted file mode 100644
index d40a3ddd446e..000000000000
--- a/_unused_topics/cluster-logging-exported-fields-kubernetes.2021-06-04.adoc
+++ /dev/null
@@ -1,83 +0,0 @@
-[id="cluster-logging-exported-fields-kubernetes_{context}"]
-= Kubernetes
-
-The following fields can be present in the `kubernetes` namespace, which holds Kubernetes-specific metadata.
-
-== kubernetes.pod_name
-
-The name of the pod.
-
-[horizontal]
-Data type:: keyword
-
-
-== kubernetes.pod_id
-
-Kubernetes ID of the pod.
-
-[horizontal]
-Data type:: keyword
-
-
-== kubernetes.namespace_name
-
-The name of the namespace in Kubernetes.
-
-[horizontal]
-Data type:: keyword
-
-
-== kubernetes.namespace_id
-
-ID of the namespace in Kubernetes.
-
-[horizontal]
-Data type:: keyword
-
-
-== kubernetes.host
-
-The Kubernetes node name.
-
-[horizontal]
-Data type:: keyword
-
-
-== kubernetes.master_url
-
-The Kubernetes master URL.
-
-[horizontal]
-Data type:: keyword
-
-
-== kubernetes.container_name
-
-The name of the container in Kubernetes.
-
-[horizontal]
-Data type:: text
-
-
-== kubernetes.annotations
-
-Annotations associated with the Kubernetes object.
-
-[horizontal]
-Data type:: group
-
-
-== kubernetes.labels
-
-Labels attached to the Kubernetes object. Each label name is a subfield of the `labels` field, and each label name is de-dotted: dots in the name are replaced with underscores.
-
-[horizontal]
-Data type:: group
-
-
-== kubernetes.event
-
-The Kubernetes event obtained from the Kubernetes master API. The event is already a JSON object and is nested as a whole under the `kubernetes` field. This description loosely follows `type Event` in the Kubernetes API reference: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#event-v1-core
-
-[horizontal]
-Data type:: group
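The de-dotting rule described for `kubernetes.labels` is easy to sanity-check locally; a sketch of the transformation applied to a label key (the label name here is only an example):

[source,terminal]
----
# Dots in a label name become underscores before the record is indexed.
$ echo 'app.kubernetes.io/part-of' | sed 's/\./_/g'
app_kubernetes_io/part-of
----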
diff --git a/_unused_topics/cluster-logging-exported-fields-ovirt.2021-06-04.adoc b/_unused_topics/cluster-logging-exported-fields-ovirt.2021-06-04.adoc
deleted file mode 100644
index 6c5dcd5b4470..000000000000
--- a/_unused_topics/cluster-logging-exported-fields-ovirt.2021-06-04.adoc
+++ /dev/null
@@ -1,30 +0,0 @@
-// Module included in the following assemblies:
-//
-// * logging/cluster-logging-exported-fields.adoc
-
-[id="cluster-logging-exported-fields-ovirt_{context}"]
-= oVirt exported fields
-
-These are the oVirt fields exported by OpenShift Logging available for searching
-from Elasticsearch and Kibana.
-
-Namespace for oVirt metadata.
-
-[cols="3,7",options="header"]
-|===
-|Parameter
-|Description
-
-| `ovirt.entity`
-|The type of the data source: hosts, VMs, or engine.
-
-| `ovirt.host_id`
-|The oVirt host UUID.
-|===
-
-[discrete]
-[id="exported-fields-ovirt.engine_{context}"]
-=== `ovirt.engine` Fields
-
-Namespace for metadata related to the {rh-virtualization-engine-name}. The FQDN of the {rh-virtualization-engine-name} is stored in
-`ovirt.engine.fqdn`.
diff --git a/_unused_topics/cluster-logging-exported-fields-rsyslog.2021-06-04.adoc b/_unused_topics/cluster-logging-exported-fields-rsyslog.2021-06-04.adoc
deleted file mode 100644
index fec43d97ad1a..000000000000
--- a/_unused_topics/cluster-logging-exported-fields-rsyslog.2021-06-04.adoc
+++ /dev/null
@@ -1,34 +0,0 @@
-// Module included in the following assemblies:
-//
-// * logging/cluster-logging-exported-fields.adoc
-
-[id="cluster-logging-exported-fields-rsyslog_{context}"]
-= `rsyslog` exported fields
-
-These are the `rsyslog` fields exported by the logging system and available for searching
-from Elasticsearch and Kibana.
-
-The following fields are RFC5424 based metadata.
-
-[cols="3,7",options="header"]
-|===
-|Parameter
-|Description
-
-| `rsyslog.facility`
-|The `syslog` facility. See the `syslog` specification for more information.
-
-| `rsyslog.protocol-version`
-|The `rsyslog` protocol version.
-
-| `rsyslog.structured-data`
-|See the `syslog` specification for more information on `syslog` structured data.
-
-| `rsyslog.msgid`
-|The `syslog` `msgid` field.
-
-| `rsyslog.appname`
-|If `app-name` is the same as `programname`, only the top-level `service` field is filled.
-If `app-name` is not equal to `programname`, this field holds `app-name`.
-See the `syslog` specification for more information.
-|===
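The facility and severity that feed `rsyslog.facility` can be exercised from any host with the `logger` utility; a minimal sketch (the facility, severity, and message text are arbitrary choices):

[source,terminal]
----
# Emit a test message with an explicit facility and severity;
# rsyslog records the facility in the rsyslog.facility field.
$ logger -p local0.err "test message for facility mapping"
----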
diff --git a/_unused_topics/cluster-logging-exported-fields-systemd.2021-06-04.adoc b/_unused_topics/cluster-logging-exported-fields-systemd.2021-06-04.adoc
deleted file mode 100644
index 19e1d6a4cdca..000000000000
--- a/_unused_topics/cluster-logging-exported-fields-systemd.2021-06-04.adoc
+++ /dev/null
@@ -1,195 +0,0 @@
-// Module included in the following assemblies:
-//
-// * logging/cluster-logging-exported-fields.adoc
-
-[id="cluster-logging-exported-fields-systemd_{context}"]
-= systemd exported fields
-
-These are the `systemd` fields exported by OpenShift Logging available for searching
-from Elasticsearch and Kibana.
-
-Contains common fields specific to `systemd` journal.
-link:https://www.freedesktop.org/software/systemd/man/systemd.journal-fields.html[Applications]
-can write their own fields to the journal. These will be available under the
-`systemd.u` namespace. `RESULT` and `UNIT` are two such fields.
-
-[discrete]
-[id="exported-fields-systemd.k_{context}"]
-=== `systemd.k` Fields
-
-The following table contains `systemd` kernel-specific metadata.
-
-[cols="3,7",options="header"]
-|===
-|Parameter
-|Description
-
-| `systemd.k.KERNEL_DEVICE`
-|link:https://www.freedesktop.org/software/systemd/man/systemd.journal-fields.html#_KERNEL_DEVICE=[`systemd.k.KERNEL_DEVICE`]
-is the kernel device name.
-
-| `systemd.k.KERNEL_SUBSYSTEM`
-|link:https://www.freedesktop.org/software/systemd/man/systemd.journal-fields.html#_KERNEL_SUBSYSTEM=[`systemd.k.KERNEL_SUBSYSTEM`]
-is the kernel subsystem name.
-
-| `systemd.k.UDEV_DEVLINK`
-|link:https://www.freedesktop.org/software/systemd/man/systemd.journal-fields.html#_UDEV_DEVLINK=[`systemd.k.UDEV_DEVLINK`]
-includes additional symlink names that point to the node.
-
-| `systemd.k.UDEV_DEVNODE`
-|link:https://www.freedesktop.org/software/systemd/man/systemd.journal-fields.html#_UDEV_DEVNODE=[`systemd.k.UDEV_DEVNODE`]
-is the node path of the device.
-
-| `systemd.k.UDEV_SYSNAME`
-|link:https://www.freedesktop.org/software/systemd/man/systemd.journal-fields.html#_UDEV_SYSNAME=[`systemd.k.UDEV_SYSNAME`]
-is the kernel device name.
-
-|===
-
-[discrete]
-[id="exported-fields-systemd.t_{context}"]
-=== `systemd.t` Fields
-
-The `systemd.t` fields are trusted journal fields that are implicitly added
-by the journal and cannot be altered by client code.
-
-[cols="3,7",options="header"]
-|===
-|Parameter
-|Description
-
-| `systemd.t.AUDIT_LOGINUID`
-|link:https://www.freedesktop.org/software/systemd/man/systemd.journal-fields.html#_AUDIT_SESSION=[`systemd.t.AUDIT_LOGINUID`]
-is the user ID for the journal entry process.
-
-| `systemd.t.BOOT_ID`
-|link:https://www.freedesktop.org/software/systemd/man/systemd.journal-fields.html#_BOOT_ID=[`systemd.t.BOOT_ID`]
-is the kernel boot ID.
-
-| `systemd.t.AUDIT_SESSION`
-|link:https://www.freedesktop.org/software/systemd/man/systemd.journal-fields.html#_AUDIT_SESSION=[`systemd.t.AUDIT_SESSION`]
-is the session for the journal entry process.
-
-| `systemd.t.CAP_EFFECTIVE`
-|link:https://www.freedesktop.org/software/systemd/man/systemd.journal-fields.html#_CAP_EFFECTIVE=[`systemd.t.CAP_EFFECTIVE`]
-represents the capabilities of the journal entry process.
-
-| `systemd.t.CMDLINE`
-|link:https://www.freedesktop.org/software/systemd/man/systemd.journal-fields.html#_COMM=[`systemd.t.CMDLINE`]
-is the command line of the journal entry process.
-
-| `systemd.t.COMM`
-|link:https://www.freedesktop.org/software/systemd/man/systemd.journal-fields.html#_COMM=[`systemd.t.COMM`]
-is the name of the journal entry process.
-
-| `systemd.t.EXE`
-|link:https://www.freedesktop.org/software/systemd/man/systemd.journal-fields.html#_COMM=[`systemd.t.EXE`]
-is the executable path of the journal entry process.
-
-| `systemd.t.GID`
-|link:https://www.freedesktop.org/software/systemd/man/systemd.journal-fields.html#_PID=[`systemd.t.GID`]
-is the group ID for the journal entry process.
-
-| `systemd.t.HOSTNAME`
-|link:https://www.freedesktop.org/software/systemd/man/systemd.journal-fields.html#_HOSTNAME=[`systemd.t.HOSTNAME`]
-is the name of the host.
-
-| `systemd.t.MACHINE_ID`
-|link:https://www.freedesktop.org/software/systemd/man/systemd.journal-fields.html#_MACHINE_ID=[`systemd.t.MACHINE_ID`]
-is the machine ID of the host.
-
-| `systemd.t.PID`
-|link:https://www.freedesktop.org/software/systemd/man/systemd.journal-fields.html#_PID=[`systemd.t.PID`]
-is the process ID for the journal entry process.
-
-| `systemd.t.SELINUX_CONTEXT`
-|link:https://www.freedesktop.org/software/systemd/man/systemd.journal-fields.html#_SELINUX_CONTEXT=[`systemd.t.SELINUX_CONTEXT`]
-is the security context, or label, for the journal entry process.
-
-| `systemd.t.SOURCE_REALTIME_TIMESTAMP`
-|link:https://www.freedesktop.org/software/systemd/man/systemd.journal-fields.html#_SOURCE_REALTIME_TIMESTAMP=[`systemd.t.SOURCE_REALTIME_TIMESTAMP`]
-is the earliest and most reliable timestamp of the message. This is converted to RFC 3339 NS format.
-
-| `systemd.t.SYSTEMD_CGROUP`
-|link:https://www.freedesktop.org/software/systemd/man/systemd.journal-fields.html#_SYSTEMD_CGROUP=[`systemd.t.SYSTEMD_CGROUP`]
-is the `systemd` control group path.
-
-| `systemd.t.SYSTEMD_OWNER_UID`
-|link:https://www.freedesktop.org/software/systemd/man/systemd.journal-fields.html#_SYSTEMD_CGROUP=[`systemd.t.SYSTEMD_OWNER_UID`]
-is the owner ID of the session.
-
-| `systemd.t.SYSTEMD_SESSION`
-|link:https://www.freedesktop.org/software/systemd/man/systemd.journal-fields.html#_SYSTEMD_CGROUP=[`systemd.t.SYSTEMD_SESSION`],
-if applicable, is the `systemd` session ID.
-
-| `systemd.t.SYSTEMD_SLICE`
-|link:https://www.freedesktop.org/software/systemd/man/systemd.journal-fields.html#_SYSTEMD_CGROUP=[`systemd.t.SYSTEMD_SLICE`]
-is the slice unit of the journal entry process.
-
-| `systemd.t.SYSTEMD_UNIT`
-|link:https://www.freedesktop.org/software/systemd/man/systemd.journal-fields.html#_SYSTEMD_CGROUP=[`systemd.t.SYSTEMD_UNIT`]
-is the unit name for a session.
-
-| `systemd.t.SYSTEMD_USER_UNIT`
-|link:https://www.freedesktop.org/software/systemd/man/systemd.journal-fields.html#_SYSTEMD_CGROUP=[`systemd.t.SYSTEMD_USER_UNIT`],
-if applicable, is the user unit name for a session.
-
-| `systemd.t.TRANSPORT`
-|link:https://www.freedesktop.org/software/systemd/man/systemd.journal-fields.html#_TRANSPORT=[`systemd.t.TRANSPORT`]
-is the method of entry by the journal service. This includes `audit`, `driver`,
-`syslog`, `journal`, `stdout`, and `kernel`.
-
-| `systemd.t.UID`
-|link:https://www.freedesktop.org/software/systemd/man/systemd.journal-fields.html#_PID=[`systemd.t.UID`]
-is the user ID for the journal entry process.
-
-| `systemd.t.SYSLOG_FACILITY`
-|link:https://www.freedesktop.org/software/systemd/man/systemd.journal-fields.html#SYSLOG_FACILITY=[`systemd.t.SYSLOG_FACILITY`]
-is the field containing the facility, formatted as a decimal string, for `syslog`.
-
-| `systemd.t.SYSLOG_IDENTIFIER`
-|link:https://www.freedesktop.org/software/systemd/man/systemd.journal-fields.html#SYSLOG_FACILITY=[`systemd.t.SYSLOG_IDENTIFIER`]
-is the identifier for `syslog`.
-
-| `systemd.t.SYSLOG_PID`
-|link:https://www.freedesktop.org/software/systemd/man/systemd.journal-fields.html#SYSLOG_FACILITY=[`systemd.t.SYSLOG_PID`]
-is the client process ID for `syslog`.
-|===
-
-[discrete]
-[id="exported-fields-systemd.u_{context}"]
-=== `systemd.u` Fields
-
-The `systemd.u` fields are passed directly from clients and stored in the journal.
-
-[cols="3,7",options="header"]
-|===
-|Parameter
-|Description
-
-| `systemd.u.CODE_FILE`
-|link:https://www.freedesktop.org/software/systemd/man/systemd.journal-fields.html#CODE_FILE=[`systemd.u.CODE_FILE`]
-is the code location containing the filename of the source.
-
-| `systemd.u.CODE_FUNCTION`
-|link:https://www.freedesktop.org/software/systemd/man/systemd.journal-fields.html#CODE_FILE=[`systemd.u.CODE_FUNCTION`]
-is the code location containing the function of the source.
-
-| `systemd.u.CODE_LINE`
-|link:https://www.freedesktop.org/software/systemd/man/systemd.journal-fields.html#CODE_FILE=[`systemd.u.CODE_LINE`]
-is the code location containing the line number of the source.
-
-| `systemd.u.ERRNO`
-|link:https://www.freedesktop.org/software/systemd/man/systemd.journal-fields.html#ERRNO=[`systemd.u.ERRNO`],
-if present, is the low-level error number, formatted as a decimal string.
-
-| `systemd.u.MESSAGE_ID`
-|link:https://www.freedesktop.org/software/systemd/man/systemd.journal-fields.html#MESSAGE_ID=[`systemd.u.MESSAGE_ID`]
-is the message identifier for recognizing message types.
-
-| `systemd.u.RESULT`
-|For private use only.
-
-| `systemd.u.UNIT`
-|For private use only.
-|===
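The trusted `systemd.t` fields correspond to the journal's underscore-prefixed fields, which can be inspected directly on any host; a quick sketch:

[source,terminal]
----
# Show the most recent journal entry with all fields; trusted fields
# such as _PID, _UID, and _SYSTEMD_UNIT carry a leading underscore.
$ journalctl -n 1 -o verbose
----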
diff --git a/_unused_topics/cluster-logging-exported-fields-tlog.2021-06-04.adoc b/_unused_topics/cluster-logging-exported-fields-tlog.2021-06-04.adoc
deleted file mode 100644
index 82724afc1591..000000000000
--- a/_unused_topics/cluster-logging-exported-fields-tlog.2021-06-04.adoc
+++ /dev/null
@@ -1,51 +0,0 @@
-// Module included in the following assemblies:
-//
-// * logging/cluster-logging-exported-fields.adoc
-
-[id="cluster-logging-exported-fields-tlog_{context}"]
-= Tlog exported fields
-
-These are the Tlog fields exported by the OpenShift Logging system and available for searching
-from Elasticsearch and Kibana.
-
-These fields hold Tlog terminal I/O recording messages. For more information, see
-link:https://github.com/Scribery/tlog[Tlog].
-
-[cols="3,7",options="header"]
-|===
-|Parameter
-|Description
-
-| `tlog.ver`
-|Message format version number.
-
-| `tlog.user`
-|Recorded user name.
-
-| `tlog.term`
-|Terminal type name.
-
-| `tlog.session`
-|Audit session ID of the recorded session.
-
-| `tlog.id`
-|ID of the message within the session.
-
-| `tlog.pos`
-|Message position in the session, in milliseconds.
-
-| `tlog.timing`
-|Distribution of this message's events in time.
-
-| `tlog.in_txt`
-|Input text with invalid characters scrubbed.
-
-| `tlog.in_bin`
-|Scrubbed invalid input characters as bytes.
-
-| `tlog.out_txt`
-|Output text with invalid characters scrubbed.
-
-| `tlog.out_bin`
-|Scrubbed invalid output characters as bytes.
-|===
diff --git a/_unused_topics/cluster-logging-kibana-console-launch.adoc b/_unused_topics/cluster-logging-kibana-console-launch.adoc
deleted file mode 100644
index 44b23c483030..000000000000
--- a/_unused_topics/cluster-logging-kibana-console-launch.adoc
+++ /dev/null
@@ -1,28 +0,0 @@
-// Module included in the following assemblies:
-//
-// * logging/cluster-logging-kibana-console.adoc
-// * logging/cluster-logging-visualizer.adoc
-
-[id="cluster-logging-kibana-visualize_{context}"]
-= Launching the Kibana interface
-
-The Kibana interface is a browser-based console
-to query, discover, and visualize your Elasticsearch data through histograms, line graphs,
-pie charts, heat maps, built-in geospatial support, and other visualizations.
-
-.Procedure
-
-To launch the Kibana interface:
-
-. In the {product-title} console, click *Observe* -> *Logging*.
-
-. Log in using the same credentials you use to log in to the {product-title} console.
-+
-The Kibana interface launches. You can now:
-+
-* Search and browse your data using the Discover page.
-* Chart and map your data using the Visualize page.
-* Create and view custom dashboards using the Dashboard page.
-+
-Use and configuration of the Kibana interface is beyond the scope of this documentation. For more information
-on using the interface, see the link:https://www.elastic.co/guide/en/kibana/5.6/connect-to-elasticsearch.html[Kibana documentation].
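For CLI users, the Kibana URL that this procedure reaches through the console can also be read from the exposed route, assuming the default `kibana` route in the `openshift-logging` namespace:

[source,terminal]
----
# Print the externally reachable Kibana hostname.
$ oc get route kibana -n openshift-logging -o jsonpath='{.spec.host}'
----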
diff --git a/_unused_topics/cluster-logging-log-forwarding-disable.adoc b/_unused_topics/cluster-logging-log-forwarding-disable.adoc
deleted file mode 100644
index 680ea9b95686..000000000000
--- a/_unused_topics/cluster-logging-log-forwarding-disable.adoc
+++ /dev/null
@@ -1,47 +0,0 @@
-// Module included in the following assemblies:
-//
-// * logging/cluster-logging-external.adoc
-
-[id="cluster-logging-log-forwarding-disable_{context}"]
-= Disabling the Log Forwarding feature
-
-To disable the Log Forwarding feature, remove the `clusterlogging.openshift.io/logforwardingtechpreview: enabled` annotation from the `ClusterLogging` custom resource (CR) and delete the `ClusterLogForwarder` CR. The container and node logs are then forwarded to the internal {product-title} Elasticsearch instance.
-
-[IMPORTANT]
-====
-You cannot disable Log Forwarding by setting the `disableDefaultForwarding` parameter to `false` in the `ClusterLogForwarder` CR. Doing so prevents OpenShift Logging from sending logs to the specified endpoints *and* to the default internal {product-title} Elasticsearch instance.
-====
-
-.Procedure
-
-To disable the Log Forwarding feature:
-
-. Edit the OpenShift Logging CR in the `openshift-logging` project:
-+
-[source,terminal]
-----
-$ oc edit ClusterLogging instance
-----
-
-. Remove the `clusterlogging.openshift.io/logforwardingtechpreview` annotation:
-+
-[source,yaml]
-----
-apiVersion: "logging.openshift.io/v1"
-kind: "ClusterLogging"
-metadata:
- annotations:
- clusterlogging.openshift.io/logforwardingtechpreview: enabled <1>
- name: "instance"
- namespace: "openshift-logging"
-...
-----
-<1> Remove this annotation.
-
-. Delete the `ClusterLogForwarder` CR:
-+
-[source,terminal]
-----
-$ oc delete LogForwarding instance -n openshift-logging
-----
-
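Rather than editing the CR interactively, the annotation can also be removed in one step with `oc annotate`; a sketch of the equivalent non-interactive command:

[source,terminal]
----
# The trailing dash removes the annotation instead of setting it.
$ oc -n openshift-logging annotate ClusterLogging instance \
    clusterlogging.openshift.io/logforwardingtechpreview-
----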
diff --git a/_unused_topics/cluster-logging-uninstall-cluster-ops.adoc b/_unused_topics/cluster-logging-uninstall-cluster-ops.adoc
deleted file mode 100644
index ec4c0d37eac0..000000000000
--- a/_unused_topics/cluster-logging-uninstall-cluster-ops.adoc
+++ /dev/null
@@ -1,19 +0,0 @@
-// Module included in the following assemblies:
-//
-// * logging/cluster-logging-uninstall.adoc
-
-[id="cluster-logging-uninstall-ops_{context}"]
-= Uninstall the infra cluster
-
-You can uninstall the infra cluster from OpenShift Logging.
-After uninstalling, Fluentd no longer splits logs.
-
-.Procedure
-
-To uninstall the infra cluster:
-
-.
-
-.
-
-.
diff --git a/_unused_topics/completing-installation.adoc b/_unused_topics/completing-installation.adoc
index 911997c61e1a..a3d3235f7312 100644
--- a/_unused_topics/completing-installation.adoc
+++ b/_unused_topics/completing-installation.adoc
@@ -12,14 +12,14 @@ INFO Install complete!
INFO Run 'export KUBECONFIG=/home/joe/ocp/auth/kubeconfig' to manage the cluster with 'oc', the {product-title} CLI.
-INFO The cluster is ready when 'oc login -u kubeadmin -p 39RPg-y4c7V-n4bbn-vAF3M' succeeds (wait a few minutes).
+INFO The cluster is ready when 'oc login -u kubeadmin -p <password>' succeeds (wait a few minutes).
INFO Access the {product-title} web-console here: https://console-openshift-console.apps.mycluster.devel.example.com
-INFO Login to the console with user: kubeadmin, password: 39RPg-y4c7V-n4bbn-vAF3M
+INFO Login to the console with user: kubeadmin, password: <password>
----
-To access the {product-title} cluster from your web browser, log in as kubeadmin with the password (for example, 39RPg-y4c7V-n4bbn-vAF3M), using the URL shown:
+To access the {product-title} cluster from your web browser, log in as kubeadmin with the password, using the URL shown:
https://console-openshift-console.apps.mycluster.devel.example.com
@@ -27,7 +27,7 @@ To access the {product-title} cluster from the command line, identify the locati
----
$ export KUBECONFIG=/home/joe/ocp/auth/kubeconfig
-$ oc login -u kubeadmin -p 39RPg-y4c7V-n4bbn-vAF3M
+$ oc login -u kubeadmin -p <password>
----
At this point, you can begin using the {product-title} cluster. To understand the management of your {product-title} cluster going forward, you should explore the {product-title} control plane.
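After exporting `KUBECONFIG` or logging in, a quick sanity check confirms that the credentials work before you go further:

[source,terminal]
----
# Confirm the identity in use and that the API answers.
$ oc whoami
$ oc get nodes
----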
diff --git a/distr_tracing/distr_tracing_config/_attributes b/_unused_topics/container_storage_interface_microshift/_attributes
similarity index 100%
rename from distr_tracing/distr_tracing_config/_attributes
rename to _unused_topics/container_storage_interface_microshift/_attributes
diff --git a/microshift_storage/container_storage_interface_microshift/images b/_unused_topics/container_storage_interface_microshift/images
similarity index 100%
rename from microshift_storage/container_storage_interface_microshift/images
rename to _unused_topics/container_storage_interface_microshift/images
diff --git a/microshift_storage/container_storage_interface_microshift/microshift-persistent-storage-csi.adoc b/_unused_topics/container_storage_interface_microshift/microshift-persistent-storage-csi.adoc
similarity index 96%
rename from microshift_storage/container_storage_interface_microshift/microshift-persistent-storage-csi.adoc
rename to _unused_topics/container_storage_interface_microshift/microshift-persistent-storage-csi.adoc
index a30265734821..4275a8ecbbdd 100644
--- a/microshift_storage/container_storage_interface_microshift/microshift-persistent-storage-csi.adoc
+++ b/_unused_topics/container_storage_interface_microshift/microshift-persistent-storage-csi.adoc
@@ -1,4 +1,4 @@
-:_content-type: ASSEMBLY
+:_mod-docs-content-type: ASSEMBLY
[id="persistent-storage-csi-microshift"]
= Configuring CSI volumes for {product-title}
include::_attributes/attributes-microshift.adoc[]
diff --git a/logging/v5_5/modules b/_unused_topics/container_storage_interface_microshift/modules
similarity index 100%
rename from logging/v5_5/modules
rename to _unused_topics/container_storage_interface_microshift/modules
diff --git a/microshift_storage/container_storage_interface_microshift/snippets b/_unused_topics/container_storage_interface_microshift/snippets
similarity index 100%
rename from microshift_storage/container_storage_interface_microshift/snippets
rename to _unused_topics/container_storage_interface_microshift/snippets
diff --git a/_unused_topics/distr-tracing-deploy-otel-collector.adoc b/_unused_topics/distr-tracing-deploy-otel-collector.adoc
deleted file mode 100644
index d628b2501f73..000000000000
--- a/_unused_topics/distr-tracing-deploy-otel-collector.adoc
+++ /dev/null
@@ -1,128 +0,0 @@
-////
-This module included in the following assemblies:
-- distr_tracing_install/distr-tracing-deploying.adoc
-////
-
-:_content-type: PROCEDURE
-[id="distr-tracing-deploy-otel-collector_{context}"]
-= Deploying distributed tracing data collection
-
-The custom resource definition (CRD) defines the configuration used when you deploy an instance of {OTELName}.
-
-.Prerequisites
-
-* The {OTELName} Operator has been installed.
-//* You have reviewed the instructions for how to customize the deployment.
-* You have access to the cluster as a user with the `cluster-admin` role.
-
-.Procedure
-
-. Log in to the OpenShift web console as a user with the `cluster-admin` role.
-
-. Create a new project, for example `tracing-system`.
-+
-[NOTE]
-====
-If you are installing distributed tracing as part of Service Mesh, the {DTShortName} resources must be installed in the same namespace as the `ServiceMeshControlPlane` resource, for example `istio-system`.
-====
-+
-.. Navigate to *Home* -> *Projects*.
-
-.. Click *Create Project*.
-
-.. Enter `tracing-system` in the *Name* field.
-
-.. Click *Create*.
-
-. Navigate to *Operators* -> *Installed Operators*.
-
-. If necessary, select `tracing-system` from the *Project* menu. You might have to wait a few moments for the Operators to be copied to the new project.
-
-. Click the *{OTELName} Operator*. On the *Details* tab, under *Provided APIs*, the Operator provides a single link.
-
-. Under *OpenTelemetryCollector*, click *Create Instance*.
-
-. On the *Create OpenTelemetry Collector* page, to install using the defaults, click *Create* to create the {OTELShortName} instance.
-
-. On the *OpenTelemetryCollectors* page, click the name of the {OTELShortName} instance, for example, `opentelemetrycollector-sample`.
-
-. On the *Details* page, click the *Resources* tab. Wait until the pod has a status of "Running" before continuing.
-
-[id="distr-tracing-deploy-otel-collector-cli_{context}"]
-= Deploying {OTELShortName} from the CLI
-
-Follow this procedure to create an instance of {OTELShortName} from the command line.
-
-.Prerequisites
-
-* The {OTELName} Operator has been installed and verified.
-+
-//* You have reviewed the instructions for how to customize the deployment.
-+
-* You have access to the OpenShift CLI (`oc`) that matches your {product-title} version.
-* You have access to the cluster as a user with the `cluster-admin` role.
-
-.Procedure
-
-. Log in to the {product-title} CLI as a user with the `cluster-admin` role.
-+
-[source,terminal]
-----
-$ oc login https://<hostname>:8443
-----
-
-. Create a new project named `tracing-system`.
-+
-[source,terminal]
-----
-$ oc new-project tracing-system
-----
-
-. Create a custom resource file named `opentelemetrycollector.yaml` that contains the following text:
-+
-.Example opentelemetrycollector.yaml
-[source,yaml]
-----
- apiVersion: opentelemetry.io/v1alpha1
- kind: OpenTelemetryCollector
- metadata:
- name: opentelemetrycollector-sample
- namespace: tracing-system
- spec:
- image: >-
- registry.redhat.io/rhosdt/opentelemetry-collector-rhel8@sha256:61934ea5793c55900d09893e8f8b1f2dbd2e712faba8e97684e744691b29f25e
- config: |
- receivers:
- jaeger:
- protocols:
- grpc:
- exporters:
- logging:
- service:
- pipelines:
- traces:
- receivers: [jaeger]
- exporters: [logging]
-----
-
-. Run the following command to deploy the {OTELShortName} instance:
-+
-[source,terminal]
-----
-$ oc create -n tracing-system -f opentelemetrycollector.yaml
-----
-
-. Run the following command to watch the progress of the pods during the installation process:
-+
-[source,terminal]
-----
-$ oc get pods -n tracing-system -w
-----
-+
-After the installation process has completed, you should see output similar to the following example:
-+
-[source,terminal]
-----
-NAME READY STATUS RESTARTS AGE
-opentelemetrycollector-cdff7897b-qhfdx 2/2 Running 0 24s
-----
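Because the sample pipeline wires the Jaeger receiver to the `logging` exporter, received spans appear in the collector's own log; a sketch for verifying this, assuming the Operator names the workload with a `-collector` suffix:

[source,terminal]
----
# Spans received on the Jaeger gRPC receiver are printed by the
# logging exporter, so they show up in the pod log.
$ oc logs -n tracing-system \
    deployment/opentelemetrycollector-sample-collector --follow
----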
diff --git a/installing/installing_azure/manually-creating-iam-azure.adoc b/_unused_topics/manually-creating-iam-azure.adoc
similarity index 86%
rename from installing/installing_azure/manually-creating-iam-azure.adoc
rename to _unused_topics/manually-creating-iam-azure.adoc
index 879beea678f3..fe736063c6b3 100644
--- a/installing/installing_azure/manually-creating-iam-azure.adoc
+++ b/_unused_topics/manually-creating-iam-azure.adoc
@@ -1,6 +1,6 @@
-:_content-type: ASSEMBLY
+:_mod-docs-content-type: ASSEMBLY
[id="manually-creating-iam-azure"]
-= Manually creating IAM for Azure
+= Manually creating long-term credentials for Azure
include::_attributes/common-attributes.adoc[]
:context: manually-creating-iam-azure
@@ -18,7 +18,7 @@ include::modules/manually-create-identity-access-management.adoc[leveloffset=+1]
[role="_additional-resources"]
.Additional resources
-* xref:../../updating/preparing-manual-creds-update.adoc#preparing-manual-creds-update[Preparing to update a cluster with manually maintained credentials]
+* xref:../../updating/preparing_for_updates/preparing-manual-creds-update.adoc#preparing-manual-creds-update[Preparing to update a cluster with manually maintained credentials]
[id="manually-creating-iam-azure-next-steps"]
== Next steps
diff --git a/installing/installing_gcp/manually-creating-iam-gcp.adoc b/_unused_topics/manually-creating-iam-gcp.adoc
similarity index 91%
rename from installing/installing_gcp/manually-creating-iam-gcp.adoc
rename to _unused_topics/manually-creating-iam-gcp.adoc
index de4865932e9c..2d315023f0fc 100644
--- a/installing/installing_gcp/manually-creating-iam-gcp.adoc
+++ b/_unused_topics/manually-creating-iam-gcp.adoc
@@ -1,4 +1,4 @@
-:_content-type: ASSEMBLY
+:_mod-docs-content-type: ASSEMBLY
[id="manually-creating-iam-gcp"]
= Manually creating IAM for GCP
include::_attributes/common-attributes.adoc[]
@@ -22,7 +22,7 @@ include::modules/manually-create-identity-access-management.adoc[leveloffset=+1]
[role="_additional-resources"]
.Additional resources
-* xref:../../updating/preparing-manual-creds-update.adoc#preparing-manual-creds-update[Preparing to update a cluster with manually maintained credentials]
+* xref:../../updating/preparing_for_updates/preparing-manual-creds-update.adoc#preparing-manual-creds-update[Preparing to update a cluster with manually maintained credentials]
include::modules/mint-mode.adoc[leveloffset=+1]
diff --git a/installing/installing_aws/manually-creating-iam.adoc b/_unused_topics/manually-creating-iam.adoc
similarity index 92%
rename from installing/installing_aws/manually-creating-iam.adoc
rename to _unused_topics/manually-creating-iam.adoc
index 11eaf4d626dc..03690cb0214d 100644
--- a/installing/installing_aws/manually-creating-iam.adoc
+++ b/_unused_topics/manually-creating-iam.adoc
@@ -1,4 +1,4 @@
-:_content-type: ASSEMBLY
+:_mod-docs-content-type: ASSEMBLY
[id="manually-creating-iam-aws"]
= Manually creating IAM for AWS
include::_attributes/common-attributes.adoc[]
@@ -27,7 +27,7 @@ include::modules/manually-create-identity-access-management.adoc[leveloffset=+1]
[role="_additional-resources"]
.Additional resources
-* xref:../../updating/preparing-manual-creds-update.adoc#preparing-manual-creds-update[Preparing to update a cluster with manually maintained credentials]
+* xref:../../updating/preparing_for_updates/preparing-manual-creds-update.adoc#preparing-manual-creds-update[Preparing to update a cluster with manually maintained credentials]
include::modules/mint-mode.adoc[leveloffset=+1]
diff --git a/_unused_topics/microshift-adding-containers-to-blueprint.adoc b/_unused_topics/microshift-adding-containers-to-blueprint.adoc
index d4eb48d33d08..bb4e24116315 100644
--- a/_unused_topics/microshift-adding-containers-to-blueprint.adoc
+++ b/_unused_topics/microshift-adding-containers-to-blueprint.adoc
@@ -2,7 +2,7 @@
//
// microshift/microshift-embed-into-rpm-ostree.adoc
-:_content-type: PROCEDURE
+:_mod-docs-content-type: PROCEDURE
[id="adding-microshift-container-images_{context}"]
= Adding the {product-title} container images
@@ -51,7 +51,7 @@ $ rpm2cpio microshift-release-info-${VERSION}.noarch.rpm | cpio -idmv
+
[source,terminal]
----
-$ jq -r '.images | .[] | ("[[containers]]\nsource = \"" + . + "\"\n")' ./usr/share/microshift/release/release-$(uname -i).json
+$ jq -r '.images | .[] | ("[[containers]]\nsource = \"" + . + "\"\n")' ./usr/share/microshift/release/release-$(uname -m).json
----
+
.Brief output sample
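
A side note on the `uname` fix above: `uname -i` reports the hardware platform, which many distributions leave as `unknown`, while `uname -m` reports the machine type (for example, `x86_64`) that matches the per-architecture release file names. A quick sketch to confirm which release file the `jq` command reads on a given host:

[source,terminal]
----
# `uname -m` returns the machine architecture used in the release file name.
$ uname -m

# The release info file resolved by the documented command:
$ ls ./usr/share/microshift/release/release-$(uname -m).json
----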
diff --git a/_unused_topics/microshift-man-config-ovs-bridge.adoc b/_unused_topics/microshift-man-config-ovs-bridge.adoc
index bf6226f997ae..5cd9ef845376 100644
--- a/_unused_topics/microshift-man-config-ovs-bridge.adoc
+++ b/_unused_topics/microshift-man-config-ovs-bridge.adoc
@@ -7,31 +7,31 @@
//* Initiate OVS:
//+
-//[source, terminal]
+//[source,terminal]
//----
//$ sudo systemctl enable openvswitch --now
//----
//* Add the network bridge:
//+
-//[source, terminal]
+//[source,terminal]
//----
//$ sudo ovs-vsctl add-br br-ex
//----
//* Add the interface to the network bridge:
//+
-//[source, terminal]
+//[source,terminal]
//----
//$ sudo ovs-vsctl add-port br-ex <interface_name>
//----
//The `<interface_name>` value is the network interface name where the node IP address is assigned.
//* Get the bridge up and running:
//+
-//[source, terminal]
+//[source,terminal]
//----
//$ sudo ip link set br-ex up
//----
//* After `br-ex up` is running, assign the node IP address to `br-ex` bridge:
-//[source, terminal]
+//[source,terminal]
//----
//$ sudo ...
//----
diff --git a/_unused_topics/microshift-nodeport-unreachable-workaround.adoc b/_unused_topics/microshift-nodeport-unreachable-workaround.adoc
index 4bef2a62fce3..39fc6c0db6f4 100644
--- a/_unused_topics/microshift-nodeport-unreachable-workaround.adoc
+++ b/_unused_topics/microshift-nodeport-unreachable-workaround.adoc
@@ -2,7 +2,7 @@
//
// * module may be unused in 4.13
-:_content-type: PROCEDURE
+:_mod-docs-content-type: PROCEDURE
[id="microshift-nodeport-unreachable-workaround_{context}"]
= Manually restarting the `ovnkube-master` pod to resume node port traffic
@@ -21,21 +21,21 @@ Run the commands listed in each step that follows to restore the `NodePort` serv
. Find the name of the `ovnkube-master` pod that you want to restart by running the following command:
+
-[source, terminal]
+[source,terminal]
----
$ pod=$(oc get pods -n openshift-ovn-kubernetes | grep ovnkube-master | awk -F " " '{print $1}')
----
. Force a restart of the `ovnkube-master` pod by running the following command:
+
-[source, terminal]
+[source,terminal]
----
$ oc -n openshift-ovn-kubernetes delete pod $pod
----
. Optional: To confirm that the ovnkube-master pod restarted, run the following command:
+
-[source, terminal]
+[source,terminal]
----
$ oc get pods -n openshift-ovn-kubernetes
----
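
The three steps above can also be collapsed into a single command. This is only a convenience sketch built from the same `oc get` and `oc delete` calls used in the procedure:

[source,terminal]
----
# Find the ovnkube-master pod and force its restart in one step; the pod is
# recreated automatically and NodePort traffic resumes.
$ oc -n openshift-ovn-kubernetes delete pod \
    "$(oc get pods -n openshift-ovn-kubernetes | grep ovnkube-master | awk '{print $1}')"
----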
diff --git a/modules/mint-mode-with-removal-of-admin-credential.adoc b/_unused_topics/mint-mode-with-removal-of-admin-credential.adoc
similarity index 100%
rename from modules/mint-mode-with-removal-of-admin-credential.adoc
rename to _unused_topics/mint-mode-with-removal-of-admin-credential.adoc
diff --git a/modules/mint-mode.adoc b/_unused_topics/mint-mode.adoc
similarity index 96%
rename from modules/mint-mode.adoc
rename to _unused_topics/mint-mode.adoc
index 336407749062..fbcc9675fd92 100644
--- a/modules/mint-mode.adoc
+++ b/_unused_topics/mint-mode.adoc
@@ -3,7 +3,7 @@
// * installing/installing_aws/manually-creating-iam.adoc
// * installing/installing_gcp/manually-creating-iam-gcp.adoc
-:_content-type: CONCEPT
+:_mod-docs-content-type: CONCEPT
[id="mint-mode_{context}"]
= Mint mode
diff --git a/_unused_topics/nodes-containers-using-about.adoc b/_unused_topics/nodes-containers-using-about.adoc
index 0213c6acecc1..2f25d0cc342d 100644
--- a/_unused_topics/nodes-containers-using-about.adoc
+++ b/_unused_topics/nodes-containers-using-about.adoc
@@ -6,7 +6,7 @@
= Understanding Containers
The basic units of {product-title} applications are called _containers_.
-link:https://access.redhat.com/articles/1353593[Linux container technologies]
+link:https://www.redhat.com/en/topics/containers#overview[Linux container technologies]
are lightweight mechanisms for isolating running processes so that they are
limited to interacting with only their designated resources.
diff --git a/_unused_topics/osdk-updating-projects.adoc b/_unused_topics/osdk-updating-projects.adoc
index ee2cf2600ae0..ce6dc366fc2a 100644
--- a/_unused_topics/osdk-updating-projects.adoc
+++ b/_unused_topics/osdk-updating-projects.adoc
@@ -25,7 +25,7 @@ endif::[]
:osdk_ver: v1.25.0
:osdk_ver_n1: v1.22.0
-:_content-type: PROCEDURE
+:_mod-docs-content-type: PROCEDURE
[id="osdk-upgrading-projects_{context}"]
= Updating {type}-based Operator projects for Operator SDK {osdk_ver}
diff --git a/_unused_topics/osdk-updating-v1101-to-v1160.adoc b/_unused_topics/osdk-updating-v1101-to-v1160.adoc
index 11450144ef89..61484e0e9430 100644
--- a/_unused_topics/osdk-updating-v1101-to-v1160.adoc
+++ b/_unused_topics/osdk-updating-v1101-to-v1160.adoc
@@ -5,7 +5,7 @@
:osdk_ver: v1.16.0
:osdk_ver_n1: v1.10.1
-:_content-type: PROCEDURE
+:_mod-docs-content-type: PROCEDURE
[id="osdk-upgrading-v1101-to-v1160_{context}"]
= Updating projects for Operator SDK {osdk_ver}
diff --git a/modules/osdk-updating-v125-to-v128.adoc b/_unused_topics/osdk-updating-v125-to-v128.adoc
similarity index 93%
rename from modules/osdk-updating-v125-to-v128.adoc
rename to _unused_topics/osdk-updating-v125-to-v128.adoc
index 66d354173470..1ee8e317e20e 100644
--- a/modules/osdk-updating-v125-to-v128.adoc
+++ b/_unused_topics/osdk-updating-v125-to-v128.adoc
@@ -27,7 +27,7 @@ ifeval::["{context}" == "osdk-java-updating-projects"]
:type: Java
endif::[]
-:_content-type: PROCEDURE
+:_mod-docs-content-type: PROCEDURE
[id="osdk-upgrading-projects_{context}"]
= Updating {type}-based Operator projects for Operator SDK {osdk_ver}
@@ -41,10 +41,10 @@ The following procedure updates an existing {type}-based Operator project for co
.Procedure
ifdef::helm,hybrid,java[]
-* Find the `ose-kube-rbac-proxy` pull spec in the following files, and update the image tag to `v4.13`:
+* Find the `ose-kube-rbac-proxy` pull spec in the following files, and update the image tag to `v4.14`:
endif::[]
ifdef::ansible,golang[]
-. Find the `ose-kube-rbac-proxy` pull spec in the following files, and update the image tag to `v4.13`:
+. Find the `ose-kube-rbac-proxy` pull spec in the following files, and update the image tag to `v4.14`:
endif::[]
+
--
@@ -57,10 +57,10 @@ endif::[]
…
containers:
- name: kube-rbac-proxy
- image: registry.redhat.io/openshift4/ose-kube-rbac-proxy:v4.13 <1>
+ image: registry.redhat.io/openshift4/ose-kube-rbac-proxy:v4.14 <1>
…
----
-<1> Update the tag version from `v4.12` to `v4.13`.
+<1> Update the tag version from `v4.13` to `v4.14`.
ifdef::ansible[]
. Update your Makefile's `run` target to the following:
@@ -115,7 +115,7 @@ $ go mod tidy
. Modify your Makefile with the following changes:
-.. Change the `ENVTEST_K8S_VERSION` field from `1.25` to `1.26`.
+.. Change the `ENVTEST_K8S_VERSION` field from `1.26` to `1.27`.
.. Change the `build` target from `generate fmt vet` to `manifests generate fmt vet`:
+
[source,diff]
diff --git a/_unused_topics/osdk-upgrading-v180-to-v1101.adoc b/_unused_topics/osdk-upgrading-v180-to-v1101.adoc
index 91a7d73f2957..89e8643443de 100644
--- a/_unused_topics/osdk-upgrading-v180-to-v1101.adoc
+++ b/_unused_topics/osdk-upgrading-v180-to-v1101.adoc
@@ -5,7 +5,7 @@
:osdk_ver: v1.10.1
:osdk_ver_n1: v1.8.0
-:_content-type: PROCEDURE
+:_mod-docs-content-type: PROCEDURE
[id="osdk-upgrading-v180-to-v1101_{context}"]
= Upgrading projects for Operator SDK {osdk_ver}
diff --git a/modules/rosa-aws-understand.adoc b/_unused_topics/rosa-aws-understand.adoc
similarity index 94%
rename from modules/rosa-aws-understand.adoc
rename to _unused_topics/rosa-aws-understand.adoc
index e2c3a5270edb..851979c5507b 100644
--- a/modules/rosa-aws-understand.adoc
+++ b/_unused_topics/rosa-aws-understand.adoc
@@ -10,3 +10,5 @@ To deploy {product-title} (ROSA) into your existing Amazon Web Services (AWS) ac
Red Hat recommends using AWS Organizations to manage multiple AWS accounts. The AWS organization, managed by the customer, hosts multiple AWS accounts, and there is a root account in the organization that all accounts in the hierarchy refer to.
It is a best practice to host the ROSA cluster in an AWS account within an AWS organizational unit. A service control policy (SCP) is created and applied to the organizational unit to manage which services the AWS sub-accounts are permitted to access. The SCP applies only to available permissions within a single AWS account for all AWS sub-accounts within the organizational unit. It is also possible to apply an SCP to a single AWS account. All other accounts in the customer's AWS organization are managed in whatever manner the customer requires. Red Hat Site Reliability Engineers (SREs) have no control over SCPs within AWS Organizations.
+
+//2023-09-22: this module is not applicable to the prerequisites content.
\ No newline at end of file
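
As background for the SCP discussion above: attaching an SCP to an organizational unit happens outside the cluster, through the AWS console or CLI. A minimal sketch, with hypothetical policy and organizational unit IDs:

[source,terminal]
----
# Attach a service control policy to an organizational unit
# (both IDs are placeholders).
$ aws organizations attach-policy \
    --policy-id p-examplepolicy \
    --target-id ou-exampleou
----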
diff --git a/_unused_topics/serverless-rn-template-module.adoc b/_unused_topics/serverless-rn-template-module.adoc
index 2b373d05d109..4c8ff63b2eda 100644
--- a/_unused_topics/serverless-rn-template-module.adoc
+++ b/_unused_topics/serverless-rn-template-module.adoc
@@ -2,7 +2,7 @@
//
// * /serverless/serverless-release-notes.adoc
-:_content-type: REFERENCE
+:_mod-docs-content-type: REFERENCE
[id="serverless-rn-_{context}"]
= Release notes for Red Hat {ServerlessProductName}
// add a version, e.g. 1.20.0
diff --git a/modules/sts-mode-installing-manual-config.adoc b/_unused_topics/sts-mode-installing-manual-config.adoc
similarity index 100%
rename from modules/sts-mode-installing-manual-config.adoc
rename to _unused_topics/sts-mode-installing-manual-config.adoc
diff --git a/modules/sts-mode-installing-verifying.adoc b/_unused_topics/sts-mode-installing-verifying.adoc
similarity index 100%
rename from modules/sts-mode-installing-verifying.adoc
rename to _unused_topics/sts-mode-installing-verifying.adoc
diff --git a/_unused_topics/virt-creating-data-volumes-using-storage-api.adoc b/_unused_topics/virt-creating-data-volumes-using-storage-api.adoc
new file mode 100644
index 000000000000..b386a6828065
--- /dev/null
+++ b/_unused_topics/virt-creating-data-volumes-using-storage-api.adoc
@@ -0,0 +1,46 @@
+// Module included in the following assemblies:
+//
+// * virt/storage/virt-creating-data-volumes.adoc
+
+:_mod-docs-content-type: PROCEDURE
+[id="virt-creating-data-volumes-using-storage-api_{context}"]
+= Creating data volumes by using the storage API
+
+When you create a data volume by using the storage API, the Containerized Data Importer (CDI) optimizes your persistent volume claim (PVC) allocation based on the type of storage supported by your selected storage class. You only have to specify the data volume name, namespace, and the amount of storage that you want to allocate.
+
+For example:
+
+* When using Ceph RBD, `accessModes` is automatically set to `ReadWriteMany`, which enables live migration. `volumeMode` is set to `Block` to maximize performance.
+* When you use `volumeMode: Filesystem`, CDI automatically requests more space, if required, to accommodate file system overhead.
+
+The following YAML uses the storage API to request a data volume with two gigabytes of usable space. You do not need to know the `volumeMode` to correctly estimate the required PVC size. CDI chooses the optimal combination of `accessModes` and `volumeMode` attributes automatically, based on the type of storage or the defaults that you define in your storage profile. If you want to provide custom values, they override the system-calculated values.
+
+.Procedure
+
+. Create a YAML file for a `DataVolume` object as shown in the following example:
++
+[source,yaml]
+----
+apiVersion: cdi.kubevirt.io/v1beta1
+kind: DataVolume
+metadata:
+  name: <datavolume_name> <1>
+spec:
+ source:
+ pvc:
+ name: "" <2>
+ namespace: "" <3>
+ storage:
+ storageClassName: <4>
+----
+<1> Specify the name of the new data volume.
+<2> Specify the name of the source PVC.
+<3> Specify the namespace of the source PVC.
+<4> Optional: If the storage class is not specified, the default storage class is used.
+
+. Create the data volume by running the following command:
++
+[source,terminal]
+----
+$ oc create -f <file_name>.yaml
+----
\ No newline at end of file
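
After the data volume is created, the PVC provisioned by CDI carries the system-calculated `accessModes` and `volumeMode` values. A rough verification sketch, assuming the usual CDI behavior that the PVC inherits the data volume's name:

[source,terminal]
----
# Inspect the access mode and volume mode that CDI selected.
$ oc get pvc <datavolume_name> \
    -o jsonpath='{.spec.accessModes}{"\n"}{.spec.volumeMode}{"\n"}'
----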
diff --git a/adding_service_cluster/adding-service.adoc b/adding_service_cluster/adding-service.adoc
index bceea84dd308..b5684aa30682 100644
--- a/adding_service_cluster/adding-service.adoc
+++ b/adding_service_cluster/adding-service.adoc
@@ -1,4 +1,4 @@
-:_content-type: ASSEMBLY
+:_mod-docs-content-type: ASSEMBLY
include::_attributes/attributes-openshift-dedicated.adoc[]
[id="adding-service"]
= Adding services to a cluster using {cluster-manager-first} console
@@ -6,9 +6,9 @@ include::_attributes/attributes-openshift-dedicated.adoc[]
toc::[]
-You can add, access, and remove add-on services for your {product-title}
+You can add, access, and remove add-on services for your {product-title}
ifdef::openshift-rosa[]
-(ROSA)
+(ROSA)
endif::openshift-rosa[]
cluster by using {cluster-manager-first}.
@@ -25,5 +25,5 @@ include::modules/deleting-service.adoc[leveloffset=+1]
ifdef::openshift-rosa[]
[role="_additional-resources"]
== Additional resources
-* For information about the `cluster-logging-operator` and the AWS CloudWatch log forwarding service, see xref:../logging/cluster-logging-external.adoc#cluster-logging-collector-log-forward-cloudwatch_cluster-logging-external[Forwarding logs to Amazon CloudWatch]
+* xref:../logging/log_collection_forwarding/configuring-log-forwarding.adoc#cluster-logging-collector-log-forward-cloudwatch_configuring-log-forwarding[Forwarding logs to Amazon CloudWatch]
endif::[]
diff --git a/adding_service_cluster/available-services.adoc b/adding_service_cluster/available-services.adoc
index e1404beb62ea..382b72492a2d 100644
--- a/adding_service_cluster/available-services.adoc
+++ b/adding_service_cluster/available-services.adoc
@@ -1,4 +1,4 @@
-:_content-type: ASSEMBLY
+:_mod-docs-content-type: ASSEMBLY
include::_attributes/attributes-openshift-dedicated.adoc[]
[id="available-services"]
= Add-on services available for {product-title}
diff --git a/adding_service_cluster/rosa-available-services.adoc b/adding_service_cluster/rosa-available-services.adoc
index bc7325278d6f..feec8febc42b 100644
--- a/adding_service_cluster/rosa-available-services.adoc
+++ b/adding_service_cluster/rosa-available-services.adoc
@@ -1,4 +1,4 @@
-:_content-type: ASSEMBLY
+:_mod-docs-content-type: ASSEMBLY
include::_attributes/attributes-openshift-dedicated.adoc[]
[id="rosa-available-services"]
= Add-on services available for {product-title}
@@ -7,7 +7,7 @@ include::_attributes/attributes-openshift-dedicated.adoc[]
You can add services to your existing {product-title} (ROSA) cluster using the xref:../adding_service_cluster/adding-service.adoc#adding-service[{cluster-manager-first} console].
-These services can also be installed xref:../rosa_cli/rosa-manage-objects-cli.adoc#rosa-managing-objects-cli[using the ROSA CLI (`rosa`)].
+These services can also be installed xref:../cli_reference/rosa_cli/rosa-manage-objects-cli.adoc#rosa-managing-objects-cli[using the `rosa` CLI].
include::modules/aws-cloudwatch.adoc[leveloffset=+1]
@@ -16,7 +16,7 @@ include::modules/aws-cloudwatch.adoc[leveloffset=+1]
.Additional resources
* link:https://aws.amazon.com/cloudwatch/[Amazon CloudWatch product information]
-* xref:../logging/cluster-logging-external.adoc#cluster-logging-collector-log-forward-cloudwatch_cluster-logging-external[Forwarding logs to Amazon CloudWatch]
+* xref:../logging/log_collection_forwarding/configuring-log-forwarding.adoc#cluster-logging-collector-log-forward-cloudwatch_configuring-log-forwarding[Forwarding logs to Amazon CloudWatch]
include::modules/osd-rhoam.adoc[leveloffset=+1]
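
Installing an add-on service with the `rosa` CLI, referenced above, follows this general shape; the add-on ID and cluster name are placeholders:

[source,terminal]
----
# Install an add-on service on an existing ROSA cluster.
$ rosa install addon <addon_id> --cluster=<cluster_name>
----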
diff --git a/api-config.yaml b/api-config.yaml
index 56fa5e8d8319..5c253f8c9d5f 100644
--- a/api-config.yaml
+++ b/api-config.yaml
@@ -22,6 +22,9 @@ apiMap:
- kind: SubjectRulesReview
group: authorization.openshift.io
version: v1
+ - kind: SelfSubjectReview
+ group: authentication.k8s.io
+ version: v1
- kind: TokenRequest
group: authentication.k8s.io
version: v1
@@ -145,6 +148,9 @@ apiMap:
- kind: ConsoleQuickStart
group: console.openshift.io
version: v1
+ - kind: ConsoleSample
+ group: console.openshift.io
+ version: v1
- kind: ConsoleYAMLSample
group: console.openshift.io
version: v1
@@ -208,10 +214,13 @@ apiMap:
- kind: KubeletConfig
group: machineconfiguration.openshift.io
version: v1
- - kind: MachineConfigPool
+ - kind: MachineConfig
group: machineconfiguration.openshift.io
version: v1
- - kind: MachineConfig
+ - kind: MachineConfigNode
+ group: machineconfiguration.openshift.io
+ version: v1alpha1
+ - kind: MachineConfigPool
group: machineconfiguration.openshift.io
version: v1
- kind: MachineHealthCheck
@@ -255,6 +264,12 @@ apiMap:
- kind: AlertmanagerConfig
group: monitoring.coreos.com
version: v1beta1
+ - kind: AlertRelabelConfig
+ group: monitoring.openshift.io
+ version: v1
+ - kind: AlertingRule
+ group: monitoring.openshift.io
+ version: v1
- kind: PodMonitor
group: monitoring.coreos.com
version: v1
@@ -279,6 +294,9 @@ apiMap:
# - kind: ClusterNetwork
# group: network.openshift.io
# version: v1
+ - kind: AdminPolicyBasedExternalRoute
+ group: k8s.ovn.org
+ version: v1
- kind: CloudPrivateIPConfig
group: cloud.network.openshift.io
version: v1
@@ -291,6 +309,9 @@ apiMap:
- kind: EgressQoS
group: k8s.ovn.org
version: v1
+ - kind: EgressService
+ group: k8s.ovn.org
+ version: v1
- kind: Endpoints
version: v1
- kind: EndpointSlice
@@ -435,6 +456,9 @@ apiMap:
- kind: KubeStorageVersionMigrator
group: operator.openshift.io
version: v1
+ - kind: MachineConfiguration
+ group: operator.openshift.io
+ version: v1
- kind: Network
group: operator.openshift.io
version: v1
@@ -574,7 +598,7 @@ apiMap:
version: v1
- kind: FlowSchema
group: flowcontrol.apiserver.k8s.io
- version: v1beta1
+ version: v1beta3
- kind: LimitRange
version: v1
- kind: PriorityClass
@@ -582,7 +606,7 @@ apiMap:
version: v1
- kind: PriorityLevelConfiguration
group: flowcontrol.apiserver.k8s.io
- version: v1beta1
+ version: v1beta3
- kind: ResourceQuota
version: v1
- name: Security APIs
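
The `flowcontrol.apiserver.k8s.io` bump from `v1beta1` to `v1beta3` in this hunk tracks the version the cluster actually serves. A quick way to confirm against a live cluster:

[source,terminal]
----
# List the flow-control API versions that the API server currently serves.
$ oc api-versions | grep flowcontrol.apiserver.k8s.io
----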
diff --git a/applications/application-health.adoc b/applications/application-health.adoc
index de694b2f8e2c..2df7f7029049 100644
--- a/applications/application-health.adoc
+++ b/applications/application-health.adoc
@@ -1,4 +1,4 @@
-:_content-type: ASSEMBLY
+:_mod-docs-content-type: ASSEMBLY
:context: application-health
[id="application-health"]
= Monitoring application health by using health checks
diff --git a/applications/config-maps.adoc b/applications/config-maps.adoc
index 39f675a19c14..8d86092a9062 100644
--- a/applications/config-maps.adoc
+++ b/applications/config-maps.adoc
@@ -1,4 +1,4 @@
-:_content-type: ASSEMBLY
+:_mod-docs-content-type: ASSEMBLY
[id="config-maps"]
= Using config maps with applications
include::_attributes/common-attributes.adoc[]
diff --git a/applications/connecting_applications_to_services/binding-workloads-using-sbo.adoc b/applications/connecting_applications_to_services/binding-workloads-using-sbo.adoc
index 3a22f233cb3d..850f5c9d7be8 100644
--- a/applications/connecting_applications_to_services/binding-workloads-using-sbo.adoc
+++ b/applications/connecting_applications_to_services/binding-workloads-using-sbo.adoc
@@ -1,4 +1,4 @@
-:_content-type: ASSEMBLY
+:_mod-docs-content-type: ASSEMBLY
[id="binding-workloads-using-sbo"]
= Binding workloads using Service Binding Operator
include::_attributes/common-attributes.adoc[]
@@ -47,5 +47,5 @@ include::modules/sbo-unbinding-workloads-from-a-backing-service.adoc[leveloffset
* xref:../../applications/connecting_applications_to_services/understanding-service-binding-operator.adoc#binding-a-workload-together-with-a-backing-service_understanding-service-binding-operator[Binding a workload together with a backing service].
* xref:../../applications/connecting_applications_to_services/getting-started-with-service-binding.adoc#connecting-the-spring-petclinic-sample-application-to-the-postgresql-database-service[Connecting the Spring PetClinic sample application to the PostgreSQL database service].
* xref:../../operators/understanding/crds/crd-managing-resources-from-crds.adoc#crd-creating-custom-resources-from-file_crd-managing-resources-from-crds[Creating custom resources from a file]
-* link:https://redhat-developer.github.io/service-binding-operator/userguide/binding-workloads-using-sbo/custom-path-injection.html#_workload_resource_mapping[Example schema of the ClusterWorkloadResourceMapping resource].
+* link:https://redhat-developer.github.io/service-binding-operator/userguide/binding-workloads-using-sbo/custom-path-injection.html#_workload_resource_mapping[Example schema of the ClusterWorkloadResourceMapping resource].
diff --git a/applications/connecting_applications_to_services/exposing-binding-data-from-a-service.adoc b/applications/connecting_applications_to_services/exposing-binding-data-from-a-service.adoc
index 50c305d5f699..c323c805a63c 100644
--- a/applications/connecting_applications_to_services/exposing-binding-data-from-a-service.adoc
+++ b/applications/connecting_applications_to_services/exposing-binding-data-from-a-service.adoc
@@ -1,4 +1,4 @@
-:_content-type: ASSEMBLY
+:_mod-docs-content-type: ASSEMBLY
[id="exposing-binding-data-from-a-service"]
= Exposing binding data from a service
include::_attributes/common-attributes.adoc[]
diff --git a/applications/connecting_applications_to_services/getting-started-with-service-binding-ibm-power-ibm-z.adoc b/applications/connecting_applications_to_services/getting-started-with-service-binding-ibm-power-ibm-z.adoc
index 001b8a622b0e..bd200f551a1d 100644
--- a/applications/connecting_applications_to_services/getting-started-with-service-binding-ibm-power-ibm-z.adoc
+++ b/applications/connecting_applications_to_services/getting-started-with-service-binding-ibm-power-ibm-z.adoc
@@ -1,6 +1,6 @@
-:_content-type: ASSEMBLY
+:_mod-docs-content-type: ASSEMBLY
[id="getting-started-with-service-binding-ibm-power-ibm-z"]
-= Getting started with service binding on {ibmpowerProductName}, {ibmzProductName}, and {linuxoneProductName}
+= Getting started with service binding on {ibm-power-title}, {ibm-z-title}, and {ibm-linuxone-title}
include::_attributes/common-attributes.adoc[]
include::_attributes/servicebinding-document-attributes.adoc[]
:context: getting-started-with-service-binding-ibm-power-ibm-z
diff --git a/applications/connecting_applications_to_services/getting-started-with-service-binding.adoc b/applications/connecting_applications_to_services/getting-started-with-service-binding.adoc
index 6fb5ac0f69ab..6c216bdcf7b6 100644
--- a/applications/connecting_applications_to_services/getting-started-with-service-binding.adoc
+++ b/applications/connecting_applications_to_services/getting-started-with-service-binding.adoc
@@ -1,4 +1,4 @@
-:_content-type: ASSEMBLY
+:_mod-docs-content-type: ASSEMBLY
[id="getting-started-with-service-binding"]
= Getting started with service binding
include::_attributes/common-attributes.adoc[]
@@ -39,4 +39,4 @@ include::modules/sbo-connecting-spring-petclinic-sample-app-to-postgresql-databa
* xref:../../applications/connecting_applications_to_services/installing-sbo.adoc#installing-sbo[Installing Service Binding Operator].
* xref:../../applications/creating_applications/odc-creating-applications-using-developer-perspective.adoc#odc-creating-applications-using-developer-perspective[Creating applications using the Developer perspective].
* xref:../../operators/understanding/crds/crd-managing-resources-from-crds.adoc[Managing resources from custom resource definitions].
-* link:https://github.com/redhat-developer/service-binding-operator#known-bindable-operators[Known bindable Operators].
\ No newline at end of file
+* link:https://github.com/redhat-developer/service-binding-operator#known-bindable-operators[Known bindable Operators].
\ No newline at end of file
diff --git a/applications/connecting_applications_to_services/installing-sbo.adoc b/applications/connecting_applications_to_services/installing-sbo.adoc
index afd97908dad6..7f16790a6f4c 100644
--- a/applications/connecting_applications_to_services/installing-sbo.adoc
+++ b/applications/connecting_applications_to_services/installing-sbo.adoc
@@ -1,4 +1,4 @@
-:_content-type: ASSEMBLY
+:_mod-docs-content-type: ASSEMBLY
[id="installing-sbo"]
= Installing Service Binding Operator
include::_attributes/common-attributes.adoc[]
diff --git a/applications/connecting_applications_to_services/odc-connecting-an-application-to-a-service-using-the-developer-perspective.adoc b/applications/connecting_applications_to_services/odc-connecting-an-application-to-a-service-using-the-developer-perspective.adoc
index 5ea513d917dd..2708e054fbed 100644
--- a/applications/connecting_applications_to_services/odc-connecting-an-application-to-a-service-using-the-developer-perspective.adoc
+++ b/applications/connecting_applications_to_services/odc-connecting-an-application-to-a-service-using-the-developer-perspective.adoc
@@ -1,4 +1,4 @@
-:_content-type: ASSEMBLY
+:_mod-docs-content-type: ASSEMBLY
[id="odc-connecting-an-application-to-a-service-using-the-developer-perspective"]
= Connecting an application to a service using the Developer perspective
include::_attributes/common-attributes.adoc[]
diff --git a/applications/connecting_applications_to_services/projecting-binding-data.adoc b/applications/connecting_applications_to_services/projecting-binding-data.adoc
index c27b55566bc1..dc23b0b2b071 100644
--- a/applications/connecting_applications_to_services/projecting-binding-data.adoc
+++ b/applications/connecting_applications_to_services/projecting-binding-data.adoc
@@ -1,4 +1,4 @@
-:_content-type: ASSEMBLY
+:_mod-docs-content-type: ASSEMBLY
[id="projecting-binding-data"]
= Projecting binding data
include::_attributes/common-attributes.adoc[]
diff --git a/applications/connecting_applications_to_services/sbo-release-notes.adoc b/applications/connecting_applications_to_services/sbo-release-notes.adoc
index c4a67d04d8c8..87e63866765a 100644
--- a/applications/connecting_applications_to_services/sbo-release-notes.adoc
+++ b/applications/connecting_applications_to_services/sbo-release-notes.adoc
@@ -1,5 +1,5 @@
//OpenShift Service Binding Release Notes
-:_content-type: ASSEMBLY
+:_mod-docs-content-type: ASSEMBLY
[id="servicebinding-release-notes"]
= Release notes for {servicebinding-title}
:context: servicebinding-release-notes
diff --git a/applications/connecting_applications_to_services/understanding-service-binding-operator.adoc b/applications/connecting_applications_to_services/understanding-service-binding-operator.adoc
index 5a6c5500fe95..c403bcb70570 100644
--- a/applications/connecting_applications_to_services/understanding-service-binding-operator.adoc
+++ b/applications/connecting_applications_to_services/understanding-service-binding-operator.adoc
@@ -1,4 +1,4 @@
-:_content-type: ASSEMBLY
+:_mod-docs-content-type: ASSEMBLY
[id="understanding-service-binding-operator"]
= Understanding Service Binding Operator
include::_attributes/common-attributes.adoc[]
diff --git a/applications/creating_applications/creating-applications-using-cli.adoc b/applications/creating_applications/creating-applications-using-cli.adoc
index 10357412a63d..df6c0d346573 100644
--- a/applications/creating_applications/creating-applications-using-cli.adoc
+++ b/applications/creating_applications/creating-applications-using-cli.adoc
@@ -1,4 +1,4 @@
-:_content-type: ASSEMBLY
+:_mod-docs-content-type: ASSEMBLY
[id="creating-applications-using-cli"]
= Creating applications using the CLI
include::_attributes/common-attributes.adoc[]
diff --git a/applications/creating_applications/creating-apps-from-installed-operators.adoc b/applications/creating_applications/creating-apps-from-installed-operators.adoc
index 728bb3c441d3..a36c46f237b8 100644
--- a/applications/creating_applications/creating-apps-from-installed-operators.adoc
+++ b/applications/creating_applications/creating-apps-from-installed-operators.adoc
@@ -1,4 +1,4 @@
-:_content-type: ASSEMBLY
+:_mod-docs-content-type: ASSEMBLY
[id="creating-apps-from-installed-operators"]
= Creating applications from installed Operators
include::_attributes/common-attributes.adoc[]
diff --git a/applications/creating_applications/odc-creating-applications-using-developer-perspective.adoc b/applications/creating_applications/odc-creating-applications-using-developer-perspective.adoc
index 4d93abbdfb4d..1340efce30c9 100644
--- a/applications/creating_applications/odc-creating-applications-using-developer-perspective.adoc
+++ b/applications/creating_applications/odc-creating-applications-using-developer-perspective.adoc
@@ -1,4 +1,4 @@
-:_content-type: ASSEMBLY
+:_mod-docs-content-type: ASSEMBLY
[id="odc-creating-applications-using-developer-perspective"]
= Creating applications using the Developer perspective
include::_attributes/common-attributes.adoc[]
@@ -50,7 +50,7 @@ The Managed services option is also available if the RHOAS Operator is installed
* *Re-ordering of resources*: Use these resources to re-order pinned resources added to your navigation pane. The drag-and-drop icon is displayed on the left side of the pinned resource when you hover over it in the navigation pane. The dragged resource can be dropped only in the section where it resides.
ifdef::openshift-enterprise,openshift-webscale[]
-Note that certain options, such as *Pipelines*, *Event Source*, and *Import Virtual Machines*, are displayed only when the xref:../../cicd/pipelines/installing-pipelines.adoc#op-installing-pipelines-operator-in-web-console_installing-pipelines[OpenShift Pipelines Operator], link:https://docs.openshift.com/serverless/1.28/install/install-serverless-operator.html#serverless-install-web-console_install-serverless-operator[{ServerlessOperatorName}], and xref:../../virt/install/installing-virt.adoc#virt-subscribing-cli_installing-virt[OpenShift Virtualization Operator] are installed, respectively.
+Note that certain options, such as *Pipelines*, *Event Source*, and *Import Virtual Machines*, are displayed only when the link:https://docs.openshift.com/pipelines/latest/install_config/installing-pipelines.html#op-installing-pipelines-operator-in-web-console_installing-pipelines[OpenShift Pipelines Operator], link:https://docs.openshift.com/serverless/1.28/install/install-serverless-operator.html#serverless-install-web-console_install-serverless-operator[{ServerlessOperatorName}], and xref:../../virt/install/installing-virt.adoc#virt-subscribing-cli_installing-virt[OpenShift Virtualization Operator] are installed, respectively.
endif::[]
[id="prerequisites_odc-creating-applications-using-developer-perspective"]
@@ -91,5 +91,5 @@ include::modules/odc-using-the-developer-catalog-to-add-services-or-components.a
* For more information about Knative routing settings for {ServerlessProductName}, see link:https://docs.openshift.com/serverless/1.28/knative-serving/external-ingress-routing/routing-overview.html#routing-overview[Routing].
* For more information about domain mapping settings for {ServerlessProductName}, see link:https://docs.openshift.com/serverless/1.28/knative-serving/config-custom-domains/serverless-custom-domains.html#serverless-custom-domains[Configuring a custom domain for a Knative service].
* For more information about Knative autoscaling settings for {ServerlessProductName}, see link:https://docs.openshift.com/serverless/1.28/knative-serving/autoscaling/serverless-autoscaling-developer.html#serverless-autoscaling-developer[Autoscaling].
-* For more information about adding a new user to a project, see xref:../projects/working-with-projects.adoc#odc-providing-project-permissions-using-developer-perspective_projects[Working with projects].
-* For more information about creating a Helm Chart repository, see xref:../working_with_helm_charts/configuring-custom-helm-chart-repositories.adoc#odc-creating-helm-releases-using-developer-perspective_configuring-custom-helm-chart-repositories[Creating Helm Chart repositories].
+* For more information about adding a new user to a project, see xref:../../applications/projects/working-with-projects.adoc#odc-providing-project-permissions-using-developer-perspective_projects[Working with projects].
+* For more information about creating a Helm Chart repository, see xref:../../applications/working_with_helm_charts/configuring-custom-helm-chart-repositories.adoc#odc-creating-helm-releases-using-developer-perspective_configuring-custom-helm-chart-repositories[Creating Helm Chart repositories].
diff --git a/applications/deployments/deployment-strategies.adoc b/applications/deployments/deployment-strategies.adoc
index 5d18c9f72dba..0af104aa2bb0 100644
--- a/applications/deployments/deployment-strategies.adoc
+++ b/applications/deployments/deployment-strategies.adoc
@@ -1,4 +1,4 @@
-:_content-type: ASSEMBLY
+:_mod-docs-content-type: ASSEMBLY
[id="deployment-strategies"]
= Using deployment strategies
include::_attributes/common-attributes.adoc[]
diff --git a/applications/deployments/managing-deployment-processes.adoc b/applications/deployments/managing-deployment-processes.adoc
index 5226f871c93a..334d9b9e252f 100644
--- a/applications/deployments/managing-deployment-processes.adoc
+++ b/applications/deployments/managing-deployment-processes.adoc
@@ -1,4 +1,4 @@
-:_content-type: ASSEMBLY
+:_mod-docs-content-type: ASSEMBLY
[id="deployment-operations"]
= Managing deployment processes
include::_attributes/common-attributes.adoc[]
@@ -9,6 +9,8 @@ toc::[]
[id="deploymentconfig-operations"]
== Managing DeploymentConfig objects
+include::snippets/deployment-config-deprecated.adoc[]
+
`DeploymentConfig` objects can be managed from the {product-title} web console's *Workloads* page or using the `oc` CLI. The following procedures show CLI usage unless otherwise stated.
include::modules/deployments-starting-deployment.adoc[leveloffset=+2]
diff --git a/applications/deployments/osd-config-custom-domains-applications.adoc b/applications/deployments/osd-config-custom-domains-applications.adoc
index e652e9b7e075..07d36f16b6ad 100644
--- a/applications/deployments/osd-config-custom-domains-applications.adoc
+++ b/applications/deployments/osd-config-custom-domains-applications.adoc
@@ -1,4 +1,4 @@
-:_content-type: ASSEMBLY
+:_mod-docs-content-type: ASSEMBLY
[id="osd-config-custom-domains-applications"]
= Custom domains for applications
include::_attributes/attributes-openshift-dedicated.adoc[]
@@ -6,6 +6,11 @@ include::_attributes/attributes-openshift-dedicated.adoc[]
toc::[]
+[NOTE]
+====
+Starting with {product-title} 4.14, the Custom Domain Operator is deprecated. To manage Ingress in {product-title} 4.14, use the Ingress Operator. The functionality is unchanged for {product-title} 4.13 and earlier versions.
+====
+
You can configure a custom domain for your applications. Custom domains are specific wildcard domains that can be used with {product-title} applications.
include::modules/osd-applications-config-custom-domains.adoc[leveloffset=+1]
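
For the Ingress Operator path recommended in the note above, the entry point is the `IngressController` resource. A quick inspection sketch:

[source,terminal]
----
# Review the default IngressController managed by the Ingress Operator.
$ oc get ingresscontroller default -n openshift-ingress-operator -o yaml
----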
diff --git a/applications/deployments/route-based-deployment-strategies.adoc b/applications/deployments/route-based-deployment-strategies.adoc
index 87df7e2548e8..cc1e6b5a577c 100644
--- a/applications/deployments/route-based-deployment-strategies.adoc
+++ b/applications/deployments/route-based-deployment-strategies.adoc
@@ -1,4 +1,4 @@
-:_content-type: ASSEMBLY
+:_mod-docs-content-type: ASSEMBLY
[id="route-based-deployment-strategies"]
= Using route-based deployment strategies
include::_attributes/common-attributes.adoc[]
diff --git a/applications/deployments/what-deployments-are.adoc b/applications/deployments/what-deployments-are.adoc
index 09654a5d92f6..a8a935138714 100644
--- a/applications/deployments/what-deployments-are.adoc
+++ b/applications/deployments/what-deployments-are.adoc
@@ -1,6 +1,6 @@
-:_content-type: ASSEMBLY
+:_mod-docs-content-type: ASSEMBLY
[id="what-deployments-are"]
-= Understanding Deployment and DeploymentConfig objects
+= Understanding deployments
include::_attributes/common-attributes.adoc[]
:context: what-deployments-are
@@ -8,10 +8,14 @@ toc::[]
The `Deployment` and `DeploymentConfig` API objects in {product-title} provide two similar but different methods for fine-grained management over common user applications. They are composed of the following separate API objects:
-* A `DeploymentConfig` or `Deployment` object, either of which describes the desired state of a particular component of the application as a pod template.
-* `DeploymentConfig` objects involve one or more _replication controllers_, which contain a point-in-time record of the state of a deployment as a pod template. Similarly, `Deployment` objects involve one or more _replica sets_, a successor of replication controllers.
+* A `Deployment` or `DeploymentConfig` object, either of which describes the desired state of a particular component of the application as a pod template.
+* `Deployment` objects involve one or more _replica sets_, which contain a point-in-time record of the state of a deployment as a pod template. Similarly, `DeploymentConfig` objects involve one or more _replication controllers_, which preceded replica sets.
* One or more pods, which represent an instance of a particular version of an application.
+Use `Deployment` objects unless you need a specific feature or behavior provided by `DeploymentConfig` objects.
+
+include::snippets/deployment-config-deprecated.adoc[]
+
////
Update when converted:
[role="_additional-resources"]
@@ -31,7 +35,7 @@ xref:../../dev_guide/pod_autoscaling.adoc#dev-guide-pod-autoscaling[autoscaling]
Deployments and deployment configs are enabled by the use of native Kubernetes API objects `ReplicaSet` and `ReplicationController`, respectively, as their building blocks.
-Users do not have to manipulate replication controllers, replica sets, or pods owned by `DeploymentConfig` objects or deployments. The deployment systems ensure changes are propagated appropriately.
+Users do not have to manipulate replica sets, replication controllers, or pods owned by `Deployment` or `DeploymentConfig` objects. The deployment systems ensure changes are propagated appropriately.
[TIP]
====
@@ -40,11 +44,12 @@ If the existing deployment strategies are not suited for your use case and you m
The following sections provide further details on these objects.
-include::modules/deployments-replicationcontrollers.adoc[leveloffset=+2]
include::modules/deployments-replicasets.adoc[leveloffset=+2]
+include::modules/deployments-replicationcontrollers.adoc[leveloffset=+2]
-include::modules/deployments-deploymentconfigs.adoc[leveloffset=+1]
include::modules/deployments-kube-deployments.adoc[leveloffset=+1]
+include::modules/deployments-deploymentconfigs.adoc[leveloffset=+1]
+
include::modules/deployments-comparing-deploymentconfigs.adoc[leveloffset=+1]
////
Update when converted:
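
Because the assembly now steers readers toward `Deployment` objects, a minimal CLI sketch of the recommended path may help; the name and image are placeholders:

[source,terminal]
----
# Create a Deployment; a ReplicaSet is created and managed for you.
$ oc create deployment hello-openshift \
    --image=registry.access.redhat.com/ubi8/httpd-24 --replicas=2

# The underlying ReplicaSet is visible but does not need direct management.
$ oc get replicasets -l app=hello-openshift
----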
diff --git a/applications/idling-applications.adoc b/applications/idling-applications.adoc
index 179701df2a45..8f163c4463f5 100644
--- a/applications/idling-applications.adoc
+++ b/applications/idling-applications.adoc
@@ -1,4 +1,4 @@
-:_content-type: ASSEMBLY
+:_mod-docs-content-type: ASSEMBLY
[id="idling-applications"]
= Idling applications
include::_attributes/common-attributes.adoc[]
diff --git a/applications/index.adoc b/applications/index.adoc
index 7f3a761b6feb..ac102b5f6eb2 100644
--- a/applications/index.adoc
+++ b/applications/index.adoc
@@ -1,4 +1,4 @@
-:_content-type: ASSEMBLY
+:_mod-docs-content-type: ASSEMBLY
[id="building-applications-overview"]
= Building applications overview
include::_attributes/common-attributes.adoc[]
@@ -36,7 +36,7 @@ When the application is running, not all applications resources are used. As a c
[id="connecting-application"]
=== Connecting an application to services
-An application uses backing services to build and connect workloads, which vary according to the service provider. Using the xref:../applications/connecting_applications_to_services/understanding-service-binding-operator.adoc#understanding-service-binding-operator[Service Binding Operator], as a developer, you can bind workloads together with Operator-managed backing services, without any manual procedures to configure the binding connection. You can apply service binding also on xref:../applications/connecting_applications_to_services/getting-started-with-service-binding-ibm-power-ibm-z.adoc#getting-started-with-service-binding-ibm-power-ibm-z[{ibmpowerProductName}, {ibmzProductName}, and {linuxoneProductName} environments].
+An application uses backing services to build and connect workloads, which vary according to the service provider. Using the xref:../applications/connecting_applications_to_services/understanding-service-binding-operator.adoc#understanding-service-binding-operator[Service Binding Operator], as a developer, you can bind workloads together with Operator-managed backing services, without any manual procedures to configure the binding connection. You can apply service binding also on xref:../applications/connecting_applications_to_services/getting-started-with-service-binding-ibm-power-ibm-z.adoc#getting-started-with-service-binding-ibm-power-ibm-z[{ibm-power-name}, {ibm-z-name}, and {ibm-linuxone-name} environments].
[id="deploying-application"]
=== Deploying an application
diff --git a/applications/odc-deleting-applications.adoc b/applications/odc-deleting-applications.adoc
index 6082134feb51..125ba1aa0fe9 100644
--- a/applications/odc-deleting-applications.adoc
+++ b/applications/odc-deleting-applications.adoc
@@ -1,4 +1,4 @@
-:_content-type: ASSEMBLY
+:_mod-docs-content-type: ASSEMBLY
[id="odc-deleting-applications"]
= Deleting applications
include::_attributes/common-attributes.adoc[]
diff --git a/applications/odc-editing-applications.adoc b/applications/odc-editing-applications.adoc
index 6c27e73f4e89..99fb9e7e067f 100644
--- a/applications/odc-editing-applications.adoc
+++ b/applications/odc-editing-applications.adoc
@@ -1,4 +1,4 @@
-:_content-type: ASSEMBLY
+:_mod-docs-content-type: ASSEMBLY
[id="odc-editing-applications"]
= Editing applications
include::_attributes/common-attributes.adoc[]
diff --git a/applications/odc-exporting-applications.adoc b/applications/odc-exporting-applications.adoc
index ebef502465a4..f89cdf7e56ce 100644
--- a/applications/odc-exporting-applications.adoc
+++ b/applications/odc-exporting-applications.adoc
@@ -1,4 +1,4 @@
-:_content-type: ASSEMBLY
+:_mod-docs-content-type: ASSEMBLY
[id="odc-exporting-applications"]
= Exporting applications
include::_attributes/common-attributes.adoc[]
diff --git a/applications/odc-monitoring-project-and-application-metrics-using-developer-perspective.adoc b/applications/odc-monitoring-project-and-application-metrics-using-developer-perspective.adoc
index b70a8da2de8b..079b1646b6fc 100644
--- a/applications/odc-monitoring-project-and-application-metrics-using-developer-perspective.adoc
+++ b/applications/odc-monitoring-project-and-application-metrics-using-developer-perspective.adoc
@@ -1,4 +1,4 @@
-:_content-type: ASSEMBLY
+:_mod-docs-content-type: ASSEMBLY
[id="odc-monitoring-project-and-application-metrics-using-developer-perspective"]
= Monitoring project and application metrics using the Developer perspective
include::_attributes/common-attributes.adoc[]
diff --git a/applications/odc-viewing-application-composition-using-topology-view.adoc b/applications/odc-viewing-application-composition-using-topology-view.adoc
index 576a2d56f718..9375b3705049 100644
--- a/applications/odc-viewing-application-composition-using-topology-view.adoc
+++ b/applications/odc-viewing-application-composition-using-topology-view.adoc
@@ -1,4 +1,4 @@
-:_content-type: ASSEMBLY
+:_mod-docs-content-type: ASSEMBLY
[id="odc-viewing-application-composition-using-topology-view"]
= Viewing application composition using the Topology view
include::_attributes/common-attributes.adoc[]
diff --git a/applications/projects/configuring-project-creation.adoc b/applications/projects/configuring-project-creation.adoc
index 1d9aa09de82f..9b4242361304 100644
--- a/applications/projects/configuring-project-creation.adoc
+++ b/applications/projects/configuring-project-creation.adoc
@@ -1,4 +1,4 @@
-:_content-type: ASSEMBLY
+:_mod-docs-content-type: ASSEMBLY
[id="configuring-project-creation"]
= Configuring project creation
include::_attributes/common-attributes.adoc[]
diff --git a/applications/projects/creating-project-other-user.adoc b/applications/projects/creating-project-other-user.adoc
index 49c9844f7e0a..304ceebb8d1e 100644
--- a/applications/projects/creating-project-other-user.adoc
+++ b/applications/projects/creating-project-other-user.adoc
@@ -1,4 +1,4 @@
-:_content-type: ASSEMBLY
+:_mod-docs-content-type: ASSEMBLY
[id="creating-project-other-user"]
= Creating a project as another user
include::_attributes/common-attributes.adoc[]
diff --git a/applications/projects/working-with-projects.adoc b/applications/projects/working-with-projects.adoc
index a155c0139e4a..3e3d00c7c12d 100644
--- a/applications/projects/working-with-projects.adoc
+++ b/applications/projects/working-with-projects.adoc
@@ -1,4 +1,4 @@
-:_content-type: ASSEMBLY
+:_mod-docs-content-type: ASSEMBLY
[id="working-with-projects"]
= Working with projects
include::_attributes/common-attributes.adoc[]
@@ -14,10 +14,7 @@ isolation from other communities.
Projects starting with `openshift-` and `kube-` are xref:../../authentication/using-rbac.adoc#rbac-default-projects_using-rbac[default projects]. These projects host cluster components that run as pods and other infrastructure components. As such, {product-title} does not allow you to create projects starting with `openshift-` or `kube-` using the `oc new-project` command. Cluster administrators can create these projects using the `oc adm new-project` command.
====
-[NOTE]
-====
-You cannot assign an SCC to pods created in one of the default namespaces: `default`, `kube-system`, `kube-public`, `openshift-node`, `openshift-infra`, and `openshift`. You cannot use these namespaces for running pods or services.
-====
+include::snippets/default-projects.adoc[]
include::modules/creating-a-project-using-the-web-console.adoc[leveloffset=+1]
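
The default-project restriction described above plays out on the CLI as follows; the project name is a placeholder:

[source,terminal]
----
# Regular users create projects with names outside the reserved prefixes.
$ oc new-project <project_name>

# Only cluster administrators can create reserved-prefix projects.
$ oc adm new-project openshift-<project_name>
----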
diff --git a/applications/pruning-objects.adoc b/applications/pruning-objects.adoc
index 0b42fd73b34e..7f80ee11e446 100644
--- a/applications/pruning-objects.adoc
+++ b/applications/pruning-objects.adoc
@@ -1,4 +1,4 @@
-:_content-type: ASSEMBLY
+:_mod-docs-content-type: ASSEMBLY
[id="pruning-objects"]
= Pruning objects to reclaim resources
include::_attributes/common-attributes.adoc[]
diff --git a/applications/quotas/quotas-setting-across-multiple-projects.adoc b/applications/quotas/quotas-setting-across-multiple-projects.adoc
index 471a0343a6ec..a4a4330ca134 100644
--- a/applications/quotas/quotas-setting-across-multiple-projects.adoc
+++ b/applications/quotas/quotas-setting-across-multiple-projects.adoc
@@ -1,4 +1,4 @@
-:_content-type: ASSEMBLY
+:_mod-docs-content-type: ASSEMBLY
[id="setting-quotas-across-multiple-projects"]
= Resource quotas across multiple projects
include::_attributes/common-attributes.adoc[]
@@ -10,6 +10,8 @@ A multi-project quota, defined by a `ClusterResourceQuota` object, allows quotas
This guide describes how cluster administrators can set and manage resource quotas across multiple projects.
+include::snippets/default-projects.adoc[]
+
include::modules/quotas-selecting-projects.adoc[leveloffset=+1]
include::modules/quotas-viewing-clusterresourcequotas.adoc[leveloffset=+1]
include::modules/quotas-selection-granularity.adoc[leveloffset=+1]
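
A minimal sketch of creating a multi-project quota from the CLI; the user name in the annotation selector is a placeholder:

[source,terminal]
----
# Create a ClusterResourceQuota that spans every project requested by <user_name>.
$ oc create clusterresourcequota for-user \
    --project-annotation-selector openshift.io/requester=<user_name> \
    --hard pods=10 \
    --hard secrets=20
----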
diff --git a/applications/quotas/quotas-setting-per-project.adoc b/applications/quotas/quotas-setting-per-project.adoc
index daf75d747225..472879c2fede 100644
--- a/applications/quotas/quotas-setting-per-project.adoc
+++ b/applications/quotas/quotas-setting-per-project.adoc
@@ -1,4 +1,4 @@
-:_content-type: ASSEMBLY
+:_mod-docs-content-type: ASSEMBLY
[id="quotas-setting-per-project"]
= Resource quotas per project
include::_attributes/common-attributes.adoc[]
diff --git a/applications/red-hat-marketplace.adoc b/applications/red-hat-marketplace.adoc
index d875e3eeaac7..99f239b589af 100644
--- a/applications/red-hat-marketplace.adoc
+++ b/applications/red-hat-marketplace.adoc
@@ -1,4 +1,4 @@
-:_content-type: ASSEMBLY
+:_mod-docs-content-type: ASSEMBLY
[id="red-hat-marketplace"]
= Using the Red Hat Marketplace
include::_attributes/common-attributes.adoc[]
diff --git a/applications/working-with-quotas.adoc b/applications/working-with-quotas.adoc
index dfa5dfd6b866..85715c12eb2b 100644
--- a/applications/working-with-quotas.adoc
+++ b/applications/working-with-quotas.adoc
@@ -1,4 +1,4 @@
-:_content-type: ASSEMBLY
+:_mod-docs-content-type: ASSEMBLY
[id="working-with-quotas"]
= Working with quotas
include::_attributes/common-attributes.adoc[]
diff --git a/applications/working_with_helm_charts/configuring-custom-helm-chart-repositories.adoc b/applications/working_with_helm_charts/configuring-custom-helm-chart-repositories.adoc
index 933193d52908..6836d07cbfbb 100644
--- a/applications/working_with_helm_charts/configuring-custom-helm-chart-repositories.adoc
+++ b/applications/working_with_helm_charts/configuring-custom-helm-chart-repositories.adoc
@@ -1,4 +1,4 @@
-:_content-type: ASSEMBLY
+:_mod-docs-content-type: ASSEMBLY
[id="configuring-custom-helm-chart-repositories"]
= Configuring custom Helm chart repositories
include::_attributes/common-attributes.adoc[]
@@ -18,7 +18,7 @@ As a cluster administrator, you can add multiple cluster-scoped and namespace-sc
As a regular user or project member with the appropriate role-based access control (RBAC) permissions, you can add multiple namespace-scoped Helm chart repositories, apart from the default cluster-scoped Helm repository, and display the Helm charts from these repositories in the *Developer Catalog*.
-In the *Developer* perspective of the web console, you can use the *Helm* page to:
+In the *Developer* perspective of the web console, you can use the *Helm* page to:
* Create Helm Releases and Repositories using the *Create* button.
diff --git a/applications/working_with_helm_charts/installing-helm.adoc b/applications/working_with_helm_charts/installing-helm.adoc
index 59a50498563a..10a0bcd71471 100644
--- a/applications/working_with_helm_charts/installing-helm.adoc
+++ b/applications/working_with_helm_charts/installing-helm.adoc
@@ -1,4 +1,4 @@
-:_content-type: ASSEMBLY
+:_mod-docs-content-type: ASSEMBLY
[id="installing-helm"]
= Installing Helm
include::_attributes/common-attributes.adoc[]
@@ -24,14 +24,14 @@ You can also find the URL to the latest binaries from the {product-title} web co
# curl -L https://mirror.openshift.com/pub/openshift-v4/clients/helm/latest/helm-linux-amd64 -o /usr/local/bin/helm
----
-* Linux on {ibmzProductName} and {linuxoneProductName} (s390x)
+* Linux on {ibm-z-name} and {ibm-linuxone-name} (s390x)
+
[source,terminal]
----
# curl -L https://mirror.openshift.com/pub/openshift-v4/clients/helm/latest/helm-linux-s390x -o /usr/local/bin/helm
----
-* Linux on {ibmpowerProductName} (ppc64le)
+* Linux on {ibm-power-name} (ppc64le)
+
[source,terminal]
----
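
After downloading any of the binaries above, the usual follow-up (outside this hunk) is to mark the binary executable and confirm the version:

[source,terminal]
----
# Make the binary executable and verify the installation.
# chmod +x /usr/local/bin/helm
# helm version
----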
diff --git a/applications/working_with_helm_charts/odc-working-with-helm-releases.adoc b/applications/working_with_helm_charts/odc-working-with-helm-releases.adoc
index cfecb6b7e9b3..10fe4f158b37 100644
--- a/applications/working_with_helm_charts/odc-working-with-helm-releases.adoc
+++ b/applications/working_with_helm_charts/odc-working-with-helm-releases.adoc
@@ -1,4 +1,4 @@
-:_content-type: ASSEMBLY
+:_mod-docs-content-type: ASSEMBLY
[id="odc-working-with-helm-releases"]
= Working with Helm releases
include::_attributes/common-attributes.adoc[]
diff --git a/applications/working_with_helm_charts/understanding-helm.adoc b/applications/working_with_helm_charts/understanding-helm.adoc
index 83aead71501f..549b2571859c 100644
--- a/applications/working_with_helm_charts/understanding-helm.adoc
+++ b/applications/working_with_helm_charts/understanding-helm.adoc
@@ -1,4 +1,4 @@
-:_content-type: ASSEMBLY
+:_mod-docs-content-type: ASSEMBLY
[id="understanding-helm"]
= Understanding Helm
include::_attributes/common-attributes.adoc[]
diff --git a/architecture/admission-plug-ins.adoc b/architecture/admission-plug-ins.adoc
index c20f406a8c97..417c58ef7dec 100644
--- a/architecture/admission-plug-ins.adoc
+++ b/architecture/admission-plug-ins.adoc
@@ -1,4 +1,4 @@
-:_content-type: ASSEMBLY
+:_mod-docs-content-type: ASSEMBLY
[id="admission-plug-ins"]
= Admission plugins
include::_attributes/common-attributes.adoc[]
@@ -6,6 +6,9 @@ include::_attributes/common-attributes.adoc[]
toc::[]
+// Sentence taken from Architecture -> Index.
+Admission plugins are used to help regulate how {product-title} functions.
+
// Concept modules
include::modules/admission-plug-ins-about.adoc[leveloffset=+1]
diff --git a/architecture/architecture-installation.adoc b/architecture/architecture-installation.adoc
index 39786a08a45d..22e164dad62c 100644
--- a/architecture/architecture-installation.adoc
+++ b/architecture/architecture-installation.adoc
@@ -1,4 +1,4 @@
-:_content-type: ASSEMBLY
+:_mod-docs-content-type: ASSEMBLY
[id="architecture-installation"]
= Installation and update
include::_attributes/common-attributes.adoc[]
diff --git a/architecture/architecture-rhcos.adoc b/architecture/architecture-rhcos.adoc
index 50eacc440e5a..1954f535e300 100644
--- a/architecture/architecture-rhcos.adoc
+++ b/architecture/architecture-rhcos.adoc
@@ -1,4 +1,4 @@
-:_content-type: ASSEMBLY
+:_mod-docs-content-type: ASSEMBLY
[id="architecture-rhcos"]
= {op-system-first}
include::_attributes/common-attributes.adoc[]
diff --git a/architecture/architecture.adoc b/architecture/architecture.adoc
index 9ab4f4fa36cc..711cf3f4c12b 100644
--- a/architecture/architecture.adoc
+++ b/architecture/architecture.adoc
@@ -1,4 +1,4 @@
-:_content-type: ASSEMBLY
+:_mod-docs-content-type: ASSEMBLY
[id="architecture"]
= {product-title} architecture
include::_attributes/common-attributes.adoc[]
diff --git a/architecture/argocd.adoc b/architecture/argocd.adoc
index a8b4c5d1f258..ede48546c22b 100644
--- a/architecture/argocd.adoc
+++ b/architecture/argocd.adoc
@@ -1,4 +1,4 @@
-:_content-type: ASSEMBLY
+:_mod-docs-content-type: ASSEMBLY
[id="argocd"]
= Using ArgoCD with {product-title}
include::_attributes/common-attributes.adoc[]
diff --git a/architecture/cicd_gitops.adoc b/architecture/cicd_gitops.adoc
index 09bee1d19c1b..d99084f9ae1e 100644
--- a/architecture/cicd_gitops.adoc
+++ b/architecture/cicd_gitops.adoc
@@ -1,4 +1,4 @@
-:_content-type: ASSEMBLY
+:_mod-docs-content-type: ASSEMBLY
[id="cicd_gitops"]
= The CI/CD methodology and practice
include::_attributes/common-attributes.adoc[]
diff --git a/architecture/control-plane.adoc b/architecture/control-plane.adoc
index ea482bd64c3f..2c2b64e0d4f5 100644
--- a/architecture/control-plane.adoc
+++ b/architecture/control-plane.adoc
@@ -1,4 +1,4 @@
-:_content-type: ASSEMBLY
+:_mod-docs-content-type: ASSEMBLY
[id="control-plane"]
= Control plane architecture
include::_attributes/common-attributes.adoc[]
@@ -63,9 +63,7 @@ include::modules/hosted-control-planes-overview.adoc[leveloffset=+1]
[role="_additional-resources"]
.Additional resources
-* link:https://access.redhat.com/documentation/en-us/red_hat_advanced_cluster_management_for_kubernetes/2.8/html/clusters/cluster_mce_overview#hypershift-addon-intro[HyperShift add-on (Technology Preview)]
-
-* link:https://access.redhat.com/documentation/en-us/red_hat_advanced_cluster_management_for_kubernetes/2.8/html/clusters/cluster_mce_overview#hosted-control-planes-intro[Hosted control planes (Technology Preview)]
+* link:https://access.redhat.com/documentation/en-us/red_hat_advanced_cluster_management_for_kubernetes/2.9/html/clusters/cluster_mce_overview#hosted-control-planes-intro[Hosted control planes]
include::modules/hosted-control-planes-concepts-personas.adoc[leveloffset=+2]
include::modules/hosted-control-planes-version-support.adoc[leveloffset=+2]
diff --git a/architecture/index.adoc b/architecture/index.adoc
index 08892de20d31..3fce1f22d015 100644
--- a/architecture/index.adoc
+++ b/architecture/index.adoc
@@ -1,4 +1,4 @@
-:_content-type: ASSEMBLY
+:_mod-docs-content-type: ASSEMBLY
[id="architecture-overview"]
= Architecture overview
include::_attributes/common-attributes.adoc[]
@@ -19,8 +19,8 @@ include::modules/openshift-architecture-common-terms.adoc[leveloffset=+1]
* For more information on storage, see xref:../storage/index.adoc#index[{product-title} storage].
* For more information on authentication, see xref:../authentication/index.adoc#index[{product-title} authentication].
* For more information on Operator Lifecycle Manager (OLM), see xref:../operators/understanding/olm/olm-understanding-olm.adoc#olm-understanding-olm[OLM].
-* For more information on logging, see xref:../logging/viewing-resource-logs.adoc#viewing-resource-logs[{product-title} Logging].
-* For more information on over-the-air (OTA) updates, see xref:../updating/index.adoc#index[Updating {product-title} clusters].
+* For more information on logging, see xref:../logging/cluster-logging.adoc#cluster-logging[About Logging].
+* For more information on over-the-air (OTA) updates, see xref:../updating/understanding_updates/intro-to-updates.adoc#understanding-openshift-updates[Introduction to OpenShift updates].
[id="about-installation-and-updates"]
== About installation and updates
diff --git a/architecture/mce-overview-ocp.adoc b/architecture/mce-overview-ocp.adoc
index 869f970703e4..beec551aa22d 100644
--- a/architecture/mce-overview-ocp.adoc
+++ b/architecture/mce-overview-ocp.adoc
@@ -1,4 +1,4 @@
-:_content-type: ASSEMBLY
+:_mod-docs-content-type: ASSEMBLY
[id="mce-overview-ocp"]
= About multicluster engine for Kubernetes operator
include::_attributes/common-attributes.adoc[]
@@ -22,7 +22,7 @@ When you enable multicluster engine on {product-title}, you gain the following c
* Infrastructure Operator, which manages the deployment of the Assisted Service to orchestrate on-premise bare metal and vSphere installations of {product-title}, such as SNO on bare metal. The Infrastructure Operator includes xref:../scalability_and_performance/ztp_far_edge/ztp-deploying-far-edge-clusters-at-scale.adoc#ztp-challenges-of-far-edge-deployments_ztp-deploying-far-edge-clusters-at-scale[{ztp-first}], which fully automates cluster creation on bare metal and vSphere provisioning with GitOps workflows to manage deployments and configuration changes.
* Open cluster management, which provides resources to manage Kubernetes clusters.
-The multicluster engine is included with your {product-title} support subscription and is delivered separately from the core payload. To start to use multicluster engine, you deploy the {product-title} cluster and then install the operator. For more information, see link:https://access.redhat.com/documentation/en-us/red_hat_advanced_cluster_management_for_kubernetes/2.8/html/clusters/cluster_mce_overview#mce-install-intro[Installing and upgrading multicluster engine operator].
+The multicluster engine is included with your {product-title} support subscription and is delivered separately from the core payload. To start to use multicluster engine, you deploy the {product-title} cluster and then install the operator. For more information, see link:https://access.redhat.com/documentation/en-us/red_hat_advanced_cluster_management_for_kubernetes/2.9/html/clusters/cluster_mce_overview#mce-install-intro[Installing and upgrading multicluster engine operator].
[id="mce-on-rhacm"]
== Cluster management with Red Hat Advanced Cluster Management
@@ -32,4 +32,4 @@ If you need cluster management capabilities beyond what {product-title} with mul
[id="mce-additional-resources-ocp"]
== Additional resources
-For the complete documentation for multicluster engine, see link:https://access.redhat.com/documentation/en-us/red_hat_advanced_cluster_management_for_kubernetes/2.8/html/clusters/cluster_mce_overview#doc-wrapper[Cluster lifecycle with multicluster engine documentation], which is part of the product documentation for Red Hat Advanced Cluster Management.
+For the complete documentation for multicluster engine, see link:https://access.redhat.com/documentation/en-us/red_hat_advanced_cluster_management_for_kubernetes/2.9/html/clusters/cluster_mce_overview#doc-wrapper[Cluster lifecycle with multicluster engine documentation], which is part of the product documentation for Red Hat Advanced Cluster Management.
diff --git a/architecture/nvidia-gpu-architecture-overview.adoc b/architecture/nvidia-gpu-architecture-overview.adoc
new file mode 100644
index 000000000000..181dc0c6b75f
--- /dev/null
+++ b/architecture/nvidia-gpu-architecture-overview.adoc
@@ -0,0 +1,88 @@
+:_mod-docs-content-type: ASSEMBLY
+[id="nvidia-gpu-architecture-overview"]
+= NVIDIA GPU architecture overview
+include::_attributes/common-attributes.adoc[]
+:context: nvidia-gpu-architecture-overview
+
+toc::[]
+
+NVIDIA supports the use of graphics processing unit (GPU) resources on {product-title}. {product-title} is a security-focused and hardened Kubernetes platform developed and supported by Red Hat for deploying and managing Kubernetes clusters at scale. {product-title} includes enhancements to Kubernetes so that users can easily configure and use NVIDIA GPU resources to accelerate workloads.
+
+The NVIDIA GPU Operator uses the Operator framework within {product-title} to manage the full lifecycle of NVIDIA software components required to run GPU-accelerated workloads.
+
+These components include the NVIDIA drivers (to enable CUDA), the Kubernetes device plugin for GPUs, the NVIDIA Container Toolkit, automatic node labeling by using GPU Feature Discovery (GFD), DCGM-based monitoring, and others.
+
+[NOTE]
+====
+The NVIDIA GPU Operator is supported only by NVIDIA. For more information about obtaining support from NVIDIA, see link:https://access.redhat.com/solutions/5174941[Obtaining Support from NVIDIA].
+====
+
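+For example, after the GPU Operator stack is running, a workload requests a GPU through the `nvidia.com/gpu` extended resource that the device plugin advertises. The following pod specification is a minimal sketch; the image name is a placeholder:
+
+[source,yaml]
+----
+apiVersion: v1
+kind: Pod
+metadata:
+  name: cuda-sample
+spec:
+  restartPolicy: OnFailure
+  containers:
+  - name: cuda-sample
+    image: <cuda_enabled_image> # placeholder for any CUDA-enabled container image
+    resources:
+      limits:
+        nvidia.com/gpu: 1 # extended resource advertised by the NVIDIA device plugin
+----
+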
+include::modules/nvidia-gpu-prerequisites.adoc[leveloffset=+1]
+// New enablement modules
+include::modules/nvidia-gpu-enablement.adoc[leveloffset=+1]
+
+include::modules/nvidia-gpu-bare-metal.adoc[leveloffset=+2]
+[role="_additional-resources"]
+.Additional resources
+* link:https://docs.nvidia.com/ai-enterprise/deployment-guide-openshift-on-bare-metal/0.1.0/on-bare-metal.html[Red Hat OpenShift on Bare Metal Stack]
+
+include::modules/nvidia-gpu-virtualization.adoc[leveloffset=+2]
+[role="_additional-resources"]
+.Additional resources
+* link:https://docs.nvidia.com/datacenter/cloud-native/gpu-operator/latest/openshift/openshift-virtualization.html[NVIDIA GPU Operator with OpenShift Virtualization]
+
+include::modules/nvidia-gpu-vsphere.adoc[leveloffset=+2]
+[role="_additional-resources"]
+.Additional resources
+* link:https://docs.nvidia.com/datacenter/cloud-native/gpu-operator/latest/openshift/nvaie-with-ocp.html#openshift-container-platform-on-vmware-vsphere-with-nvidia-vgpus[OpenShift Container Platform on VMware vSphere with NVIDIA vGPUs]
+
+include::modules/nvidia-gpu-kvm.adoc[leveloffset=+2]
+[role="_additional-resources"]
+.Additional resources
+* link:https://computingforgeeks.com/how-to-deploy-openshift-container-platform-on-kvm/[How To Deploy OpenShift Container Platform 4.13 on KVM]
+
+include::modules/nvidia-gpu-csps.adoc[leveloffset=+2]
+[role="_additional-resources"]
+.Additional resources
+* link:https://docs.nvidia.com/ai-enterprise/deployment-guide-cloud/0.1.0/aws-redhat-openshift.html[Red Hat Openshift in the Cloud]
+
+include::modules/nvidia-gpu-red-hat-device-edge.adoc[leveloffset=+2]
+[role="_additional-resources"]
+.Additional resources
+* link:https://cloud.redhat.com/blog/how-to-accelerate-workloads-with-nvidia-gpus-on-red-hat-device-edge[How to accelerate workloads with NVIDIA GPUs on Red Hat Device Edge]
+
+// TELCODOCS-1092 GPU sharing methods
+include::modules/nvidia-gpu-sharing-methods.adoc[leveloffset=+1]
+[role="_additional-resources"]
+.Additional resources
+* link:https://developer.nvidia.com/blog/improving-gpu-utilization-in-kubernetes/[Improving GPU Utilization]
+
+include::modules/nvidia-gpu-cuda-streams.adoc[leveloffset=+2]
+[role="_additional-resources"]
+.Additional resources
+* link:https://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#asynchronous-concurrent-execution[Asynchronous Concurrent Execution]
+
+include::modules/nvidia-gpu-time-slicing.adoc[leveloffset=+2]
+
+include::modules/nvidia-gpu-cuda-mps.adoc[leveloffset=+2]
+[role="_additional-resources"]
+.Additional resources
+* link:https://docs.nvidia.com/deploy/mps/index.html[CUDA MPS]
+
+include::modules/nvidia-gpu-mig-gpu.adoc[leveloffset=+2]
+[role="_additional-resources"]
+.Additional resources
+* link:https://docs.nvidia.com/datacenter/tesla/mig-user-guide/[NVIDIA Multi-Instance GPU User Guide]
+
+include::modules/nvidia-gpu-virtualization-with-gpu.adoc[leveloffset=+2]
+[role="_additional-resources"]
+.Additional resources
+* link:https://www.nvidia.com/en-us/data-center/virtual-solutions/[Virtual GPUs]
+
+include::modules/nvidia-gpu-features.adoc[leveloffset=+1]
+[role="_additional-resources"]
+.Additional resources
+
+* link:https://docs.nvidia.com/ngc/ngc-deploy-on-premises/nvidia-certified-systems/index.html[NVIDIA-Certified Systems]
+* link:https://docs.nvidia.com/ai-enterprise/index.html#deployment-guides[NVIDIA AI Enterprise]
+* link:https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/overview.html#[NVIDIA Container Toolkit]
+* link:https://docs.nvidia.com/datacenter/cloud-native/openshift/latest/enable-gpu-monitoring-dashboard.html[Enabling the GPU Monitoring Dashboard]
+* link:https://docs.nvidia.com/datacenter/cloud-native/openshift/latest/mig-ocp.html[MIG Support in OpenShift Container Platform]
+* link:https://docs.nvidia.com/datacenter/cloud-native/openshift/latest/time-slicing-gpus-in-openshift.html[Time-slicing NVIDIA GPUs in OpenShift]
+* link:https://docs.nvidia.com/datacenter/cloud-native/openshift/latest/mirror-gpu-ocp-disconnected.html[Deploy GPU Operators in a disconnected or airgapped environment]
+* xref:../hardware_enablement/psap-node-feature-discovery-operator.adoc[Node Feature Discovery Operator]
diff --git a/architecture/ocm-overview-ocp.adoc b/architecture/ocm-overview-ocp.adoc
index d1eaf4095dd4..8ad963fc5d45 100644
--- a/architecture/ocm-overview-ocp.adoc
+++ b/architecture/ocm-overview-ocp.adoc
@@ -1,4 +1,4 @@
-:_content-type: ASSEMBLY
+:_mod-docs-content-type: ASSEMBLY
[id="ocm-overview-ocp"]
= Red Hat OpenShift Cluster Manager
include::_attributes/attributes-openshift-dedicated.adoc[]
@@ -54,4 +54,4 @@ include::modules/ocm-settings-tab.adoc[leveloffset=+2]
[id="ocm-additional-resources-ocp"]
== Additional resources
-* For the complete documentation for {cluster-manager}, see link:https://access.redhat.com/documentation/en-us/openshift_cluster_manager/2022/html-single/managing_clusters/index[{cluster-manager} documentation].
+* For the complete documentation for {cluster-manager}, see link:https://access.redhat.com/documentation/en-us/openshift_cluster_manager/2022/html-single/managing_clusters/index[{cluster-manager} documentation].
diff --git a/architecture/understanding-development.adoc b/architecture/understanding-development.adoc
index 0d7313848322..80d8f6d4d7c9 100644
--- a/architecture/understanding-development.adoc
+++ b/architecture/understanding-development.adoc
@@ -1,4 +1,4 @@
-:_content-type: ASSEMBLY
+:_mod-docs-content-type: ASSEMBLY
[id="understanding-development"]
= Understanding {product-title} development
include::_attributes/common-attributes.adoc[]
diff --git a/authentication/assuming-an-aws-iam-role-for-a-service-account.adoc b/authentication/assuming-an-aws-iam-role-for-a-service-account.adoc
index 80c43b7cb1c8..54484e2e03a9 100644
--- a/authentication/assuming-an-aws-iam-role-for-a-service-account.adoc
+++ b/authentication/assuming-an-aws-iam-role-for-a-service-account.adoc
@@ -1,4 +1,4 @@
-:_content-type: ASSEMBLY
+:_mod-docs-content-type: ASSEMBLY
[id="assuming-an-aws-iam-role-for-a-service-account"]
= Assuming an AWS IAM role for a service account
include::_attributes/common-attributes.adoc[]
@@ -11,11 +11,12 @@ toc::[]
[role="_abstract"]
ifdef::openshift-rosa[]
-{product-title} clusters that use the AWS Security Token Service (STS) include a pod identity webhook for use with pods that run in user-defined projects.
+In {product-title} clusters that use the AWS Security Token Service (STS), you can enable the OpenShift API server to project signed service account tokens that pods can use to assume an AWS Identity and Access Management (IAM) role. If the assumed IAM role has the required AWS permissions, the pods can authenticate against the AWS API by using temporary STS credentials to perform AWS operations.
endif::openshift-rosa[]
-You can use the pod identity webhook to enable a service account to automatically assume an AWS Identity and Access Management (IAM) role in your own pods. If the assumed IAM role has the required AWS permissions, the pods can run AWS SDK operations by using temporary STS credentials.
+You can use the pod identity webhook to project service account tokens to assume an AWS Identity and Access Management (IAM) role for your own workloads. If the assumed IAM role has the required AWS permissions, the pods can run AWS SDK operations by using temporary STS credentials.
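+
+For example, the IAM role to assume is typically referenced through an annotation on the service account. The following is a minimal sketch; the annotation shown is the one consumed by the pod identity webhook, and all names and the role ARN are placeholders:
+
+[source,yaml]
+----
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: <service_account_name>
+  namespace: <project_name>
+  annotations:
+    # IAM role that pods using this service account can assume
+    eks.amazonaws.com/role-arn: arn:aws:iam::<aws_account_id>:role/<iam_role_name>
+----
+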
+include::modules/how-service-accounts-assume-aws-iam-roles-in-sre-owned-projects.adoc[leveloffset=+1]
include::modules/understanding-pod-identity-webhook-workflow-in-user-defined-projects.adoc[leveloffset=+1]
include::modules/assuming-an-aws-iam-role-in-your-own-pods.adoc[leveloffset=+1]
include::modules/setting-up-an-aws-iam-role-a-service-account.adoc[leveloffset=+2]
@@ -37,5 +38,5 @@ include::modules/verifying-the-assumed-iam-role-in-your-pod.adoc[leveloffset=+2]
* For more information about installing and using the AWS Boto3 SDK for Python, see the link:https://boto3.amazonaws.com/v1/documentation/api/latest/index.html[AWS Boto3 documentation].
ifdef::openshift-rosa,openshift-dedicated[]
-* For general information about webhook admission plugins for OpenShift, see link:https://docs.openshift.com/container-platform/4.13/architecture/admission-plug-ins.html#admission-webhooks-about_admission-plug-ins[Webhook admission plugins] in the OpenShift Container Platform documentation.
+* For general information about webhook admission plugins for OpenShift, see link:https://docs.openshift.com/container-platform/4.15/architecture/admission-plug-ins.html#admission-webhooks-about_admission-plug-ins[Webhook admission plugins] in the OpenShift Container Platform documentation.
endif::openshift-rosa,openshift-dedicated[]
diff --git a/authentication/bound-service-account-tokens.adoc b/authentication/bound-service-account-tokens.adoc
index c16fda7a3f0d..783913260f46 100644
--- a/authentication/bound-service-account-tokens.adoc
+++ b/authentication/bound-service-account-tokens.adoc
@@ -1,4 +1,4 @@
-:_content-type: ASSEMBLY
+:_mod-docs-content-type: ASSEMBLY
[id="bound-service-account-tokens"]
= Using bound service account tokens
include::_attributes/common-attributes.adoc[]
@@ -20,7 +20,10 @@ include::modules/bound-sa-tokens-configuring-externally.adoc[leveloffset=+1]
[role="_additional-resources"]
.Additional resources
+// This xref target does not exist in the OSD/ROSA docs.
+ifndef::openshift-dedicated,openshift-rosa[]
* xref:../nodes/nodes/nodes-nodes-rebooting.adoc#nodes-nodes-rebooting-gracefully_nodes-nodes-rebooting[Rebooting a node gracefully]
+endif::openshift-dedicated,openshift-rosa[]
* xref:../authentication/understanding-and-creating-service-accounts.adoc#service-accounts-managing_understanding-service-accounts[Creating service accounts]
diff --git a/authentication/configuring-internal-oauth.adoc b/authentication/configuring-internal-oauth.adoc
index 7e3f86c9e602..2d37dfb0e118 100644
--- a/authentication/configuring-internal-oauth.adoc
+++ b/authentication/configuring-internal-oauth.adoc
@@ -1,4 +1,4 @@
-:_content-type: ASSEMBLY
+:_mod-docs-content-type: ASSEMBLY
[id="configuring-internal-oauth"]
= Configuring the internal OAuth server
include::_attributes/common-attributes.adoc[]
diff --git a/authentication/configuring-ldap-failover.adoc b/authentication/configuring-ldap-failover.adoc
index ede202898a53..5558ccb42d60 100644
--- a/authentication/configuring-ldap-failover.adoc
+++ b/authentication/configuring-ldap-failover.adoc
@@ -1,4 +1,4 @@
-:_content-type: ASSEMBLY
+:_mod-docs-content-type: ASSEMBLY
[id="configuring-ldap-failover"]
= Configuring LDAP failover
include::_attributes/common-attributes.adoc[]
diff --git a/authentication/configuring-oauth-clients.adoc b/authentication/configuring-oauth-clients.adoc
index 2059ef4293b6..f41836a8276b 100644
--- a/authentication/configuring-oauth-clients.adoc
+++ b/authentication/configuring-oauth-clients.adoc
@@ -1,4 +1,4 @@
-:_content-type: ASSEMBLY
+:_mod-docs-content-type: ASSEMBLY
[id="configuring-oauth-clients"]
= Configuring OAuth clients
include::_attributes/common-attributes.adoc[]
diff --git a/authentication/dedicated-understanding-authentication.adoc b/authentication/dedicated-understanding-authentication.adoc
index 7d27e9512e64..23927ffdaa5f 100644
--- a/authentication/dedicated-understanding-authentication.adoc
+++ b/authentication/dedicated-understanding-authentication.adoc
@@ -1,4 +1,4 @@
-:_content-type: ASSEMBLY
+:_mod-docs-content-type: ASSEMBLY
[id="understanding-identity-provider"]
= Understanding identity provider configuration
include::_attributes/common-attributes.adoc[]
diff --git a/authentication/identity_providers/configuring-basic-authentication-identity-provider.adoc b/authentication/identity_providers/configuring-basic-authentication-identity-provider.adoc
index bad3240b4239..70691f0be455 100644
--- a/authentication/identity_providers/configuring-basic-authentication-identity-provider.adoc
+++ b/authentication/identity_providers/configuring-basic-authentication-identity-provider.adoc
@@ -1,4 +1,4 @@
-:_content-type: ASSEMBLY
+:_mod-docs-content-type: ASSEMBLY
[id="configuring-basic-authentication-identity-provider"]
= Configuring a basic authentication identity provider
include::_attributes/common-attributes.adoc[]
diff --git a/authentication/identity_providers/configuring-github-identity-provider.adoc b/authentication/identity_providers/configuring-github-identity-provider.adoc
index 76a1b23f4c54..e170b8ff627b 100644
--- a/authentication/identity_providers/configuring-github-identity-provider.adoc
+++ b/authentication/identity_providers/configuring-github-identity-provider.adoc
@@ -1,4 +1,4 @@
-:_content-type: ASSEMBLY
+:_mod-docs-content-type: ASSEMBLY
[id="configuring-github-identity-provider"]
= Configuring a GitHub or GitHub Enterprise identity provider
include::_attributes/common-attributes.adoc[]
diff --git a/authentication/identity_providers/configuring-gitlab-identity-provider.adoc b/authentication/identity_providers/configuring-gitlab-identity-provider.adoc
index 023dd8dec0fb..75f8dc914a08 100644
--- a/authentication/identity_providers/configuring-gitlab-identity-provider.adoc
+++ b/authentication/identity_providers/configuring-gitlab-identity-provider.adoc
@@ -1,4 +1,4 @@
-:_content-type: ASSEMBLY
+:_mod-docs-content-type: ASSEMBLY
[id="configuring-gitlab-identity-provider"]
= Configuring a GitLab identity provider
include::_attributes/common-attributes.adoc[]
diff --git a/authentication/identity_providers/configuring-google-identity-provider.adoc b/authentication/identity_providers/configuring-google-identity-provider.adoc
index 90faa932bff9..b447062ec2b5 100644
--- a/authentication/identity_providers/configuring-google-identity-provider.adoc
+++ b/authentication/identity_providers/configuring-google-identity-provider.adoc
@@ -1,4 +1,4 @@
-:_content-type: ASSEMBLY
+:_mod-docs-content-type: ASSEMBLY
[id="configuring-google-identity-provider"]
= Configuring a Google identity provider
include::_attributes/common-attributes.adoc[]
diff --git a/authentication/identity_providers/configuring-htpasswd-identity-provider.adoc b/authentication/identity_providers/configuring-htpasswd-identity-provider.adoc
index f13cfb919b85..2a7a2882f7c8 100644
--- a/authentication/identity_providers/configuring-htpasswd-identity-provider.adoc
+++ b/authentication/identity_providers/configuring-htpasswd-identity-provider.adoc
@@ -1,4 +1,4 @@
-:_content-type: ASSEMBLY
+:_mod-docs-content-type: ASSEMBLY
[id="configuring-htpasswd-identity-provider"]
= Configuring an htpasswd identity provider
include::_attributes/common-attributes.adoc[]
diff --git a/authentication/identity_providers/configuring-keystone-identity-provider.adoc b/authentication/identity_providers/configuring-keystone-identity-provider.adoc
index 1bac30ad85a5..a53b1779cf06 100644
--- a/authentication/identity_providers/configuring-keystone-identity-provider.adoc
+++ b/authentication/identity_providers/configuring-keystone-identity-provider.adoc
@@ -1,4 +1,4 @@
-:_content-type: ASSEMBLY
+:_mod-docs-content-type: ASSEMBLY
[id="configuring-keystone-identity-provider"]
= Configuring a Keystone identity provider
include::_attributes/common-attributes.adoc[]
diff --git a/authentication/identity_providers/configuring-ldap-identity-provider.adoc b/authentication/identity_providers/configuring-ldap-identity-provider.adoc
index b659386195b6..dce6d697af01 100644
--- a/authentication/identity_providers/configuring-ldap-identity-provider.adoc
+++ b/authentication/identity_providers/configuring-ldap-identity-provider.adoc
@@ -1,4 +1,4 @@
-:_content-type: ASSEMBLY
+:_mod-docs-content-type: ASSEMBLY
[id="configuring-ldap-identity-provider"]
= Configuring an LDAP identity provider
include::_attributes/common-attributes.adoc[]
diff --git a/authentication/identity_providers/configuring-oidc-identity-provider.adoc b/authentication/identity_providers/configuring-oidc-identity-provider.adoc
index 2c3e74fba472..9482be8b009f 100644
--- a/authentication/identity_providers/configuring-oidc-identity-provider.adoc
+++ b/authentication/identity_providers/configuring-oidc-identity-provider.adoc
@@ -1,4 +1,4 @@
-:_content-type: ASSEMBLY
+:_mod-docs-content-type: ASSEMBLY
[id="configuring-oidc-identity-provider"]
= Configuring an OpenID Connect identity provider
include::_attributes/common-attributes.adoc[]
diff --git a/authentication/identity_providers/configuring-request-header-identity-provider.adoc b/authentication/identity_providers/configuring-request-header-identity-provider.adoc
index ee20455070b4..2cea646b461b 100644
--- a/authentication/identity_providers/configuring-request-header-identity-provider.adoc
+++ b/authentication/identity_providers/configuring-request-header-identity-provider.adoc
@@ -1,4 +1,4 @@
-:_content-type: ASSEMBLY
+:_mod-docs-content-type: ASSEMBLY
[id="configuring-request-header-identity-provider"]
= Configuring a request header identity provider
include::_attributes/common-attributes.adoc[]
diff --git a/authentication/impersonating-system-admin.adoc b/authentication/impersonating-system-admin.adoc
index 32843c9f3a2d..69f0c8625e21 100644
--- a/authentication/impersonating-system-admin.adoc
+++ b/authentication/impersonating-system-admin.adoc
@@ -1,4 +1,4 @@
-:_content-type: ASSEMBLY
+:_mod-docs-content-type: ASSEMBLY
[id="impersonating-system-admin"]
= Impersonating the system:admin user
include::_attributes/common-attributes.adoc[]
diff --git a/authentication/index.adoc b/authentication/index.adoc
index 691ea227852a..7004a1417235 100644
--- a/authentication/index.adoc
+++ b/authentication/index.adoc
@@ -9,7 +9,14 @@ include::modules/authentication-authorization-common-terms.adoc[leveloffset=+1]
[id="authentication-overview"]
== About authentication in {product-title}
-To control access to an {product-title} cluster, a cluster administrator can configure xref:../authentication/understanding-authentication.adoc#understanding-authentication[user authentication] and ensure only approved users access the cluster.
+To control access to an {product-title} cluster,
+ifndef::openshift-dedicated,openshift-rosa[]
+a cluster administrator
+endif::openshift-dedicated,openshift-rosa[]
+ifdef::openshift-dedicated,openshift-rosa[]
+an administrator with the `dedicated-admin` role
+endif::openshift-dedicated,openshift-rosa[]
+can configure xref:../authentication/understanding-authentication.adoc#understanding-authentication[user authentication] and ensure only approved users access the cluster.
To interact with an {product-title} cluster, users must first authenticate to the {product-title} API in some way. You can authenticate by providing an xref:../authentication/understanding-authentication.adoc#rbac-api-authentication_understanding-authentication[OAuth access token or an X.509 client certificate] in your requests to the {product-title} API.
@@ -17,15 +24,23 @@ To interact with an {product-title} cluster, users must first authenticate to th
====
If you do not present a valid access token or certificate, your request is unauthenticated and you receive an HTTP 401 error.
====
+
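+For example, you can retrieve the OAuth access token for your current session with the `oc` CLI and present it as a bearer token in a direct API request. This is an illustrative sketch; the API server URL is a placeholder, and `-k` skips TLS verification for the example only:
+
+[source,terminal]
+----
+$ TOKEN=$(oc whoami -t)
+$ curl -k -H "Authorization: Bearer $TOKEN" \
+  "https://api.<cluster_domain>:6443/apis/user.openshift.io/v1/users/~"
+----
+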
+ifdef::openshift-dedicated,openshift-rosa[]
+An administrator can configure authentication by configuring an identity provider. You can define any xref:../authentication/sd-configuring-identity-providers.adoc#understanding-idp-supported_sd-configuring-identity-providers[supported identity provider in {product-title}] and add it to your cluster.
+endif::openshift-dedicated,openshift-rosa[]
+
+ifndef::openshift-dedicated,openshift-rosa[]
An administrator can configure authentication through the following tasks:
* Configuring an identity provider: You can define any xref:../authentication/understanding-identity-provider.adoc#supported-identity-providers[supported identity provider in {product-title}] and add it to your cluster.
-* xref:../authentication/configuring-internal-oauth.adoc#configuring-internal-oauth[Configuring the internal OAuth server]: The {product-title} control plane includes a built-in OAuth server that determines the user’s identity from the configured identity provider and creates an access token. You can configure the token duration and inactivity timeout, and customize the internal OAuth server URL.
+
+* xref:../authentication/configuring-internal-oauth.adoc#configuring-internal-oauth[Configuring the internal OAuth server]: The {product-title} control plane includes a built-in OAuth server that determines the user's identity from the configured identity provider and creates an access token. You can configure the token duration and inactivity timeout, and customize the internal OAuth server URL.
+
[NOTE]
====
Users can xref:../authentication/managing-oauth-access-tokens.adoc#managing-oauth-access-tokens[view and manage OAuth tokens owned by them].
====
+
* Registering an OAuth client: {product-title} includes several xref:../authentication/configuring-oauth-clients.adoc#oauth-default-clients_configuring-oauth-clients[default OAuth clients]. You can xref:../authentication/configuring-oauth-clients.adoc#oauth-register-additional-client_configuring-oauth-clients[register and configure additional OAuth clients].
+
[NOTE]
@@ -35,6 +50,7 @@ When users send a request for an OAuth token, they must specify either a default
* Managing cloud provider credentials using the xref:../authentication/managing_cloud_provider_credentials/about-cloud-credential-operator.adoc#about-cloud-credential-operator[Cloud Credentials Operator]: Cluster components use cloud provider credentials to get permissions required to perform cluster-related tasks.
* Impersonating a system admin user: You can grant cluster administrator permissions to a user by xref:../authentication/impersonating-system-admin.adoc#impersonating-system-admin[impersonating a system admin user].
+endif::openshift-dedicated,openshift-rosa[]
[id="authorization-overview"]
== About authorization in {product-title}
@@ -49,14 +65,35 @@ Along with controlling user access to a cluster, you can also control the action
You can manage authorization for {product-title} through the following tasks:
* Viewing xref:../authentication/using-rbac.adoc#viewing-local-roles_using-rbac[local] and xref:../authentication/using-rbac.adoc#viewing-cluster-roles_using-rbac[cluster] roles and bindings.
+
* Creating a xref:../authentication/using-rbac.adoc#creating-local-role_using-rbac[local role] and assigning it to a user or group.
+
+ifndef::openshift-dedicated,openshift-rosa[]
* Creating a cluster role and assigning it to a user or group: {product-title} includes a set of xref:../authentication/using-rbac.adoc#default-roles_using-rbac[default cluster roles]. You can create additional xref:../authentication/using-rbac.adoc#creating-cluster-role_using-rbac[cluster roles] and xref:../authentication/using-rbac.adoc#adding-roles_using-rbac[add them to a user or group].
+endif::openshift-dedicated,openshift-rosa[]
+ifdef::openshift-dedicated,openshift-rosa[]
+* Assigning a cluster role to a user or group: {product-title} includes a set of xref:../authentication/using-rbac.adoc#default-roles_using-rbac[default cluster roles]. You can xref:../authentication/using-rbac.adoc#adding-roles_using-rbac[add them to a user or group].
+endif::openshift-dedicated,openshift-rosa[]
+
+ifndef::openshift-dedicated,openshift-rosa[]
* Creating a cluster-admin user: By default, your cluster has only one cluster administrator called `kubeadmin`. You can xref:../authentication/using-rbac.adoc#creating-cluster-admin_using-rbac[create another cluster administrator]. Before creating a cluster administrator, ensure that you have configured an identity provider.
+
[NOTE]
====
After creating the cluster admin user, xref:../authentication/remove-kubeadmin.adoc#removing-kubeadmin_removing-kubeadmin[delete the existing kubeadmin user] to improve cluster security.
====
+endif::openshift-dedicated,openshift-rosa[]
+
+ifdef::openshift-rosa[]
+* Creating cluster-admin and dedicated-admin users: The user who created the {product-title} cluster can grant access to other xref:../authentication/using-rbac.adoc#rosa-create-cluster-admins_using-rbac[`cluster-admin`] and xref:../authentication/using-rbac.adoc#rosa-create-dedicated-cluster-admins_using-rbac[`dedicated-admin`] users.
+endif::openshift-rosa[]
+
+ifdef::openshift-dedicated[]
+* Granting administrator privileges to users: You can xref:../authentication/using-rbac.adoc#osd-grant-admin-privileges_using-rbac[grant `dedicated-admin` privileges to users].
+endif::openshift-dedicated[]
+
* Creating service accounts: xref:../authentication/understanding-and-creating-service-accounts.adoc#service-accounts-overview_understanding-service-accounts[Service accounts] provide a flexible way to control API access without sharing a regular user’s credentials. A user can xref:../authentication/understanding-and-creating-service-accounts.adoc#service-accounts-managing_understanding-service-accounts[create and use a service account in applications] and also as xref:../authentication/using-service-accounts-as-oauth-client.adoc#using-service-accounts-as-oauth-client[an OAuth client].
+
* xref:../authentication/tokens-scoping.adoc#tokens-scoping[Scoping tokens]: A scoped token is a token that identifies as a specific user who can perform only specific operations. You can create scoped tokens to delegate some of your permissions to another user or a service account.
+
* Syncing LDAP groups: You can manage user groups in one place by xref:../authentication/ldap-syncing.adoc#ldap-syncing[syncing the groups stored in an LDAP server] with the {product-title} user groups.
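+
+As a brief illustration of the role-related tasks in this list, creating a local role and granting it to a user maps to the following commands. This is a minimal sketch; the role, project, and user names are placeholders:
+
+[source,terminal]
+----
+$ oc create role pod-viewer --verb=get,list,watch --resource=pods -n <project>
+$ oc adm policy add-role-to-user pod-viewer <username> --role-namespace=<project> -n <project>
+----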
diff --git a/authentication/ldap-syncing.adoc b/authentication/ldap-syncing.adoc
index 9d788b1540e9..802f055b0f45 100644
--- a/authentication/ldap-syncing.adoc
+++ b/authentication/ldap-syncing.adoc
@@ -1,4 +1,4 @@
-:_content-type: ASSEMBLY
+:_mod-docs-content-type: ASSEMBLY
[id="ldap-syncing"]
= Syncing LDAP groups
include::_attributes/common-attributes.adoc[]
@@ -9,6 +9,9 @@ toc::[]
ifdef::openshift-enterprise,openshift-webscale,openshift-origin[]
As an administrator,
endif::[]
+ifdef::openshift-dedicated,openshift-rosa[]
+As an administrator with the `dedicated-admin` role,
+endif::openshift-dedicated,openshift-rosa[]
you can use groups to manage users, change
their permissions, and enhance collaboration. Your organization may have already
created user groups and stored them in an LDAP server. {product-title} can sync
@@ -17,8 +20,15 @@ your groups in one place. {product-title} currently supports group sync with
LDAP servers using three common schemas for defining group membership: RFC 2307,
Active Directory, and augmented Active Directory.
+ifndef::openshift-dedicated,openshift-rosa[]
For more information on configuring LDAP, see
xref:../authentication/identity_providers/configuring-ldap-identity-provider.adoc#configuring-ldap-identity-provider[Configuring an LDAP identity provider].
+endif::openshift-dedicated,openshift-rosa[]
+
+ifdef::openshift-dedicated,openshift-rosa[]
+For more information on configuring LDAP, see
+xref:../authentication/sd-configuring-identity-providers.adoc#config-ldap-idp_sd-configuring-identity-providers[Configuring an LDAP identity provider].
+endif::openshift-dedicated,openshift-rosa[]
ifdef::openshift-enterprise,openshift-webscale,openshift-origin[]
[NOTE]
@@ -26,6 +36,12 @@ ifdef::openshift-enterprise,openshift-webscale,openshift-origin[]
You must have `cluster-admin` privileges to sync groups.
====
endif::[]
+ifdef::openshift-dedicated,openshift-rosa[]
+[NOTE]
+====
+You must have `dedicated-admin` privileges to sync groups.
+====
+endif::openshift-dedicated,openshift-rosa[]
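+
+To make the schema options concrete, the following sync configuration is a minimal sketch for the RFC 2307 schema; the URL and all distinguished names are placeholders:
+
+[source,yaml]
+----
+kind: LDAPSyncConfig
+apiVersion: v1
+url: ldap://ldap.example.com:389
+rfc2307:
+  groupsQuery:
+    baseDN: "ou=groups,dc=example,dc=com"
+    scope: sub
+    derefAliases: never
+    filter: (objectClass=groupOfNames)
+  groupUIDAttribute: dn
+  groupNameAttributes: [ cn ]
+  groupMembershipAttributes: [ member ]
+  usersQuery:
+    baseDN: "ou=users,dc=example,dc=com"
+    scope: sub
+    derefAliases: never
+  userUIDAttribute: dn
+  userNameAttributes: [ uid ]
+----
+
+Running `oc adm groups sync --sync-config=<file>` prints the groups that would change; adding `--confirm` applies the changes.
+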
include::modules/ldap-syncing-about.adoc[leveloffset=+1]
include::modules/ldap-syncing-config-rfc2307.adoc[leveloffset=+2]
@@ -37,6 +53,8 @@ include::modules/ldap-syncing-running-openshift.adoc[leveloffset=+2]
include::modules/ldap-syncing-running-subset.adoc[leveloffset=+2]
include::modules/ldap-syncing-pruning.adoc[leveloffset=+1]
+// OSD and ROSA dedicated-admins cannot create the cluster roles and cluster role bindings required for this procedure.
+ifndef::openshift-dedicated,openshift-rosa[]
// Automatically syncing LDAP groups
include::modules/ldap-auto-syncing.adoc[leveloffset=+1]
@@ -45,6 +63,7 @@ include::modules/ldap-auto-syncing.adoc[leveloffset=+1]
* xref:../authentication/identity_providers/configuring-ldap-identity-provider.adoc#configuring-ldap-identity-provider[Configuring an LDAP identity provider]
* xref:../nodes/jobs/nodes-nodes-jobs.adoc#nodes-nodes-jobs-creating-cron_nodes-nodes-jobs[Creating cron jobs]
+endif::openshift-dedicated,openshift-rosa[]
include::modules/ldap-syncing-examples.adoc[leveloffset=+1]
include::modules/ldap-syncing-rfc2307.adoc[leveloffset=+2]
diff --git a/authentication/managing-oauth-access-tokens.adoc b/authentication/managing-oauth-access-tokens.adoc
index 10867b018e6f..7995ff4dd33d 100644
--- a/authentication/managing-oauth-access-tokens.adoc
+++ b/authentication/managing-oauth-access-tokens.adoc
@@ -1,4 +1,4 @@
-:_content-type: ASSEMBLY
+:_mod-docs-content-type: ASSEMBLY
[id="managing-oauth-access-tokens"]
= Managing user-owned OAuth access tokens
include::_attributes/common-attributes.adoc[]
diff --git a/authentication/managing-security-context-constraints.adoc b/authentication/managing-security-context-constraints.adoc
index 638cbba748b4..0a67c077b59f 100644
--- a/authentication/managing-security-context-constraints.adoc
+++ b/authentication/managing-security-context-constraints.adoc
@@ -1,4 +1,4 @@
-:_content-type: ASSEMBLY
+:_mod-docs-content-type: ASSEMBLY
[id="managing-pod-security-policies"]
= Managing security context constraints
include::_attributes/common-attributes.adoc[]
@@ -12,12 +12,12 @@ Default SCCs are created during installation and when you install some Operators
[IMPORTANT]
====
-Do not modify the default SCCs. Customizing the default SCCs can lead to issues when some of the platform pods deploy or
+Do not modify the default SCCs. Customizing the default SCCs can lead to issues when some of the platform pods deploy or
ifndef::openshift-rosa[]
-{product-title}
+{product-title}
endif::[]
ifdef::openshift-rosa[]
-ROSA
+ROSA
endif::openshift-rosa[]
is upgraded. Additionally, the default SCC values are reset to the defaults during some cluster upgrades, which discards all customizations to those SCCs.
ifdef::openshift-origin,openshift-enterprise,openshift-webscale,openshift-dedicated,openshift-rosa[]
@@ -37,6 +37,9 @@ include::modules/security-context-constraints-about.adoc[leveloffset=+1]
include::modules/security-context-constraints-pre-allocated-values.adoc[leveloffset=+1]
include::modules/security-context-constraints-example.adoc[leveloffset=+1]
include::modules/security-context-constraints-creating.adoc[leveloffset=+1]
+
+// Configuring a workload to require a specific SCC
+include::modules/security-context-constraints-requiring.adoc[leveloffset=+1]
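+
+As a sketch of what requiring a specific SCC looks like, a workload can set the `openshift.io/required-scc` annotation on its pod template; the workload and image names below are placeholders:
+
+[source,yaml]
+----
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: <workload_name>
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      app: <workload_name>
+  template:
+    metadata:
+      labels:
+        app: <workload_name>
+      annotations:
+        openshift.io/required-scc: restricted-v2 # admission fails if this SCC cannot be used
+    spec:
+      containers:
+      - name: <workload_name>
+        image: <image>
+----
+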
include::modules/security-context-constraints-rbac.adoc[leveloffset=+1]
include::modules/security-context-constraints-command-reference.adoc[leveloffset=+1]
@@ -44,12 +47,4 @@ include::modules/security-context-constraints-command-reference.adoc[leveloffset
[id="additional-resources_configuring-internal-oauth"]
== Additional resources
-ifndef::openshift-dedicated,openshift-rosa[]
-* xref:../support/getting-support.adoc#getting-support[Getting support]
-endif::[]
-ifdef::openshift-dedicated[]
-* xref:../osd_architecture/osd-support.adoc#osd-getting-support[Getting support]
-endif::[]
-ifdef::openshift-rosa[]
-* xref:../rosa_architecture/rosa-getting-support.adoc#rosa-getting-support[Getting support]
-endif::[]
+* xref:../support/getting-support.adoc#getting-support[Getting support]
\ No newline at end of file
diff --git a/authentication/managing_cloud_provider_credentials/about-cloud-credential-operator.adoc b/authentication/managing_cloud_provider_credentials/about-cloud-credential-operator.adoc
index 5a608835d7c3..f2ab1c4f1308 100644
--- a/authentication/managing_cloud_provider_credentials/about-cloud-credential-operator.adoc
+++ b/authentication/managing_cloud_provider_credentials/about-cloud-credential-operator.adoc
@@ -1,4 +1,4 @@
-:_content-type: ASSEMBLY
+:_mod-docs-content-type: ASSEMBLY
[id="about-cloud-credential-operator"]
= About the Cloud Credential Operator
include::_attributes/common-attributes.adoc[]
@@ -19,67 +19,73 @@ By setting different values for the `credentialsMode` parameter in the `install-
* **xref:../../authentication/managing_cloud_provider_credentials/cco-mode-passthrough.adoc#cco-mode-passthrough[Passthrough]**: In passthrough mode, the CCO passes the provided cloud credential to the components that request cloud credentials.
-* **xref:../../authentication/managing_cloud_provider_credentials/cco-mode-manual.adoc#cco-mode-manual[Manual]**: In manual mode, a user manages cloud credentials instead of the CCO.
+* **xref:../../authentication/managing_cloud_provider_credentials/cco-mode-manual.adoc#cco-mode-manual[Manual mode with long-term credentials for components]**: In manual mode, you can manage long-term cloud credentials instead of the CCO.
-** **xref:../../authentication/managing_cloud_provider_credentials/cco-mode-sts.adoc#cco-mode-sts[Manual with AWS Security Token Service]**: In manual mode, you can configure an AWS cluster to use Amazon Web Services Security Token Service (AWS STS). With this configuration, the CCO uses temporary credentials for different components.
-
-** **xref:../../authentication/managing_cloud_provider_credentials/cco-mode-gcp-workload-identity.adoc#cco-mode-gcp-workload-identity[Manual with GCP Workload Identity]**: In manual mode, you can configure a GCP cluster to use GCP Workload Identity. With this configuration, the CCO uses temporary credentials for different components.
+* **xref:../../authentication/managing_cloud_provider_credentials/cco-short-term-creds.adoc#cco-short-term-creds[Manual mode with short-term credentials for components]**: For some providers, you can use the CCO utility (`ccoctl`) during installation to implement short-term credentials for individual components. These credentials are created and managed outside the {product-title} cluster.
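+
+For example, the mode is selected with a single field in the `install-config.yaml` file. The following fragment is illustrative and omits all other required installation parameters:
+
+[source,yaml]
+----
+apiVersion: v1
+baseDomain: example.com
+credentialsMode: Manual # one of Mint, Passthrough, or Manual
+----
+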
.CCO mode support matrix
-[cols="<.^2,^.^1,^.^1,^.^1"]
+[cols="<.^2,^.^1,^.^1,^.^1,^.^1"]
|====
-|Cloud provider |Mint |Passthrough |Manual
+|Cloud provider |Mint |Passthrough |Manual with long-term credentials |Manual with short-term credentials
|{alibaba}
|
|
-|X
+|X ^[1]^
+|
|Amazon Web Services (AWS)
|X
|X
|X
+|X
+|Global Microsoft Azure
+|
+|X
+|X
+|X
-|Microsoft Azure
+|Microsoft Azure Stack Hub
+|
|
-|X ^[1]^
|X
+|
|Google Cloud Platform (GCP)
|X
|X
|X
+|X
-|IBM Cloud
+|{ibm-cloud-name}
|
|
-|X
+|X ^[1]^
+|
|Nutanix
|
|
-|X
+|X ^[1]^
+|
|{rh-openstack-first}
|
|X
|
-
-|{rh-virtualization-first}
-|
-|X
|
|VMware vSphere
|
|X
|
+|
|====
[.small]
--
-1. Manual mode is the only supported CCO configuration for Microsoft Azure Stack Hub.
+1. This platform uses the `ccoctl` utility during installation to configure long-term credentials.
--
[id="cco-determine-mode_{context}"]
@@ -88,7 +94,7 @@ By setting different values for the `credentialsMode` parameter in the `install-
For platforms that support using the CCO in multiple modes, you can determine what mode the CCO is configured to use by using the web console or the CLI.
.Determining the CCO configuration
-image::334_OpenShift_cluster_updating_and_CCO_workflows_0523_4.11_A.png[Decision tree showing how to determine the configured CCO credentials mode for your cluster.]
+image::334_OpenShift_cluster_updating_and_CCO_workflows_0923_4.11_A.png[Decision tree showing how to determine the configured CCO credentials mode for your cluster.]
//Determining the Cloud Credential Operator mode by using the web console
include::modules/cco-determine-mode-gui.adoc[leveloffset=+2]
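+
+On the command line, the configured mode is recorded in the cluster-scoped `CloudCredential` resource. A quick check might look like the following sketch:
+
+[source,terminal]
+----
+$ oc get cloudcredentials cluster -o jsonpath='{.spec.credentialsMode}'
+----
+
+An empty value indicates that the CCO is operating in its default mode for the platform.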
@@ -102,11 +108,11 @@ For platforms on which multiple modes are supported (AWS, Azure, and GCP), when
By default, the CCO determines whether the credentials are sufficient for mint mode, which is the preferred mode of operation, and uses those credentials to create appropriate credentials for components in the cluster. If the credentials are not sufficient for mint mode, it determines whether they are sufficient for passthrough mode. If the credentials are not sufficient for passthrough mode, the CCO cannot adequately process `CredentialsRequest` CRs.
-If the provided credentials are determined to be insufficient during installation, the installation fails. For AWS, the installer fails early in the process and indicates which required permissions are missing. Other providers might not provide specific information about the cause of the error until errors are encountered.
+If the provided credentials are determined to be insufficient during installation, the installation fails. For AWS, the installation program fails early in the process and indicates which required permissions are missing. Other providers might not provide specific information about the cause of the error until errors are encountered.
If the credentials are changed after a successful installation and the CCO determines that the new credentials are insufficient, the CCO puts conditions on any new `CredentialsRequest` CRs to indicate that it cannot process them because of the insufficient credentials.
-To resolve insufficient credentials issues, provide a credential with sufficient permissions. If an error occurred during installation, try installing again. For issues with new `CredentialsRequest` CRs, wait for the CCO to try to process the CR again. As an alternative, you can manually create IAM for xref:../../installing/installing_aws/manually-creating-iam.adoc#manually-creating-iam-aws[AWS], xref:../../installing/installing_azure/manually-creating-iam-azure.adoc#manually-creating-iam-azure[Azure], and xref:../../installing/installing_gcp/manually-creating-iam-gcp.adoc#manually-creating-iam-gcp[GCP].
+To resolve insufficient credentials issues, provide a credential with sufficient permissions. If an error occurred during installation, try installing again. For issues with new `CredentialsRequest` CRs, wait for the CCO to try to process the CR again. As an alternative, you can configure your cluster to use a different CCO mode that is supported for your cloud provider.
[role="_additional-resources"]
[id="additional-resources_about-cloud-credential-operator_{context}"]
diff --git a/authentication/managing_cloud_provider_credentials/cco-mode-manual.adoc b/authentication/managing_cloud_provider_credentials/cco-mode-manual.adoc
index 5b403c38979a..20e42f123603 100644
--- a/authentication/managing_cloud_provider_credentials/cco-mode-manual.adoc
+++ b/authentication/managing_cloud_provider_credentials/cco-mode-manual.adoc
@@ -1,48 +1,36 @@
-:_content-type: ASSEMBLY
+:_mod-docs-content-type: ASSEMBLY
[id="cco-mode-manual"]
-= Using manual mode
+= Manual mode with long-term credentials for components
include::_attributes/common-attributes.adoc[]
:context: cco-mode-manual
toc::[]
-Manual mode is supported for Alibaba Cloud, Amazon Web Services (AWS), Microsoft Azure, IBM Cloud, and Google Cloud Platform (GCP).
+Manual mode is supported for Alibaba Cloud, Amazon Web Services (AWS), global Microsoft Azure, Microsoft Azure Stack Hub, Google Cloud Platform (GCP), {ibm-cloud-name}, and Nutanix.
-In manual mode, a user manages cloud credentials instead of the Cloud Credential Operator (CCO). To use this mode, you must examine the `CredentialsRequest` CRs in the release image for the version of {product-title} that you are running or installing, create corresponding credentials in the underlying cloud provider, and create Kubernetes Secrets in the correct namespaces to satisfy all `CredentialsRequest` CRs for the cluster's cloud provider.
+[id="manual-mode-classic_{context}"]
+== User-managed credentials
-Using manual mode allows each cluster component to have only the permissions it requires, without storing an administrator-level credential in the cluster. This mode also does not require connectivity to the AWS public IAM endpoint. However, you must manually reconcile permissions with new release images for every upgrade.
+In manual mode, a user manages cloud credentials instead of the Cloud Credential Operator (CCO). To use this mode, you must examine the `CredentialsRequest` CRs in the release image for the version of {product-title} that you are running or installing, create corresponding credentials in the underlying cloud provider, and create Kubernetes Secrets in the correct namespaces to satisfy all `CredentialsRequest` CRs for the cluster's cloud provider. Some platforms use the CCO utility (`ccoctl`) to facilitate this process during installation and updates.
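+
+For example, the `CredentialsRequest` CRs for a given release image can be extracted with `oc adm release extract`; the output directory and release image pullspec here are placeholders:
+
+[source,terminal]
+----
+$ oc adm release extract \
+  --credentials-requests \
+  --cloud=aws \
+  --to=<credentials_requests_dir> \
+  quay.io/openshift-release-dev/ocp-release:<version>-x86_64
+----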
-For information about configuring your cloud provider to use manual mode, see the manual credentials management options for your cloud provider:
+Using manual mode with long-term credentials allows each cluster component to have only the permissions it requires, without storing an administrator-level credential in the cluster. This mode also does not require connectivity to services such as the AWS public IAM endpoint. However, you must manually reconcile permissions with new release images for every upgrade.
-* xref:../../installing/installing_alibaba/manually-creating-alibaba-ram.adoc#manually-creating-alibaba-ram[Manually creating RAM resources for Alibaba Cloud]
-* xref:../../installing/installing_aws/manually-creating-iam.adoc#manually-creating-iam-aws[Manually creating IAM for AWS]
-* xref:../../installing/installing_azure/manually-creating-iam-azure.adoc#manually-creating-iam-azure[Manually creating IAM for Azure]
-* xref:../../installing/installing_gcp/manually-creating-iam-gcp.adoc#manually-creating-iam-gcp[Manually creating IAM for GCP]
-* xref:../../installing/installing_ibm_cloud_public/configuring-iam-ibm-cloud.adoc#configuring-iam-ibm-cloud[Configuring IAM for IBM Cloud]
-* xref:../../installing/installing_nutanix/installing-nutanix-installer-provisioned.adoc#manually-create-iam-nutanix_installing-nutanix-installer-provisioned[Configuring IAM for Nutanix]
-
-[id="manual-mode-sts-blurb"]
-== Manual mode with cloud credentials created and managed outside of the cluster
-
-An AWS or GCP cluster that uses manual mode might be configured to create and manage cloud credentials from outside of the cluster using the AWS Security Token Service (STS) or GCP Workload Identity. With this configuration, the CCO uses temporary credentials for different components.
-
-For more information, see xref:../../authentication/managing_cloud_provider_credentials/cco-mode-sts.adoc#cco-mode-sts[Using manual mode with Amazon Web Services Security Token Service] or xref:../../authentication/managing_cloud_provider_credentials/cco-mode-gcp-workload-identity.adoc#cco-mode-gcp-workload-identity[Using manual mode with GCP Workload Identity].
-
-//Updating cloud provider resources with manually maintained credentials
-include::modules/manually-maintained-credentials-upgrade.adoc[leveloffset=+1]
+For information about configuring your cloud provider to use manual mode, see the manual credentials management options for your cloud provider.
-//Indicating that the cluster is ready to upgrade
-include::modules/cco-manual-upgrade-annotation.adoc[leveloffset=+2]
+[NOTE]
+====
+An AWS, global Azure, or GCP cluster that uses manual mode might be configured to use short-term credentials for different components. For more information, see xref:../../authentication/managing_cloud_provider_credentials/cco-short-term-creds.adoc#cco-short-term-creds[Manual mode with short-term credentials for components].
+====
[role="_additional-resources"]
[id="additional-resources_cco-mode-manual"]
== Additional resources
* xref:../../installing/installing_alibaba/manually-creating-alibaba-ram.adoc#manually-creating-alibaba-ram[Manually creating RAM resources for Alibaba Cloud]
-* xref:../../installing/installing_aws/manually-creating-iam.adoc#manually-creating-iam-aws[Manually creating IAM for AWS]
-* xref:../../authentication/managing_cloud_provider_credentials/cco-mode-sts.adoc#cco-mode-sts[Using manual mode with Amazon Web Services Security Token Service]
-* xref:../../installing/installing_azure/manually-creating-iam-azure.adoc#manually-creating-iam-azure[Manually creating IAM for Azure]
-* xref:../../installing/installing_gcp/manually-creating-iam-gcp.adoc#manually-creating-iam-gcp[Manually creating IAM for GCP]
-* xref:../../authentication/managing_cloud_provider_credentials/cco-mode-gcp-workload-identity.adoc#cco-mode-gcp-workload-identity[Using manual mode with GCP Workload Identity]
-* xref:../../installing/installing_ibm_cloud_public/configuring-iam-ibm-cloud.adoc#configuring-iam-ibm-cloud[Configuring IAM for IBM Cloud]
+* xref:../../installing/installing_aws/installing-aws-customizations.adoc#manually-create-iam_installing-aws-customizations[Manually creating long-term credentials for AWS]
+* xref:../../installing/installing_azure/installing-azure-customizations.adoc#manually-create-iam_installing-azure-customizations[Manually creating long-term credentials for Azure]
+* xref:../../installing/installing_gcp/installing-gcp-customizations.adoc#manually-create-iam_installing-gcp-customizations[Manually creating long-term credentials for GCP]
+* xref:../../installing/installing_ibm_cloud_public/configuring-iam-ibm-cloud.adoc#configuring-iam-ibm-cloud[Configuring IAM for {ibm-cloud-name}]
* xref:../../installing/installing_nutanix/installing-nutanix-installer-provisioned.adoc#manually-create-iam-nutanix_installing-nutanix-installer-provisioned[Configuring IAM for Nutanix]
+* xref:../../authentication/managing_cloud_provider_credentials/cco-short-term-creds.adoc#cco-short-term-creds[Manual mode with short-term credentials for components]
+* xref:../../updating/preparing_for_updates/preparing-manual-creds-update.adoc#preparing-manual-creds-update[Preparing to update a cluster with manually maintained credentials]
\ No newline at end of file
diff --git a/authentication/managing_cloud_provider_credentials/cco-mode-mint.adoc b/authentication/managing_cloud_provider_credentials/cco-mode-mint.adoc
index fd756d2e74d6..8b8486703abe 100644
--- a/authentication/managing_cloud_provider_credentials/cco-mode-mint.adoc
+++ b/authentication/managing_cloud_provider_credentials/cco-mode-mint.adoc
@@ -1,27 +1,34 @@
-:_content-type: ASSEMBLY
+:_mod-docs-content-type: ASSEMBLY
[id="cco-mode-mint"]
-= Using mint mode
+= The Cloud Credential Operator in mint mode
include::_attributes/common-attributes.adoc[]
:context: cco-mode-mint
toc::[]
-Mint mode is supported for Amazon Web Services (AWS) and Google Cloud Platform (GCP).
+Mint mode is the default Cloud Credential Operator (CCO) credentials mode for {product-title} on platforms that support it. Mint mode supports Amazon Web Services (AWS) and Google Cloud Platform (GCP) clusters.
-Mint mode is the default mode on the platforms for which it is supported. In this mode, the Cloud Credential Operator (CCO) uses the provided administrator-level cloud credential to create new credentials for components in the cluster with only the specific permissions that are required.
+[id="mint-mode-about"]
+== Mint mode credentials management
-If the credential is not removed after installation, it is stored and used by the CCO to process `CredentialsRequest` CRs for components in the cluster and create new credentials for each with only the specific permissions that are required. The continuous reconciliation of cloud credentials in mint mode allows actions that require additional credentials or permissions, such as upgrading, to proceed.
+For clusters that use the CCO in mint mode, the administrator-level credential is stored in the `kube-system` namespace. The CCO uses the `admin` credential to process the `CredentialsRequest` objects in the cluster and to create new users with limited permissions for each component.
-Mint mode stores the administrator-level credential in the cluster `kube-system` namespace. If this approach does not meet the security requirements of your organization, see _Alternatives to storing administrator-level secrets in the kube-system project_ for xref:../../installing/installing_aws/manually-creating-iam.adoc#alternatives-to-storing-admin-secrets-in-kube-system_manually-creating-iam-aws[AWS] or xref:../../installing/installing_gcp/manually-creating-iam-gcp.adoc#alternatives-to-storing-admin-secrets-in-kube-system_manually-creating-iam-gcp[GCP].
+With mint mode, each cluster component has only the specific permissions it requires. The automatic, continuous reconciliation of cloud credentials in mint mode allows actions that require additional credentials or permissions, such as upgrading, to proceed.
+
+[NOTE]
+====
+By default, mint mode requires storing the `admin` credential in the cluster `kube-system` namespace. If this approach does not meet the security requirements of your organization, you can xref:../../post_installation_configuration/cluster-tasks.adoc#manually-removing-cloud-creds_post-install-cluster-tasks[remove the credential after installing the cluster].
+====
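+
+To illustrate what the CCO reconciles in this mode, a `CredentialsRequest` object pairs a set of requested cloud permissions with a target secret, roughly as follows; the component names and the permissions shown are placeholders:
+
+[source,yaml]
+----
+apiVersion: cloudcredential.openshift.io/v1
+kind: CredentialsRequest
+metadata:
+  name: <component_credentials_request>
+  namespace: openshift-cloud-credential-operator
+spec:
+  providerSpec:
+    apiVersion: cloudcredential.openshift.io/v1
+    kind: AWSProviderSpec
+    statementEntries:
+    - effect: Allow
+      action:
+      - s3:CreateBucket
+      - s3:DeleteBucket
+      resource: "*"
+  secretRef:
+    name: <component_secret>
+    namespace: <component_namespace>
+----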
[id="mint-mode-permissions"]
-== Mint mode permissions requirements
+=== Mint mode permissions requirements
When using the CCO in mint mode, ensure that the credential you provide meets the requirements of the cloud on which you are running or installing {product-title}. If the provided credentials are not sufficient for mint mode, the CCO cannot create an IAM user.
-[id="mint-mode-permissions-aws"]
-=== Amazon Web Services (AWS) permissions
-The credential you provide for mint mode in AWS must have the following permissions:
+The credential you provide for mint mode in Amazon Web Services (AWS) must have the following permissions:
+.Required AWS permissions
+[%collapsible]
+====
* `iam:CreateAccessKey`
* `iam:CreateUser`
* `iam:DeleteAccessKey`
@@ -33,37 +40,36 @@ The credential you provide for mint mode in AWS must have the following permissi
* `iam:PutUserPolicy`
* `iam:TagUser`
* `iam:SimulatePrincipalPolicy`
+====
-[id="mint-mode-permissions-gcp"]
-=== Google Cloud Platform (GCP) permissions
-The credential you provide for mint mode in GCP must have the following permissions:
+The credential you provide for mint mode in Google Cloud Platform (GCP) must have the following permissions:
+.Required GCP permissions
+[%collapsible]
+====
* `resourcemanager.projects.get`
* `serviceusage.services.list`
* `iam.serviceAccountKeys.create`
* `iam.serviceAccountKeys.delete`
+* `iam.serviceAccountKeys.list`
* `iam.serviceAccounts.create`
* `iam.serviceAccounts.delete`
* `iam.serviceAccounts.get`
+* `iam.roles.create`
* `iam.roles.get`
+* `iam.roles.list`
+* `iam.roles.undelete`
+* `iam.roles.update`
* `resourcemanager.projects.getIamPolicy`
* `resourcemanager.projects.setIamPolicy`
+====
//Admin credentials root secret format
-include::modules/admin-credentials-root-secret-formats.adoc[leveloffset=+1]
-
-//Mint Mode with removal or rotation of the admin credential
-include::modules/mint-mode-with-removal-of-admin-credential.adoc[leveloffset=+1]
+include::modules/admin-credentials-root-secret-formats.adoc[leveloffset=+2]
//Rotating cloud provider credentials manually
-include::modules/manually-rotating-cloud-creds.adoc[leveloffset=+2]
-
-//Removing cloud provider credentials
-include::modules/manually-removing-cloud-creds.adoc[leveloffset=+2]
-
+include::modules/manually-rotating-cloud-creds.adoc[leveloffset=+1]
[role="_additional-resources"]
== Additional resources
-
-* xref:../../installing/installing_aws/manually-creating-iam.adoc#alternatives-to-storing-admin-secrets-in-kube-system_manually-creating-iam-aws[Alternatives to storing administrator-level secrets in the kube-system project] for AWS
-* xref:../../installing/installing_gcp/manually-creating-iam-gcp.adoc#alternatives-to-storing-admin-secrets-in-kube-system_manually-creating-iam-gcp[Alternatives to storing administrator-level secrets in the kube-system project] for GCP
+* xref:../../post_installation_configuration/cluster-tasks.adoc#manually-removing-cloud-creds_post-install-cluster-tasks[Removing cloud provider credentials]
\ No newline at end of file
diff --git a/authentication/managing_cloud_provider_credentials/cco-mode-passthrough.adoc b/authentication/managing_cloud_provider_credentials/cco-mode-passthrough.adoc
index f0d19bfa73d4..c8302db17af6 100644
--- a/authentication/managing_cloud_provider_credentials/cco-mode-passthrough.adoc
+++ b/authentication/managing_cloud_provider_credentials/cco-mode-passthrough.adoc
@@ -1,12 +1,12 @@
-:_content-type: ASSEMBLY
+:_mod-docs-content-type: ASSEMBLY
[id="cco-mode-passthrough"]
-= Using passthrough mode
+= The Cloud Credential Operator in passthrough mode
include::_attributes/common-attributes.adoc[]
:context: cco-mode-passthrough
toc::[]
-Passthrough mode is supported for Amazon Web Services (AWS), Microsoft Azure, Google Cloud Platform (GCP), {rh-openstack-first}, {rh-virtualization-first}, and VMware vSphere.
+Passthrough mode is supported for Amazon Web Services (AWS), Microsoft Azure, Google Cloud Platform (GCP), {rh-openstack-first}, and VMware vSphere.
In passthrough mode, the Cloud Credential Operator (CCO) passes the provided cloud credential to the components that request cloud credentials. The credential must have permissions to perform the installation and complete the operations that are required by components in the cluster, but does not need to be able to create new credentials. The CCO does not attempt to create additional limited-scoped credentials in passthrough mode.
@@ -23,35 +23,24 @@ When using the CCO in passthrough mode, ensure that the credential you provide m
=== Amazon Web Services (AWS) permissions
The credential you provide for passthrough mode in AWS must have all the requested permissions for all `CredentialsRequest` CRs that are required by the version of {product-title} you are running or installing.
-To locate the `CredentialsRequest` CRs that are required, see xref:../../installing/installing_aws/manually-creating-iam.adoc#manually-creating-iam-aws[Manually creating IAM for AWS].
+To locate the `CredentialsRequest` CRs that are required, see xref:../../installing/installing_aws/installing-aws-customizations.adoc#manually-create-iam_installing-aws-customizations[Manually creating long-term credentials for AWS].
[id="passthrough-mode-permissions-azure"]
=== Microsoft Azure permissions
The credential you provide for passthrough mode in Azure must have all the requested permissions for all `CredentialsRequest` CRs that are required by the version of {product-title} you are running or installing.
-To locate the `CredentialsRequest` CRs that are required, see xref:../../installing/installing_azure/manually-creating-iam-azure.adoc#manually-creating-iam-azure[Manually creating IAM for Azure].
+To locate the `CredentialsRequest` CRs that are required, see xref:../../installing/installing_azure/installing-azure-customizations.adoc#manually-create-iam_installing-azure-customizations[Manually creating long-term credentials for Azure].
[id="passthrough-mode-permissions-gcp"]
=== Google Cloud Platform (GCP) permissions
The credential you provide for passthrough mode in GCP must have all the requested permissions for all `CredentialsRequest` CRs that are required by the version of {product-title} you are running or installing.
-To locate the `CredentialsRequest` CRs that are required, see xref:../../installing/installing_gcp/manually-creating-iam-gcp.adoc#manually-creating-iam-gcp[Manually creating IAM for GCP].
+To locate the `CredentialsRequest` CRs that are required, see xref:../../installing/installing_gcp/installing-gcp-customizations.adoc#manually-create-iam_installing-gcp-customizations[Manually creating long-term credentials for GCP].
[id="passthrough-mode-permissions-rhosp"]
=== {rh-openstack-first} permissions
To install an {product-title} cluster on {rh-openstack}, the CCO requires a credential with the permissions of a `member` user role.
-[id="passthrough-mode-permissions-rhv"]
-=== {rh-virtualization-first} permissions
-To install an {product-title} cluster on {rh-virtualization}, the CCO requires a credential with the following privileges:
-
-* `DiskOperator`
-* `DiskCreator`
-* `UserTemplateBasedVm`
-* `TemplateOwner`
-* `TemplateCreator`
-* `ClusterAdmin` on the specific cluster that is targeted for {product-title} deployment
-
[id="passthrough-mode-permissions-vsware"]
=== VMware vSphere permissions
To install an {product-title} cluster on VMware vSphere, the CCO requires a credential with the following vSphere privileges:
@@ -92,7 +81,7 @@ include::modules/admin-credentials-root-secret-formats.adoc[leveloffset=+1]
[id="passthrough-mode-maintenance"]
== Passthrough mode credential maintenance
-If `CredentialsRequest` CRs change over time as the cluster is upgraded, you must manually update the passthrough mode credential to meet the requirements. To avoid credentials issues during an upgrade, check the `CredentialsRequest` CRs in the release image for the new version of {product-title} before upgrading. To locate the `CredentialsRequest` CRs that are required for your cloud provider, see _Manually creating IAM_ for xref:../../installing/installing_aws/manually-creating-iam.adoc#manually-creating-iam-aws[AWS], xref:../../installing/installing_azure/manually-creating-iam-azure.adoc#manually-creating-iam-azure[Azure], or xref:../../installing/installing_gcp/manually-creating-iam-gcp.adoc#manually-creating-iam-gcp[GCP].
+If `CredentialsRequest` CRs change over time as the cluster is upgraded, you must manually update the passthrough mode credential to meet the requirements. To avoid credentials issues during an upgrade, check the `CredentialsRequest` CRs in the release image for the new version of {product-title} before upgrading. To locate the `CredentialsRequest` CRs that are required for your cloud provider, see _Manually creating long-term credentials_ for xref:../../installing/installing_aws/installing-aws-customizations.adoc#manually-create-iam_installing-aws-customizations[AWS], xref:../../installing/installing_azure/installing-azure-customizations.adoc#manually-create-iam_installing-azure-customizations[Azure], or xref:../../installing/installing_gcp/installing-gcp-customizations.adoc#manually-create-iam_installing-gcp-customizations[GCP].
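+
+For example, you can extract the `CredentialsRequest` CRs from a release image by using the OpenShift CLI. This is a sketch; the release image and output directory are placeholders:
+
+[source,terminal]
+----
+$ oc adm release extract \
+  --credentials-requests \
+  --cloud=aws \
+  --to=<credentials_requests_dir> \
+  <release_image>
+----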
//Rotating cloud provider credentials manually
include::modules/manually-rotating-cloud-creds.adoc[leveloffset=+2]
@@ -107,11 +96,11 @@ When using passthrough mode, each component has the same permissions used by all
After installation, you can reduce the permissions on your credential to only those that are required to run the cluster, as defined by the `CredentialsRequest` CRs in the release image for the version of {product-title} that you are using.
-To locate the `CredentialsRequest` CRs that are required for AWS, Azure, or GCP and learn how to change the permissions the CCO uses, see _Manually creating IAM_ for xref:../../installing/installing_aws/manually-creating-iam.adoc#manually-creating-iam-aws[AWS], xref:../../installing/installing_azure/manually-creating-iam-azure.adoc#manually-creating-iam-azure[Azure], or xref:../../installing/installing_gcp/manually-creating-iam-gcp.adoc#manually-creating-iam-gcp[GCP].
+To locate the `CredentialsRequest` CRs that are required for AWS, Azure, or GCP and learn how to change the permissions the CCO uses, see _Manually creating long-term credentials_ for xref:../../installing/installing_aws/installing-aws-customizations.adoc#manually-create-iam_installing-aws-customizations[AWS], xref:../../installing/installing_azure/installing-azure-customizations.adoc#manually-create-iam_installing-azure-customizations[Azure], or xref:../../installing/installing_gcp/installing-gcp-customizations.adoc#manually-create-iam_installing-gcp-customizations[GCP].
[role="_additional-resources"]
== Additional resources
-* xref:../../installing/installing_aws/manually-creating-iam.adoc#manually-creating-iam-aws[Manually creating IAM for AWS]
-* xref:../../installing/installing_azure/manually-creating-iam-azure.adoc#manually-creating-iam-azure[Manually creating IAM for Azure]
-* xref:../../installing/installing_gcp/manually-creating-iam-gcp.adoc#manually-creating-iam-gcp[Manually creating IAM for GCP]
+* xref:../../installing/installing_aws/installing-aws-customizations.adoc#manually-create-iam_installing-aws-customizations[Manually creating long-term credentials for AWS]
+* xref:../../installing/installing_azure/installing-azure-customizations.adoc#manually-create-iam_installing-azure-customizations[Manually creating long-term credentials for Azure]
+* xref:../../installing/installing_gcp/installing-gcp-customizations.adoc#manually-create-iam_installing-gcp-customizations[Manually creating long-term credentials for GCP]
diff --git a/authentication/managing_cloud_provider_credentials/cco-short-term-creds.adoc b/authentication/managing_cloud_provider_credentials/cco-short-term-creds.adoc
new file mode 100644
index 000000000000..0b9f184277a1
--- /dev/null
+++ b/authentication/managing_cloud_provider_credentials/cco-short-term-creds.adoc
@@ -0,0 +1,113 @@
+:_mod-docs-content-type: ASSEMBLY
+[id="cco-short-term-creds"]
+= Manual mode with short-term credentials for components
+include::_attributes/common-attributes.adoc[]
+:context: cco-short-term-creds
+
+toc::[]
+
+During installation, you can configure the Cloud Credential Operator (CCO) to operate in manual mode and use the CCO utility (`ccoctl`) to implement short-term security credentials for individual components. These credentials are created and managed outside the {product-title} cluster.
+
+[NOTE]
+====
+This credentials strategy is supported for Amazon Web Services (AWS), Google Cloud Platform (GCP), and global Microsoft Azure only. The strategy must be configured during installation of a new {product-title} cluster. You cannot configure an existing cluster that uses a different credentials strategy to use this feature.
+====
+
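+As an example of this workflow on AWS, the `ccoctl` utility can create the required IAM resources and component secrets in one pass. This is a sketch; the cluster name, region, and directory are placeholders:
+
+[source,terminal]
+----
+$ ccoctl aws create-all \
+  --name=<cluster_name> \
+  --region=<aws_region> \
+  --credentials-requests-dir=<credentials_requests_dir>
+----
+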
+//todo: Should provide some more info about the benefits of this here as well. Note: Azure is not yet limited-priv, but still gets the benefit of not storing root creds on the cluster and some sort of time-based rotation
+
+Cloud providers use different terms for their implementation of this authentication method.
+
+.Short-term credentials provider terminology
+|====
+|Cloud provider |Provider nomenclature
+
+|Amazon Web Services (AWS)
+|AWS Security Token Service (STS)
+
+|Google Cloud Platform (GCP)
+|GCP Workload Identity
+
+|Global Microsoft Azure
+|Azure AD Workload Identity
+
+|====
+
+[id="cco-short-term-creds-aws_{context}"]
+== AWS Security Token Service
+
+In manual mode with STS, the individual {product-title} cluster components use the AWS Security Token Service (STS) to assign components IAM roles that provide short-term, limited-privilege security credentials. These credentials are associated with IAM roles that are specific to each component that makes AWS API calls.
+
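+A component secret that uses this mechanism points to the IAM role and the projected service account token, similar to the following sketch. The namespace, secret name, and role ARN are placeholders:
+
+[source,yaml]
+----
+apiVersion: v1
+kind: Secret
+metadata:
+  namespace: <target_namespace>
+  name: <target_secret_name>
+stringData:
+  credentials: |-
+    [default]
+    sts_regional_endpoints = regional
+    role_arn = <component_role_arn>
+    web_identity_token_file = /var/run/secrets/openshift/serviceaccount/token
+----
+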
+[role="_additional-resources"]
+.Additional resources
+* xref:../../installing/installing_aws/installing-aws-customizations.adoc#installing-aws-with-short-term-creds_installing-aws-customizations[Configuring an AWS cluster to use short-term credentials]
+
+//AWS Security Token Service authentication process
+include::modules/cco-short-term-creds-auth-flow-aws.adoc[leveloffset=+2]
+
+//AWS component secret formats
+include::modules/cco-short-term-creds-format-aws.adoc[leveloffset=+2]
+
+//AWS component secret permissions requirements
+include::modules/cco-short-term-creds-component-permissions-aws.adoc[leveloffset=+2]
+
+//OLM-managed Operator support for authentication with AWS STS
+include::modules/cco-short-term-creds-aws-olm.adoc[leveloffset=+2]
+
+[role="_additional-resources"]
+.Additional resources
+* xref:../../operators/operator_sdk/osdk-token-auth.adoc#osdk-cco-aws-sts_osdk-token-auth[CCO-based workflow for OLM-managed Operators with AWS STS]
+
+[id="cco-short-term-creds-gcp_{context}"]
+== GCP Workload Identity
+
+In manual mode with GCP Workload Identity, the individual {product-title} cluster components use the GCP workload identity provider to allow components to impersonate GCP service accounts using short-term, limited-privilege credentials.
+
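+A component secret for this mechanism typically holds a GCP external account configuration that exchanges the projected service account token for short-term credentials, similar to the following sketch. The audience and service account values are placeholders:
+
+[source,yaml]
+----
+apiVersion: v1
+kind: Secret
+metadata:
+  namespace: <target_namespace>
+  name: <target_secret_name>
+stringData:
+  service_account.json: |-
+    {
+      "type": "external_account",
+      "audience": "//iam.googleapis.com/projects/<project_number>/locations/global/workloadIdentityPools/<pool_id>/providers/<provider_id>",
+      "subject_token_type": "urn:ietf:params:oauth:token-type:jwt",
+      "token_url": "https://sts.googleapis.com/v1/token",
+      "service_account_impersonation_url": "https://iamcredentials.googleapis.com/v1/projects/-/serviceAccounts/<service_account_email>:generateAccessToken",
+      "credential_source": {
+        "file": "/var/run/secrets/openshift/serviceaccount/token",
+        "format": { "type": "text" }
+      }
+    }
+----
+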
+[role="_additional-resources"]
+.Additional resources
+* xref:../../installing/installing_gcp/installing-gcp-customizations.adoc#installing-gcp-with-short-term-creds_installing-gcp-customizations[Configuring a GCP cluster to use short-term credentials]
+
+//GCP Workload Identity authentication process
+include::modules/cco-short-term-creds-auth-flow-gcp.adoc[leveloffset=+2]
+
+//GCP component secret formats
+include::modules/cco-short-term-creds-format-gcp.adoc[leveloffset=+2]
+
+//GCP component secret permissions requirements (placeholder)
+//include::modules/cco-short-term-creds-component-permissions-gcp.adoc[leveloffset=+2]
+
+[id="cco-short-term-creds-azure_{context}"]
+== Azure AD Workload Identity
+
+In manual mode with Azure AD Workload Identity, the individual {product-title} cluster components use the Azure AD workload identity provider to assign components short-term security credentials.
+
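+A component secret for this mechanism typically carries the client, tenant, and subscription IDs together with the path to the projected federated token, similar to the following sketch. All IDs are placeholders:
+
+[source,yaml]
+----
+apiVersion: v1
+kind: Secret
+metadata:
+  namespace: <target_namespace>
+  name: <target_secret_name>
+stringData:
+  azure_client_id: <client_id>
+  azure_federated_token_file: /var/run/secrets/openshift/serviceaccount/token
+  azure_region: <region>
+  azure_subscription_id: <subscription_id>
+  azure_tenant_id: <tenant_id>
+----
+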
+[role="_additional-resources"]
+.Additional resources
+* xref:../../installing/installing_azure/installing-azure-customizations.adoc#installing-azure-with-short-term-creds_installing-azure-customizations[Configuring a global Microsoft Azure cluster to use short-term credentials]
+
+//Azure AD Workload Identity authentication process
+include::modules/cco-short-term-creds-auth-flow-azure.adoc[leveloffset=+2]
+
+//Azure component secret formats
+include::modules/cco-short-term-creds-format-azure.adoc[leveloffset=+2]
+
+//Azure component secret permissions requirements
+include::modules/cco-short-term-creds-component-permissions-azure.adoc[leveloffset=+2]
+
+//OLM-managed Operator support for authentication with Azure AD Workload Identity
+include::modules/cco-short-term-creds-azure-olm.adoc[leveloffset=+2]
+
+////
+// Azure will need a link off to OLM docs like AWS when ready.
+[role="_additional-resources"]
+.Additional resources
+* xref:../../operators/operator_sdk/osdk-token-auth.adoc#osdk-cco-aws-sts_osdk-token-auth[CCO-based workflow for OLM-managed Operators with AWS STS]
+////
+
+[role="_additional-resources"]
+[id="additional-resources_{context}"]
+== Additional resources
+
+* xref:../../installing/installing_aws/installing-aws-customizations.adoc#installing-aws-with-short-term-creds_installing-aws-customizations[Configuring an AWS cluster to use short-term credentials]
+* xref:../../installing/installing_gcp/installing-gcp-customizations.adoc#installing-gcp-with-short-term-creds_installing-gcp-customizations[Configuring a GCP cluster to use short-term credentials]
+* xref:../../installing/installing_azure/installing-azure-customizations.adoc#installing-azure-with-short-term-creds_installing-azure-customizations[Configuring a global Microsoft Azure cluster to use short-term credentials]
+* xref:../../updating/preparing_for_updates/preparing-manual-creds-update.adoc#preparing-manual-creds-update[Preparing to update a cluster with manually maintained credentials]
\ No newline at end of file
diff --git a/osd_cluster_admin/osd-admin-roles.adoc b/authentication/osd-admin-roles.adoc
similarity index 90%
rename from osd_cluster_admin/osd-admin-roles.adoc
rename to authentication/osd-admin-roles.adoc
index 12184ce21acb..f600adf8ea16 100644
--- a/osd_cluster_admin/osd-admin-roles.adoc
+++ b/authentication/osd-admin-roles.adoc
@@ -1,4 +1,4 @@
-:_content-type: ASSEMBLY
+:_mod-docs-content-type: ASSEMBLY
[id="osd-admin-roles"]
= Managing administration roles and users
include::_attributes/attributes-openshift-dedicated.adoc[]
diff --git a/authentication/remove-kubeadmin.adoc b/authentication/remove-kubeadmin.adoc
index 7557e91e4820..19addc10ca9d 100644
--- a/authentication/remove-kubeadmin.adoc
+++ b/authentication/remove-kubeadmin.adoc
@@ -1,4 +1,4 @@
-:_content-type: ASSEMBLY
+:_mod-docs-content-type: ASSEMBLY
[id="removing-kubeadmin"]
= Removing the kubeadmin user
include::_attributes/common-attributes.adoc[]
diff --git a/authentication/sd-configuring-identity-providers.adoc b/authentication/sd-configuring-identity-providers.adoc
new file mode 100644
index 000000000000..1a55e98fd52e
--- /dev/null
+++ b/authentication/sd-configuring-identity-providers.adoc
@@ -0,0 +1,33 @@
+:_mod-docs-content-type: ASSEMBLY
+[id="sd-configuring-identity-providers"]
+= Configuring identity providers
+include::_attributes/attributes-openshift-dedicated.adoc[]
+:context: sd-configuring-identity-providers
+
+toc::[]
+
+After your {product-title} cluster is created, you must configure identity providers to determine how users log in to access the cluster.
+
+ifdef::openshift-rosa[]
+The following topics describe how to configure an identity provider using {cluster-manager} console. Alternatively, you can use the ROSA CLI (`rosa`) to configure an identity provider and access the cluster.
+endif::openshift-rosa[]
+
+include::modules/understanding-idp.adoc[leveloffset=+1]
+include::modules/identity-provider-parameters.adoc[leveloffset=+2]
+include::modules/config-github-idp.adoc[leveloffset=+1]
+include::modules/config-gitlab-idp.adoc[leveloffset=+1]
+include::modules/config-google-idp.adoc[leveloffset=+1]
+include::modules/config-ldap-idp.adoc[leveloffset=+1]
+include::modules/config-openid-idp.adoc[leveloffset=+1]
+include::modules/config-htpasswd-idp.adoc[leveloffset=+1]
+ifdef::openshift-dedicated[]
+include::modules/access-cluster.adoc[leveloffset=+1]
+endif::openshift-dedicated[]
+
+ifdef::openshift-rosa[]
+[id="additional-resources-cluster-access-sts"]
+[role="_additional-resources"]
+== Additional resources
+* xref:../rosa_install_access_delete_clusters/rosa-sts-accessing-cluster.adoc#rosa-sts-accessing-cluster[Accessing a cluster]
+* xref:../rosa_getting_started/rosa-sts-getting-started-workflow.adoc#rosa-sts-understanding-the-deployment-workflow[Understanding the ROSA with STS deployment workflow]
+endif::openshift-rosa[]
diff --git a/authentication/tokens-scoping.adoc b/authentication/tokens-scoping.adoc
index 126481d224da..291b44877b99 100644
--- a/authentication/tokens-scoping.adoc
+++ b/authentication/tokens-scoping.adoc
@@ -1,4 +1,4 @@
-:_content-type: ASSEMBLY
+:_mod-docs-content-type: ASSEMBLY
[id="tokens-scoping"]
= Scoping tokens
include::_attributes/common-attributes.adoc[]
diff --git a/authentication/understanding-and-creating-service-accounts.adoc b/authentication/understanding-and-creating-service-accounts.adoc
index 86fb149c7c2e..ed1854708470 100644
--- a/authentication/understanding-and-creating-service-accounts.adoc
+++ b/authentication/understanding-and-creating-service-accounts.adoc
@@ -1,4 +1,4 @@
-:_content-type: ASSEMBLY
+:_mod-docs-content-type: ASSEMBLY
[id="understanding-and-creating-service-accounts"]
= Understanding and creating service accounts
include::_attributes/common-attributes.adoc[]
diff --git a/authentication/understanding-and-managing-pod-security-admission.adoc b/authentication/understanding-and-managing-pod-security-admission.adoc
index 4bf7dcd953c2..a64a065175bc 100644
--- a/authentication/understanding-and-managing-pod-security-admission.adoc
+++ b/authentication/understanding-and-managing-pod-security-admission.adoc
@@ -1,4 +1,4 @@
-:_content-type: ASSEMBLY
+:_mod-docs-content-type: ASSEMBLY
[id="understanding-and-managing-pod-security-admission"]
= Understanding and managing pod security admission
include::_attributes/common-attributes.adoc[]
@@ -8,17 +8,33 @@ toc::[]
Pod security admission is an implementation of the link:https://kubernetes.io/docs/concepts/security/pod-security-standards/[Kubernetes pod security standards]. Use pod security admission to restrict the behavior of pods.
-// Security context constraint synchronization with pod security standards
+// About pod security admission
+include::modules/security-context-constraints-psa-about.adoc[leveloffset=+1]
+
+// About pod security admission synchronization
include::modules/security-context-constraints-psa-synchronization.adoc[leveloffset=+1]
+// Pod security admission synchronization namespace exclusions
+include::modules/security-context-constraints-psa-sync-exclusions.adoc[leveloffset=+2]
+
// Controlling pod security admission synchronization
include::modules/security-context-constraints-psa-opting.adoc[leveloffset=+1]
+.Additional resources
+
+* xref:../authentication/understanding-and-managing-pod-security-admission.adoc#security-context-constraints-psa-sync-exclusions_understanding-and-managing-pod-security-admission[Pod security admission synchronization namespace exclusions]
+
+// Configuring pod security admission for a namespace
+include::modules/security-context-constraints-psa-label.adoc[leveloffset=+1]
+
// About pod security admission alerts
include::modules/security-context-constraints-psa-rectifying.adoc[leveloffset=+1]
+// OSD and ROSA dedicated-admin users cannot use the must-gather tool.
+ifndef::openshift-dedicated,openshift-rosa[]
// Identifying pod security violations
include::modules/security-context-constraints-psa-alert-eval.adoc[leveloffset=+2]
+endif::openshift-dedicated,openshift-rosa[]
[role="_additional-resources"]
[id="additional-resources_managing-pod-security-admission"]
diff --git a/authentication/understanding-authentication.adoc b/authentication/understanding-authentication.adoc
index 6f438338cc28..975a771f0169 100644
--- a/authentication/understanding-authentication.adoc
+++ b/authentication/understanding-authentication.adoc
@@ -1,4 +1,4 @@
-:_content-type: ASSEMBLY
+:_mod-docs-content-type: ASSEMBLY
[id="understanding-authentication"]
= Understanding authentication
include::_attributes/common-attributes.adoc[]
diff --git a/authentication/understanding-identity-provider.adoc b/authentication/understanding-identity-provider.adoc
index 1c3ee54695a3..018ae85bc88e 100644
--- a/authentication/understanding-identity-provider.adoc
+++ b/authentication/understanding-identity-provider.adoc
@@ -1,4 +1,4 @@
-:_content-type: ASSEMBLY
+:_mod-docs-content-type: ASSEMBLY
[id="understanding-identity-provider"]
= Understanding identity provider configuration
include::_attributes/common-attributes.adoc[]
diff --git a/authentication/using-rbac.adoc b/authentication/using-rbac.adoc
index 83d9ee01c65d..3f3145986574 100644
--- a/authentication/using-rbac.adoc
+++ b/authentication/using-rbac.adoc
@@ -1,4 +1,4 @@
-:_content-type: ASSEMBLY
+:_mod-docs-content-type: ASSEMBLY
[id="using-rbac"]
= Using RBAC to define and apply permissions
include::_attributes/common-attributes.adoc[]
@@ -18,16 +18,27 @@ include::modules/rbac-viewing-local-roles.adoc[leveloffset=+1]
include::modules/rbac-adding-roles.adoc[leveloffset=+1]
+ifdef::openshift-enterprise,openshift-webscale,openshift-origin,openshift-dedicated,openshift-rosa[]
include::modules/rbac-creating-local-role.adoc[leveloffset=+1]
-ifdef::openshift-enterprise,openshift-webscale,openshift-origin[]
include::modules/rbac-creating-cluster-role.adoc[leveloffset=+1]
endif::[]
include::modules/rbac-local-role-binding-commands.adoc[leveloffset=+1]
-ifdef::openshift-enterprise,openshift-webscale,openshift-origin[]
+ifdef::openshift-enterprise,openshift-webscale,openshift-origin,openshift-dedicated,openshift-rosa[]
include::modules/rbac-cluster-role-binding-commands.adoc[leveloffset=+1]
+endif::[]
+ifndef::openshift-dedicated,openshift-rosa[]
include::modules/rbac-creating-cluster-admin.adoc[leveloffset=+1]
-endif::[]
+endif::openshift-dedicated,openshift-rosa[]
+
+ifdef::openshift-rosa[]
+include::modules/rosa-create-cluster-admins.adoc[leveloffset=+1]
+include::modules/rosa-create-dedicated-cluster-admins.adoc[leveloffset=+1]
+endif::openshift-rosa[]
+
+ifdef::openshift-dedicated[]
+include::modules/osd-grant-admin-privileges.adoc[leveloffset=+1]
+endif::openshift-dedicated[]
diff --git a/authentication/using-service-accounts-as-oauth-client.adoc b/authentication/using-service-accounts-as-oauth-client.adoc
index e6f0834fd958..c8fadd1ca416 100644
--- a/authentication/using-service-accounts-as-oauth-client.adoc
+++ b/authentication/using-service-accounts-as-oauth-client.adoc
@@ -1,4 +1,4 @@
-:_content-type: ASSEMBLY
+:_mod-docs-content-type: ASSEMBLY
[id="using-service-accounts-as-oauth-client"]
= Using a service account as an OAuth client
include::_attributes/common-attributes.adoc[]
diff --git a/authentication/using-service-accounts-in-applications.adoc b/authentication/using-service-accounts-in-applications.adoc
index 73527664f998..153777c25491 100644
--- a/authentication/using-service-accounts-in-applications.adoc
+++ b/authentication/using-service-accounts-in-applications.adoc
@@ -1,4 +1,4 @@
-:_content-type: ASSEMBLY
+:_mod-docs-content-type: ASSEMBLY
[id="using-service-accounts"]
= Using service accounts in applications
include::_attributes/common-attributes.adoc[]
@@ -10,13 +10,13 @@ include::modules/service-accounts-overview.adoc[leveloffset=+1]
include::modules/service-accounts-default.adoc[leveloffset=+1]
-// remove these links for 4.12+
+include::modules/service-account-auto-secret-removed.adoc[leveloffset=+2]
.Additional resources
-* For information about requesting bound service account tokens, see xref:../authentication/bound-service-account-tokens.html#bound-sa-tokens-configuring_bound-service-account-tokens[Configuring bound service account tokens using volume projection]
+* For information about requesting bound service account tokens, see xref:../authentication/bound-service-account-tokens.adoc#bound-sa-tokens-configuring_bound-service-account-tokens[Configuring bound service account tokens using volume projection].
-* For information about creating a service account token secret, see xref:../nodes/pods/nodes-pods-secrets.html#nodes-pods-secrets-creating-sa_nodes-pods-secrets[Creating a service account token secret].
+* For information about creating a service account token secret, see xref:../nodes/pods/nodes-pods-secrets.adoc#nodes-pods-secrets-creating-sa_nodes-pods-secrets[Creating a service account token secret].
include::modules/service-accounts-creating.adoc[leveloffset=+1]
diff --git a/autopreview.sh b/autopreview.sh
index 724e19385b24..428c712821f4 100755
--- a/autopreview.sh
+++ b/autopreview.sh
@@ -8,61 +8,69 @@ GREEN='\033[0;32m'
YELLOW='\033[1;33m'
NC='\033[0m' # No Color
-USERNAME=${TRAVIS_PULL_REQUEST_SLUG::-15}
+# Check if it is a PR
+if [[ "$TRAVIS_PULL_REQUEST" == "false" ]]; then
+ echo -e "${YELLOW}❗🙅♀️ Not a Pull request. Skipping the preview.${NC}"
+ exit 0
+fi
-if [[ "$USERNAME" == "openshift-cherrypick-robot" ]]; then
+# Check if slug is empty
+if [[ -z "$TRAVIS_PULL_REQUEST_SLUG" ]]; then
+ echo -e "${YELLOW}🤖 Slug is empty this is a push build. Skipping the preview.${NC}"
+ exit 0
+fi
+
+# Check if the slug is "openshift-cherrypick-robot"
+if [[ "${TRAVIS_PULL_REQUEST_SLUG::-15}" == "openshift-cherrypick-robot" ]]; then
echo -e "${YELLOW}🤖 PR by openshift-cherrypick-robot. Skipping the preview.${NC}"
exit 0
fi
-if [[ "$TRAVIS_PULL_REQUEST" ]]; then
- # Check if modified files meet the conditions
- COMMIT_HASH="$(git rev-parse @~)"
- modified_files=$(git diff --name-only "$COMMIT_HASH")
- send_request=false
+# Check if modified files meet the conditions
+COMMIT_HASH="$(git rev-parse @~)"
+modified_files=$(git diff --name-only "$COMMIT_HASH")
+should_send_request() {
for file in $modified_files; do
- if [[ $file == *.adoc || $file == "_topic_map.yml" || $file == "_distro_map.yml" ]]; then
- send_request=true
- break
+ if [[ $file == *.adoc || $file == "_topic_map.yml" || $file == "_distro_map.yml" || $file == "_topic_maps/"* ]]; then
+ return 0
fi
done
+ return 1
+}
- if [ "$send_request" = true ]; then
- # Build the JSON
-		json_data=$(
-			cat <<EOF
-{ ... JSON payload (elided) ... }
-EOF
-		)
-		# Send the curl request
-		if response=$(curl -s -X POST -H "Content-Type: application/json" --data "$json_data" https://ocpdocs-preview-receiver.vercel.app/api/buildPreview); then
-			if echo "$response" | jq -e '.message == "Invalid data!"' >/dev/null; then
- echo -e "${RED}❌😔 Curl request failed: Invalid data!${NC}"
- echo -e "${YELLOW}$json_data${NC}"
- exit 1
- else
- echo -e "${GREEN}✅🥳 $response${NC}"
- fi
- else
- echo -e "${RED}❌😬 Curl request failed: $response${NC}"
+if should_send_request; then
+	# Build the JSON
+	json_data=$(
+		cat <<EOF
+{ ... JSON payload (elided) ... }
+EOF
+	)
+
+	# Send the curl request
+ if response=$(curl -s -X POST -H "Content-Type: application/json" --data "$json_data" https://ocpdocs-preview-receiver.vercel.app/api/buildPreview); then
+ if echo "$response" | jq -e '.message == "Invalid data!"' >/dev/null; then
+ echo -e "${RED}❌😔 Curl request failed: Invalid data!${NC}"
echo -e "${YELLOW}$json_data${NC}"
exit 1
+ else
+ echo -e "${GREEN}✅🥳 $response${NC}"
fi
-
- echo -e "${GREEN}🚀🎉 Request sent successfully!${NC}"
else
- echo -e "${YELLOW}⚠️🤔 No .adoc files, _topic_map.yml, or _distro_map.yml modified. Skipping the preview.${NC}"
+ echo -e "${RED}❌😬 Curl request failed: $response${NC}"
+ echo -e "${YELLOW}$json_data${NC}"
+ exit 1
fi
+
+ echo -e "${GREEN}🚀🎉 Request sent successfully!${NC}"
else
- echo -e "${YELLOW}❗🙅♀️ Not a Pull request. Skipping the preview.${NC}"
-fi
+ echo -e "${YELLOW}⚠️🤔 No .adoc files, _topic_map.yml, or _distro_map.yml modified. Skipping the preview.${NC}"
+fi
\ No newline at end of file
diff --git a/backup_and_restore/application_backup_and_restore/backing_up_and_restoring/backing-up-applications.adoc b/backup_and_restore/application_backup_and_restore/backing_up_and_restoring/backing-up-applications.adoc
index 64f83e388436..0d70f3b70b3a 100644
--- a/backup_and_restore/application_backup_and_restore/backing_up_and_restoring/backing-up-applications.adoc
+++ b/backup_and_restore/application_backup_and_restore/backing_up_and_restoring/backing-up-applications.adoc
@@ -1,129 +1,57 @@
-:_content-type: ASSEMBLY
+:_mod-docs-content-type: ASSEMBLY
[id="backing-up-applications"]
= Backing up applications
include::_attributes/common-attributes.adoc[]
+include::_attributes/attributes-openshift-dedicated.adoc[]
:context: backing-up-applications
toc::[]
-You back up applications by creating a `Backup` custom resource (CR). See xref:../../../backup_and_restore/application_backup_and_restore/backing_up_and_restoring/backing-up-applications.adoc#oadp-creating-backup-cr_backing-up-applications[Creating a Backup CR].
+You back up applications by creating a `Backup` custom resource (CR). See xref:../../../backup_and_restore/application_backup_and_restore/backing_up_and_restoring/oadp-creating-backup-cr.adoc#oadp-creating-backup-cr-doc[Creating a Backup CR].
-The `Backup` CR creates backup files for Kubernetes resources and internal images, on S3 object storage, and snapshots for persistent volumes (PVs), if the cloud provider uses a native snapshot API or the Container Storage Interface (CSI) to create snapshots, such as {rh-storage} 4.
+* The `Backup` CR creates backup files for Kubernetes resources and internal images on S3 object storage.
+* If your cloud provider has a native snapshot API or supports CSI snapshots, the `Backup` CR backs up persistent volumes (PVs) by creating snapshots. For more information about working with CSI snapshots, see xref:../../../backup_and_restore/application_backup_and_restore/backing_up_and_restoring/oadp-backing-up-pvs-csi-doc.adoc#oadp-backing-up-pvs-csi-doc[Backing up persistent volumes with CSI snapshots].
For more information about CSI volume snapshots, see xref:../../../storage/container_storage_interface/persistent-storage-csi-snapshots.adoc#persistent-storage-csi-snapshots[CSI volume snapshots].
-:FeatureName: The `CloudStorage` API for S3 storage
+:FeatureName: The `CloudStorage` API, which automates the creation of a bucket for object storage,
include::snippets/technology-preview.adoc[]
-* If your cloud provider has a native snapshot API or supports CSI snapshots, the `Backup` CR backs up persistent volumes (PVs) by creating snapshots. For more information about working with CSI snapshots, see xref:../../../backup_and_restore/application_backup_and_restore/backing_up_and_restoring/backing-up-applications.adoc#oadp-backing-up-pvs-csi_backing-up-applications[Backing up persistent volumes with CSI snapshots].
-
-* If your cloud provider does not support snapshots or if your applications are on NFS data volumes, you can create backups by using Restic. See xref:../../../backup_and_restore/application_backup_and_restore/backing_up_and_restoring/backing-up-applications.adoc#oadp-backing-up-applications-restic_backing-up-applications[Backing up applications with Restic].
-
-[IMPORTANT]
-====
-The {oadp-first} does not support backing up volume snapshots that were created by other software.
+[NOTE]
====
+The `CloudStorage` API is a Technology Preview feature when you use a `CloudStorage` object to have OADP automatically create an S3 bucket for use as a `BackupStorageLocation`.
-You can create backup hooks to run commands before or after the backup operation. See xref:../../../backup_and_restore/application_backup_and_restore/backing_up_and_restoring/backing-up-applications.adoc#oadp-creating-backup-hooks_backing-up-applications[Creating backup hooks].
-
-You can schedule backups by creating a `Schedule` CR instead of a `Backup` CR. See xref:../../../backup_and_restore/application_backup_and_restore/backing_up_and_restoring/backing-up-applications.adoc#oadp-scheduling-backups_backing-up-applications[Scheduling backups].
-
-include::modules/oadp-creating-backup-cr.adoc[leveloffset=+1]
-include::modules/oadp-backing-up-pvs-csi.adoc[leveloffset=+1]
-include::modules/oadp-backing-up-applications-restic.adoc[leveloffset=+1]
-include::modules/oadp-using-data-mover-for-csi-snapshots.adoc[leveloffset=+1]
-
-[id="oadp-12-data-mover-ceph"]
-== Using OADP 1.2 Data Mover with Ceph storage
-
-You can use OADP 1.2 Data Mover to backup and restore application data for clusters that use CephFS, CephRBD, or both.
+The `CloudStorage` API also supports manually creating a `BackupStorageLocation` object by specifying an existing S3 bucket. Automatic bucket creation through the `CloudStorage` API is currently enabled only for AWS S3 storage.
+====
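+
+For reference, a `CloudStorage` object that requests an automatically created AWS S3 bucket looks similar to the following sketch. The object name, bucket name, region, and credentials secret are placeholders:
+
+[source,yaml]
+----
+apiVersion: oadp.openshift.io/v1alpha1
+kind: CloudStorage
+metadata:
+  name: <cloud_storage_name>
+  namespace: openshift-adp
+spec:
+  creationSecret: # secret that holds the cloud credentials used to create the bucket
+    key: cloud
+    name: cloud-credentials
+  name: <bucket_name>
+  provider: aws
+  region: <region>
+----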
-OADP 1.2 Data Mover leverages Ceph features that support large-scale environments. One of these is the shallow copy method, which is available for {product-title} 4.12 and later. This feature supports backing up and restoring `StorageClass` and `AccessMode` resources other than what is found on the source persistent volume claim (PVC).
+* If your cloud provider does not support snapshots or if your applications are on NFS data volumes, you can create backups by using Kopia or Restic. See xref:../../../backup_and_restore/application_backup_and_restore/backing_up_and_restoring/oadp-backing-up-applications-restic-doc.adoc#oadp-backing-up-applications-restic-doc[Backing up applications with File System Backup: Kopia or Restic].
[IMPORTANT]
====
-The CephFS shallow copy feature is a back up feature. It is not part of restore operations.
+The {oadp-first} does not support backing up volume snapshots that were created by other software.
====
-include::modules/oadp-ceph-prerequisites.adoc[leveloffset=+2]
-
-[id="defining-crs-for-12-data-mover"]
-=== Defining custom resources for use with OADP 1.2 Data Mover
-
-When you install {rh-storage-first}, it automatically creates default CephFS and a CephRBD `StorageClass` and `VolumeSnapshotClass` custom resources (CRs). You must define these CRs for use with OpenShift API for Data Protection (OADP) 1.2 Data Mover.
-
-After you define the CRs, you must make several other changes to your environment before you can perform your back up and restore operations.
-
-include::modules/oadp-ceph-preparing-cephfs-crs.adoc[leveloffset=+2]
-include::modules/oadp-ceph-preparing-cephrbd-crs.adoc[leveloffset=+2]
-include::modules/oadp-ceph-preparing-crs-additional.adoc[leveloffset=+2]
-
-[id="oadp-ceph-back-up-restore-cephfs"]
-=== Backing up and restoring data using OADP 1.2 Data Mover and CephFS storage
-
-You can use OpenShift API for Data Protection (OADP) 1.2 Data Mover to back up and restore data using CephFS storage by enabling the shallow copy feature of CephFS.
-
-include::snippets/oadp-ceph-cr-prerequisites.adoc[]
+You can create backup hooks to run commands before or after the backup operation. See xref:../../../backup_and_restore/application_backup_and_restore/backing_up_and_restoring/oadp-creating-backup-hooks-doc.adoc#oadp-creating-backup-hooks-doc[Creating backup hooks].
-:context: !backing-up-applications
+You can schedule backups by creating a `Schedule` CR instead of a `Backup` CR. See xref:../../../backup_and_restore/application_backup_and_restore/backing_up_and_restoring/oadp-scheduling-backups-doc.adoc#oadp-scheduling-backups-doc[Scheduling backups using Schedule CR].
-:context: cephfs
+// include::modules/oadp-creating-backup-cr.adoc[leveloffset=+1]
+// include::modules/oadp-backing-up-pvs-csi.adoc[leveloffset=+1]
+// include::modules/oadp-backing-up-applications-restic.adoc[leveloffset=+1]
-include::modules/oadp-ceph-cephfs-back-up-dba.adoc[leveloffset=+2]
-include::modules/oadp-ceph-cephfs-back-up.adoc[leveloffset=+2]
-include::modules/oadp-ceph-cephfs-restore.adoc[leveloffset=+2]
+[id="known-issues-backing-up-applications"]
+== Known issues
-[id="oadp-ceph-split"]
-=== Backing up and restoring data using OADP 1.2 Data Mover and split volumes (CephFS and Ceph RBD)
+{ocp} {product-version} enforces a pod security admission (PSA) policy that can hinder the readiness of pods during a Restic restore process.
-You can use OpenShift API for Data Protection (OADP) 1.2 Data Mover to back up and restore data in an environment that has _split volumes_, that is, an environment that uses both CephFS and CephRBD.
+This issue is resolved in the OADP 1.1.6 and OADP 1.2.2 releases. Therefore, it is recommended that you upgrade to these releases.
-include::snippets/oadp-ceph-cr-prerequisites.adoc[]
-
-:context: !cephfs
-
-:context: split
-
-include::modules/oadp-ceph-split-back-up-dba.adoc[leveloffset=+2]
-include::modules/oadp-ceph-cephfs-back-up.adoc[leveloffset=+2]
-include::modules/oadp-ceph-cephfs-restore.adoc[leveloffset=+2]
-
-:context: !split
-
-:context: backing-up-applications
-
-[id="oadp-cleaning-up-after-data-mover-1-1-backup"]
-== Cleaning up after a backup using OADP 1.1 Data Mover
-
-For OADP 1.1 Data Mover, you must perform a data cleanup after you perform a backup.
-
-The cleanup consists of deleting the following resources:
-
-* Snapshots in a bucket
-* Cluster resources
-* Volume snapshot backups (VSBs) after a backup procedure that is either run by a schedule or is run repetitively
-
-include::modules/oadp-cleaning-up-after-data-mover-snapshots.adoc[leveloffset=+2]
-
-[id="deleting-cluster-resources"]
-=== Deleting cluster resources
-
-OADP 1.1 Data Mover might leave cluster resources whether or not it successfully backs up your container storage interface (CSI) volume snapshots to a remote object store.
-
-include::modules/oadp-deleting-cluster-resources-following-success.adoc[leveloffset=+3]
-include::modules/oadp-deleting-cluster-resources-following-failure.adoc[leveloffset=+3]
-
-include::modules/oadp-vsb-cleanup-after-scheduler.adoc[leveloffset=+2]
+For more information, see xref:../../../backup_and_restore/application_backup_and_restore/troubleshooting.adoc#oadp-restic-restore-failing-psa-policy_oadp-troubleshooting[Restic restore partially failing on OCP 4.15 due to changed PSA policy].
[role="_additional-resources"]
.Additional resources
* xref:../../../operators/admin/olm-adding-operators-to-cluster.adoc#olm-installing-operators-from-operatorhub_olm-adding-operators-to-a-cluster[Installing Operators on clusters for administrators]
* xref:../../../operators/user/olm-installing-operators-in-namespace.adoc#olm-installing-operators-in-namespace[Installing Operators in namespaces for non-administrators]
-include::modules/oadp-creating-backup-hooks.adoc[leveloffset=+1]
-include::modules/oadp-scheduling-backups.adoc[leveloffset=+1]
-include::modules/oadp-deleting-backups.adoc[leveloffset=+1]
-[role="_additional-resources"]
-.Additional resources
-* xref:../../../backup_and_restore/application_backup_and_restore/troubleshooting.adoc#velero-obtaining-by-downloading_oadp-troubleshooting[Downloading the Velero CLI tool]
+
diff --git a/backup_and_restore/application_backup_and_restore/backing_up_and_restoring/oadp-about-kopia.adoc b/backup_and_restore/application_backup_and_restore/backing_up_and_restoring/oadp-about-kopia.adoc
new file mode 100644
index 000000000000..61b3e0947f5b
--- /dev/null
+++ b/backup_and_restore/application_backup_and_restore/backing_up_and_restoring/oadp-about-kopia.adoc
@@ -0,0 +1,43 @@
+:_mod-docs-content-type: ASSEMBLY
+[id="oadp-about-kopia"]
+= About Kopia
+include::_attributes/common-attributes.adoc[]
+:context: oadp-about-kopia
+
+toc::[]
+
+Kopia is a fast and secure open-source backup and restore tool that allows you to create encrypted snapshots of your data and save the snapshots to remote or cloud storage of your choice.
+
+Kopia supports network and local storage locations, and many cloud or remote storage locations, including:
+
+* Amazon S3 and any cloud storage that is compatible with S3
+* Azure Blob Storage
+* Google Cloud Storage platform
+
+Kopia uses content-addressable storage for snapshots:
+
+* Snapshots are always incremental; data that is already included in previous snapshots is not re-uploaded to the repository. A file is only uploaded to the repository again if it is modified.
+* Stored data is deduplicated; if multiple copies of the same file exist, only one of them is stored.
+* If files are moved or renamed, Kopia can recognize that they have the same content and does not upload them again.
+
+
+[id="oadp-kopia-integration"]
+== OADP integration with Kopia
+
+OADP 1.3 supports Kopia as the backup mechanism for pod volume backup in addition to Restic. You choose one or the other at installation by setting the `uploaderType` field in the `DataProtectionApplication` custom resource (CR). The possible values are `restic` or `kopia`. If you do not specify an `uploaderType`, OADP 1.3 defaults to Kopia as the backup mechanism. The data is written to and read from a unified repository.
+
+The following example shows a `DataProtectionApplication` CR configured for using Kopia:
+
+[source,yaml]
+----
+apiVersion: oadp.openshift.io/v1alpha1
+kind: DataProtectionApplication
+metadata:
+ name: dpa-sample
+spec:
+ configuration:
+ nodeAgent:
+ enable: true
+ uploaderType: kopia
+# ...
+----
\ No newline at end of file
diff --git a/backup_and_restore/application_backup_and_restore/backing_up_and_restoring/oadp-backing-up-applications-restic-doc.adoc b/backup_and_restore/application_backup_and_restore/backing_up_and_restoring/oadp-backing-up-applications-restic-doc.adoc
new file mode 100644
index 000000000000..8451f1d4fddf
--- /dev/null
+++ b/backup_and_restore/application_backup_and_restore/backing_up_and_restoring/oadp-backing-up-applications-restic-doc.adoc
@@ -0,0 +1,63 @@
+
+:_mod-docs-content-type: ASSEMBLY
+[id="oadp-backing-up-applications-restic-doc"]
+= Backing up applications with File System Backup: Kopia or Restic
+include::_attributes/common-attributes.adoc[]
+:context: backing-up-applications
+
+toc::[]
+
+You can use OADP to back up and restore Kubernetes volumes attached to pods from the file system of the volumes. This process is called File System Backup (FSB) or Pod Volume Backup (PVB). It is accomplished by using modules from the open source backup tools Restic or Kopia.
+
+If your cloud provider does not support snapshots or if your applications are on NFS data volumes, you can create backups by using FSB.
+
+[NOTE]
+====
+link:https://restic.net/[Restic] is installed by the OADP Operator by default. If you prefer, you can install link:https://kopia.io/[Kopia] instead.
+====
+
+FSB integration with OADP provides a solution for backing up and restoring almost any type of Kubernetes volumes. This integration is an additional capability of OADP and is not a replacement for existing functionality.
+
+You back up Kubernetes resources, internal images, and persistent volumes with Kopia or Restic by editing the `Backup` custom resource (CR).
+
+You do not need to specify a snapshot location in the `DataProtectionApplication` CR.
+
+[NOTE]
+====
+In OADP version 1.3 and later, you can use either Kopia or Restic for backing up applications.
+
+For the built-in Data Mover, you must use Kopia.
+
+In OADP version 1.2 and earlier, you can only use Restic for backing up applications.
+====
+
+[IMPORTANT]
+====
+FSB does not support backing up `hostPath` volumes. For more information, see link:https://velero.io/docs/v1.12/file-system-backup/#limitations[FSB limitations].
+====
+
+.Prerequisites
+
+* You must install the OpenShift API for Data Protection (OADP) Operator.
+* You must not disable the default `nodeAgent` installation by setting `spec.configuration.nodeAgent.enable` to `false` in the `DataProtectionApplication` CR.
+* You must select Kopia or Restic as the uploader by setting `spec.configuration.nodeAgent.uploaderType` to `kopia` or `restic` in the `DataProtectionApplication` CR.
+* The `DataProtectionApplication` CR must be in a `Ready` state.
+
+.Procedure
+
+* Create the `Backup` CR, as in the following example:
++
+[source,yaml]
+----
+apiVersion: velero.io/v1
+kind: Backup
+metadata:
+ name:
+ labels:
+ velero.io/storage-location: default
+ namespace: openshift-adp
+spec:
+ defaultVolumesToFsBackup: true <1>
+...
+----
+<1> In OADP version 1.2 and later, add the `defaultVolumesToFsBackup: true` setting within the `spec` block. In OADP version 1.1, add `defaultVolumesToRestic: true`.
diff --git a/backup_and_restore/application_backup_and_restore/backing_up_and_restoring/oadp-backing-up-pvs-csi-doc.adoc b/backup_and_restore/application_backup_and_restore/backing_up_and_restoring/oadp-backing-up-pvs-csi-doc.adoc
new file mode 100644
index 000000000000..fdb6f025518d
--- /dev/null
+++ b/backup_and_restore/application_backup_and_restore/backing_up_and_restoring/oadp-backing-up-pvs-csi-doc.adoc
@@ -0,0 +1,34 @@
+:_mod-docs-content-type: ASSEMBLY
+[id="oadp-backing-up-pvs-csi-doc"]
+= Backing up persistent volumes with CSI snapshots
+include::_attributes/common-attributes.adoc[]
+:context: backing-up-applications
+
+toc::[]
+
+You back up persistent volumes with Container Storage Interface (CSI) snapshots by editing the `VolumeSnapshotClass` custom resource (CR) of the cloud storage before you create the `Backup` CR. See xref:../../../storage/container_storage_interface/persistent-storage-csi-snapshots.adoc#persistent-storage-csi-snapshots-overview_persistent-storage-csi-snapshots[CSI volume snapshots].
+
+For more information, see xref:../../../backup_and_restore/application_backup_and_restore/backing_up_and_restoring/oadp-creating-backup-cr.adoc#oadp-creating-backup-cr-doc[Creating a Backup CR].
+
+.Prerequisites
+
+* The cloud provider must support CSI snapshots.
+* You must enable CSI in the `DataProtectionApplication` CR.
+
+.Procedure
+
+* Add the `metadata.labels.velero.io/csi-volumesnapshot-class: "true"` key-value pair to the `VolumeSnapshotClass` CR:
++
+[source,yaml,subs="attributes+"]
+----
+apiVersion: snapshot.storage.k8s.io/v1
+kind: VolumeSnapshotClass
+metadata:
+ name:
+ labels:
+ velero.io/csi-volumesnapshot-class: "true"
+driver:
+deletionPolicy: Retain
+----
+
+You can now create a `Backup` CR.
diff --git a/backup_and_restore/application_backup_and_restore/backing_up_and_restoring/oadp-creating-backup-cr.adoc b/backup_and_restore/application_backup_and_restore/backing_up_and_restoring/oadp-creating-backup-cr.adoc
new file mode 100644
index 000000000000..13a15aad3068
--- /dev/null
+++ b/backup_and_restore/application_backup_and_restore/backing_up_and_restoring/oadp-creating-backup-cr.adoc
@@ -0,0 +1,88 @@
+// Module included in the following assemblies:
+//
+// * backup_and_restore/application_backup_and_restore/backing_up_and_restoring/backing-up-applications.adoc
+
+:_mod-docs-content-type: PROCEDURE
+[id="oadp-creating-backup-cr-doc"]
+= Creating a Backup CR
+include::_attributes/common-attributes.adoc[]
+include::_attributes/attributes-openshift-dedicated.adoc[]
+:context: backing-up-applications
+
+toc::[]
+
+You back up Kubernetes resources, internal images, and persistent volumes (PVs) by creating a `Backup` custom resource (CR).
+
+.Prerequisites
+
+* You must install the OpenShift API for Data Protection (OADP) Operator.
+* The `DataProtectionApplication` CR must be in a `Ready` state.
+* Backup location prerequisites:
+** You must have S3 object storage configured for Velero.
+** You must have a backup location configured in the `DataProtectionApplication` CR.
+* Snapshot location prerequisites:
+** Your cloud provider must have a native snapshot API or support Container Storage Interface (CSI) snapshots.
+** For CSI snapshots, you must create a `VolumeSnapshotClass` CR to register the CSI driver.
+** You must have a volume location configured in the `DataProtectionApplication` CR.
+
+.Procedure
+
+. Retrieve the `backupStorageLocations` CRs by entering the following command:
++
+[source,terminal]
+----
+$ oc get backupStorageLocations -n openshift-adp
+----
++
+.Example output
++
+[source,terminal]
+----
+NAMESPACE NAME PHASE LAST VALIDATED AGE DEFAULT
+openshift-adp velero-sample-1 Available 11s 31m
+----
+
+. Create a `Backup` CR, as in the following example:
++
+[source,yaml]
+----
+apiVersion: velero.io/v1
+kind: Backup
+metadata:
+ name:
+ labels:
+ velero.io/storage-location: default
+ namespace: openshift-adp
+spec:
+ hooks: {}
+ includedNamespaces:
+ - <1>
+ includedResources: [] <2>
+ excludedResources: [] <3>
+ storageLocation: <4>
+ ttl: 720h0m0s
+  labelSelector: <5>
+    matchLabels:
+      app: <label_1>
+      app: <label_2>
+      app: <label_3>
+  orLabelSelectors: <6>
+  - matchLabels:
+      app: <label_1>
+      app: <label_2>
+      app: <label_3>
+----
+<1> Specify an array of namespaces to back up.
+<2> Optional: Specify an array of resources to include in the backup. Resources might be shortcuts (for example, 'po' for 'pods') or fully-qualified. If unspecified, all resources are included.
+<3> Optional: Specify an array of resources to exclude from the backup. Resources might be shortcuts (for example, 'po' for 'pods') or fully-qualified.
+<4> Specify the name of the `backupStorageLocations` CR.
+<5> Map of {key,value} pairs of backup resources that have *all* the specified labels.
+<6> Map of {key,value} pairs of backup resources that have *one or more* of the specified labels.
+
+. Verify that the status of the `Backup` CR is `Completed`:
++
+[source,terminal]
+----
+$ oc get backup <backup_name> -n openshift-adp -o jsonpath='{.status.phase}'
+----
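++
+.Example output
++
+[source,terminal]
+----
+Completed
+----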
diff --git a/backup_and_restore/application_backup_and_restore/backing_up_and_restoring/oadp-creating-backup-hooks-doc.adoc b/backup_and_restore/application_backup_and_restore/backing_up_and_restoring/oadp-creating-backup-hooks-doc.adoc
new file mode 100644
index 000000000000..dd98c51f3bfd
--- /dev/null
+++ b/backup_and_restore/application_backup_and_restore/backing_up_and_restoring/oadp-creating-backup-hooks-doc.adoc
@@ -0,0 +1,62 @@
+:_mod-docs-content-type: PROCEDURE
+[id="oadp-creating-backup-hooks-doc"]
+= Creating backup hooks
+include::_attributes/common-attributes.adoc[]
+:context: backing-up-applications
+
+toc::[]
+
+When performing a backup, it is possible to specify one or more commands to execute in a container within a pod, based on the pod being backed up.
+
+The commands can be configured to run before any custom action processing (_Pre_ hooks), or after all custom actions have been completed and any additional items specified by the custom action have been backed up (_Post_ hooks).
+
+You create backup hooks to run commands in a container in a pod by editing the `Backup` custom resource (CR).
+
+.Procedure
+
+* Add a hook to the `spec.hooks` block of the `Backup` CR, as in the following example:
++
+[source,yaml]
+----
+apiVersion: velero.io/v1
+kind: Backup
+metadata:
+ name:
+ namespace: openshift-adp
+spec:
+ hooks:
+ resources:
+ - name:
+ includedNamespaces:
+ - <1>
+ excludedNamespaces: <2>
+ -
+      includedResources:
+      - pods <3>
+ excludedResources: [] <4>
+ labelSelector: <5>
+ matchLabels:
+ app: velero
+ component: server
+ pre: <6>
+ - exec:
+ container: <7>
+ command:
+ - /bin/uname <8>
+ - -a
+ onError: Fail <9>
+ timeout: 30s <10>
+ post: <11>
+...
+----
+<1> Optional: You can specify namespaces to which the hook applies. If this value is not specified, the hook applies to all namespaces.
+<2> Optional: You can specify namespaces to which the hook does not apply.
+<3> Currently, pods are the only supported resource that hooks can apply to.
+<4> Optional: You can specify resources to which the hook does not apply.
+<5> Optional: This hook only applies to objects matching the label. If this value is not specified, the hook applies to all objects.
+<6> Array of hooks to run before the backup.
+<7> Optional: If the container is not specified, the command runs in the first container in the pod.
+<8> The command that the hook runs; in this example, `uname -a` prints system information.
+<9> Allowed values for error handling are `Fail` and `Continue`. The default is `Fail`.
+<10> Optional: How long to wait for the commands to run. The default is `30s`.
+<11> This block defines an array of hooks to run after the backup, with the same parameters as the pre-backup hooks.
diff --git a/backup_and_restore/application_backup_and_restore/backing_up_and_restoring/oadp-deleting-backups-doc.adoc b/backup_and_restore/application_backup_and_restore/backing_up_and_restoring/oadp-deleting-backups-doc.adoc
new file mode 100644
index 000000000000..c5e5abfefab4
--- /dev/null
+++ b/backup_and_restore/application_backup_and_restore/backing_up_and_restoring/oadp-deleting-backups-doc.adoc
@@ -0,0 +1,44 @@
+:_mod-docs-content-type: PROCEDURE
+[id="oadp-deleting-backups-doc"]
+= Deleting backups
+include::_attributes/common-attributes.adoc[]
+:context: backing-up-applications
+
+toc::[]
+
+You can remove backup files by deleting the `Backup` custom resource (CR).
+
+[WARNING]
+====
+After you delete the `Backup` CR and the associated object storage data, you cannot recover the deleted data.
+====
+
+.Prerequisites
+
+* You created a `Backup` CR.
+* You know the name of the `Backup` CR and the namespace that contains it.
+* You downloaded the Velero CLI tool.
+* You can access the Velero binary in your cluster.
+
+.Procedure
+
+* Choose one of the following actions to delete the `Backup` CR:
+
+** To delete the `Backup` CR and keep the associated object storage data, run the following command:
++
+[source,terminal]
+----
+$ oc delete backup <backup_CR_name> -n <velero_namespace>
+----
+
+** To delete the `Backup` CR and delete the associated object storage data, run the following command:
++
+[source,terminal]
+----
+$ velero backup delete <backup_CR_name> -n <velero_namespace>
+----
++
+Where:
++
+<backup_CR_name>:: The name of the `Backup` custom resource.
+<velero_namespace>:: The namespace that contains the `Backup` custom resource.
diff --git a/backup_and_restore/application_backup_and_restore/backing_up_and_restoring/oadp-scheduling-backups-doc.adoc b/backup_and_restore/application_backup_and_restore/backing_up_and_restoring/oadp-scheduling-backups-doc.adoc
new file mode 100644
index 000000000000..f37336ee6a4e
--- /dev/null
+++ b/backup_and_restore/application_backup_and_restore/backing_up_and_restoring/oadp-scheduling-backups-doc.adoc
@@ -0,0 +1,86 @@
+
+:_mod-docs-content-type: PROCEDURE
+[id="oadp-scheduling-backups-doc"]
+= Scheduling backups using Schedule CR
+include::_attributes/common-attributes.adoc[]
+:context: backing-up-applications
+
+toc::[]
+
+The schedule operation allows you to create a backup of your data at a particular time, specified by a Cron expression.
+
+You schedule backups by creating a `Schedule` custom resource (CR) instead of a `Backup` CR.
+
+[WARNING]
+====
+Leave enough time in your backup schedule for a backup to finish before another backup is created.
+
+For example, if a backup of a namespace typically takes 10 minutes, do not schedule backups more frequently than every 15 minutes.
+====
+
+.Prerequisites
+
+* You must install the OpenShift API for Data Protection (OADP) Operator.
+* The `DataProtectionApplication` CR must be in a `Ready` state.
+
+.Procedure
+
+. Retrieve the `backupStorageLocations` CRs:
++
+[source,terminal]
+----
+$ oc get backupStorageLocations -n openshift-adp
+----
++
+.Example output
++
+[source,terminal]
+----
+NAMESPACE NAME PHASE LAST VALIDATED AGE DEFAULT
+openshift-adp velero-sample-1 Available 11s 31m
+----
+
+. Create a `Schedule` CR, as in the following example:
++
+[source,yaml]
+----
+$ cat << EOF | oc apply -f -
+apiVersion: velero.io/v1
+kind: Schedule
+metadata:
+ name: <schedule>
+ namespace: openshift-adp
+spec:
+ schedule: 0 7 * * * <1>
+ template:
+ hooks: {}
+ includedNamespaces:
+ - <namespace> <2>
+ storageLocation: <velero-sample-1> <3>
+ defaultVolumesToFsBackup: true <4>
+ ttl: 720h0m0s
+EOF
+----
+
+<1> `cron` expression to schedule the backup, for example, `0 7 * * *` to perform a backup every day at 7:00.
++
+[NOTE]
+====
+To schedule a backup at specific intervals, enter the `<duration_in_minutes>` in the following format:
+[source,terminal]
+----
+ schedule: "*/10 * * * *"
+----
+Enter the minutes value between quotation marks (`" "`).
+====
+
+<2> Array of namespaces to back up.
+<3> Name of the `backupStorageLocations` CR.
+<4> Optional: In OADP version 1.2 and later, add the `defaultVolumesToFsBackup: true` key-value pair to your configuration when performing backups of volumes with Restic. In OADP version 1.1, add the `defaultVolumesToRestic: true` key-value pair when you back up volumes with Restic.
+
+. Verify that the status of the `Schedule` CR is `Completed` after the scheduled backup runs:
++
+[source,terminal]
+----
+$ oc get schedule -n openshift-adp <schedule> -o jsonpath='{.status.phase}'
+----
diff --git a/backup_and_restore/application_backup_and_restore/backing_up_and_restoring/restoring-applications.adoc b/backup_and_restore/application_backup_and_restore/backing_up_and_restoring/restoring-applications.adoc
index ab3ec37d6077..c761c938460e 100644
--- a/backup_and_restore/application_backup_and_restore/backing_up_and_restoring/restoring-applications.adoc
+++ b/backup_and_restore/application_backup_and_restore/backing_up_and_restoring/restoring-applications.adoc
@@ -1,4 +1,4 @@
-:_content-type: ASSEMBLY
+:_mod-docs-content-type: ASSEMBLY
[id="restoring-applications"]
= Restoring applications
include::_attributes/common-attributes.adoc[]
@@ -8,7 +8,7 @@ toc::[]
You restore application backups by creating a `Restore` custom resource (CR). See xref:../../../backup_and_restore/application_backup_and_restore/backing_up_and_restoring/restoring-applications.adoc#oadp-creating-restore-cr_restoring-applications[Creating a Restore CR].
-You can create restore hooks to run commands in a container in a pod while restoring your application by editing the `Restore` (CR). See xref:../../../backup_and_restore/application_backup_and_restore/backing_up_and_restoring/restoring-applications.adoc#oadp-creating-restore-hooks_restoring-applications[Creating restore hooks]
+You can create restore hooks to run commands in a container in a pod by editing the `Restore` CR. See xref:../../../backup_and_restore/application_backup_and_restore/backing_up_and_restoring/restoring-applications.adoc#oadp-creating-restore-hooks_restoring-applications[Creating restore hooks].
include::modules/oadp-creating-restore-cr.adoc[leveloffset=+1]
include::modules/oadp-creating-restore-hooks.adoc[leveloffset=+1]
diff --git a/backup_and_restore/application_backup_and_restore/installing/about-installing-oadp.adoc b/backup_and_restore/application_backup_and_restore/installing/about-installing-oadp.adoc
index 0292eafc004c..ec81a26d1af3 100644
--- a/backup_and_restore/application_backup_and_restore/installing/about-installing-oadp.adoc
+++ b/backup_and_restore/application_backup_and_restore/installing/about-installing-oadp.adoc
@@ -1,4 +1,4 @@
-:_content-type: ASSEMBLY
+:_mod-docs-content-type: ASSEMBLY
[id="about-installing-oadp"]
= About installing OADP
include::_attributes/common-attributes.adoc[]
@@ -16,12 +16,19 @@ To back up Kubernetes resources and internal images, you must have object storag
* xref:../../../backup_and_restore/application_backup_and_restore/installing/installing-oadp-azure.adoc#installing-oadp-azure[Microsoft Azure]
* xref:../../../backup_and_restore/application_backup_and_restore/installing/installing-oadp-gcp.adoc#installing-oadp-gcp[Google Cloud Platform]
* xref:../../../backup_and_restore/application_backup_and_restore/installing/installing-oadp-mcg.adoc#installing-oadp-mcg[Multicloud Object Gateway]
-* AWS S3 compatible object storage, such as Noobaa or Minio
+* AWS S3 compatible object storage, such as Multicloud Object Gateway or MinIO
:FeatureName: The `CloudStorage` API, which automates the creation of a bucket for object storage,
include::snippets/technology-preview.adoc[]
-You can back up persistent volumes (PVs) by using snapshots or Restic.
+[NOTE]
+====
+The `CloudStorage` API is a Technology Preview feature when you use a `CloudStorage` object and want OADP to use the `CloudStorage` API to automatically create an S3 bucket for use as a `BackupStorageLocation`.
+
+The `CloudStorage` API supports manually creating a `BackupStorageLocation` object by specifying an existing S3 bucket. The `CloudStorage` API that creates an S3 bucket automatically is currently only enabled for AWS S3 storage.
+====
+
+You can back up persistent volumes (PVs) by using snapshots or a File System Backup (FSB).
To back up PVs with snapshots, you must have a cloud provider that supports either a native snapshot API or Container Storage Interface (CSI) snapshots, such as one of the following cloud providers:
@@ -32,7 +39,7 @@ To back up PVs with snapshots, you must have a cloud provider that supports eith
include::snippets/oadp-ocp-compat.adoc[]
-If your cloud provider does not support snapshots or if your storage is NFS, you can back up applications with xref:../../../backup_and_restore/application_backup_and_restore/backing_up_and_restoring/backing-up-applications.adoc#oadp-backing-up-applications-restic_backing-up-applications[Restic backups] on object storage.
+If your cloud provider does not support snapshots or if your storage is NFS, you can back up applications with xref:../../../backup_and_restore/application_backup_and_restore/backing_up_and_restoring/oadp-backing-up-applications-restic-doc.adoc#backing-up-applications[Backing up applications with File System Backup: Kopia or Restic] on object storage.
You create a default `Secret` and then you install the Data Protection Application.
@@ -53,3 +60,6 @@ include::modules/about-installing-oadp-on-multiple-namespaces.adoc[leveloffset=+
.Additional resources
* xref:../../../operators/understanding/olm/olm-understanding-olm.adoc#olm-csv_olm-understanding-olm[Cluster service version]
+
+include::modules/oadp-velero-cpu-memory-requirements.adoc[leveloffset=+1]
+include::modules/oadp-backup-restore-for-large-usage.adoc[leveloffset=+2]
\ No newline at end of file
diff --git a/backup_and_restore/application_backup_and_restore/installing/about-oadp-1-3-data-mover.adoc b/backup_and_restore/application_backup_and_restore/installing/about-oadp-1-3-data-mover.adoc
new file mode 100644
index 000000000000..b7e628121bcf
--- /dev/null
+++ b/backup_and_restore/application_backup_and_restore/installing/about-oadp-1-3-data-mover.adoc
@@ -0,0 +1,58 @@
+:_mod-docs-content-type: ASSEMBLY
+[id="about-oadp-1-3-data-mover"]
+= About the OADP 1.3 Data Mover
+include::_attributes/common-attributes.adoc[]
+:context: about-oadp-1-3-data-mover
+
+toc::[]
+
+OADP 1.3 includes a built-in Data Mover that you can use to move Container Storage Interface (CSI) volume snapshots to a remote object store. The built-in Data Mover allows you to restore stateful applications from the remote object store if a failure, accidental deletion, or corruption of the cluster occurs. It uses xref:../../../backup_and_restore/application_backup_and_restore/backing_up_and_restoring/oadp-about-kopia.adoc#oadp-about-kopia[Kopia] as the uploader mechanism to read the snapshot data and write to the unified repository.
+
+OADP supports CSI snapshots on the following:
+
+* Red Hat OpenShift Data Foundation
+* Any other cloud storage provider with the Container Storage Interface (CSI) driver that supports the Kubernetes Volume Snapshot API
+
+:FeatureName: The OADP built-in Data Mover
+include::snippets/technology-preview.adoc[]
+
+[id="enabling-oadp-1-3-data-mover"]
+== Enabling the built-in Data Mover
+
+To enable the built-in Data Mover, you must include the CSI plugin and enable the node agent in the `DataProtectionApplication` custom resource (CR). The node agent is a Kubernetes daemonset that hosts data movement modules. These include the Data Mover controller, uploader, and the repository.
+
+.Example `DataProtectionApplication` manifest
+[source,yaml]
+----
+apiVersion: oadp.openshift.io/v1alpha1
+kind: DataProtectionApplication
+metadata:
+ name: dpa-sample
+spec:
+ configuration:
+ nodeAgent:
+ enable: true <1>
+ uploaderType: kopia <2>
+ velero:
+ defaultPlugins:
+ - openshift
+ - aws
+ - csi <3>
+# ...
+----
+<1> The flag to enable the node agent.
+<2> The type of uploader. The possible values are `restic` and `kopia`. The built-in Data Mover uses Kopia as the default uploader mechanism regardless of the value of the `uploaderType` field.
+<3> The CSI plugin included in the list of default plugins.
+
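+After you apply the manifest, you can check that the node agent is running. The following sketch assumes that the node agent pods carry the `node-agent` name prefix, which matches the daemon set name:
+
+[source,terminal]
+----
+# node agent pods are expected to be named node-agent-<suffix>; this prefix is an assumption
+$ oc get pods -n openshift-adp
+----
+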
+[id="built-in-data-mover-crs"]
+== Built-in Data Mover controller and custom resource definitions (CRDs)
+
+The built-in Data Mover feature introduces three new API objects defined as CRDs for managing backup and restore:
+
+* `DataDownload`: Represents a data download of a volume snapshot. The CSI plugin creates one `DataDownload` object per volume to be restored. The `DataDownload` CR includes information about the target volume, the specified Data Mover, the progress of the current data download, the specified backup repository, and the result of the current data download after the process is complete.
+
+* `DataUpload`: Represents a data upload of a volume snapshot. The CSI plugin creates one `DataUpload` object per CSI snapshot. The `DataUpload` CR includes information about the specified snapshot, the specified Data Mover, the specified backup repository, the progress of the current data upload, and the result of the current data upload after the process is complete.
+
+* `BackupRepository`: Represents and manages the lifecycle of the backup repositories. OADP creates a backup repository per namespace when the first CSI snapshot backup or restore for a namespace is requested.
+
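+You can inspect these CRs with standard `oc` commands while a backup or restore is in progress. The following is a minimal sketch; it assumes the CRs are created in the `openshift-adp` namespace and uses a placeholder resource name:
+
+[source,terminal]
+----
+# list the data uploads created for a CSI snapshot backup
+$ oc get datauploads -n openshift-adp
+
+# check the phase of a single upload; <data_upload_name> is a placeholder
+$ oc get datauploads <data_upload_name> -n openshift-adp -o jsonpath='{.status.phase}'
+----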
+
diff --git a/backup_and_restore/application_backup_and_restore/installing/data-mover-intro.adoc b/backup_and_restore/application_backup_and_restore/installing/data-mover-intro.adoc
new file mode 100644
index 000000000000..ae9d2fd42853
--- /dev/null
+++ b/backup_and_restore/application_backup_and_restore/installing/data-mover-intro.adoc
@@ -0,0 +1,37 @@
+:_mod-docs-content-type: CONCEPT
+[id="oadp-data-mover-intro"]
+= OADP Data Mover Introduction
+include::_attributes/common-attributes.adoc[]
+:context: data-mover
+
+toc::[]
+
+OADP Data Mover allows you to restore stateful applications from a remote object store if a failure, accidental deletion, or corruption of the cluster occurs.
+
+[NOTE]
+====
+The OADP 1.1 Data Mover is a Technology Preview feature.
+
+The OADP 1.2 Data Mover has significantly improved features and performances, but is still a Technology Preview feature.
+====
+:FeatureName: The OADP Data Mover
+include::snippets/technology-preview.adoc[leveloffset=+1]
+
+* You can use OADP Data Mover to back up Container Storage Interface (CSI) volume snapshots to a remote object store. See xref:../../../backup_and_restore/application_backup_and_restore/installing/oadp-using-data-mover-for-csi-snapshots-doc.adoc#oadp-using-data-mover-for-csi-snapshots-doc[Using Data Mover for CSI snapshots].
+
+* You can use OADP 1.2 Data Mover to back up and restore application data for clusters that use CephFS, CephRBD, or both. See xref:../../../backup_and_restore/application_backup_and_restore/installing/oadp-using-data-mover-for-csi-snapshots-doc.adoc#oadp-using-data-mover-for-csi-snapshots-doc[Using OADP 1.2 Data Mover with Ceph storage].
+
+* If you are using OADP 1.1 Data Mover, you must perform a data cleanup after you perform a backup. See xref:../../../backup_and_restore/application_backup_and_restore/installing/oadp-cleaning-up-after-data-mover-1-1-backup-doc.adoc#oadp-cleaning-up-after-data-mover-1-1-backup-doc[Cleaning up after a backup using OADP 1.1 Data Mover].
+
+include::snippets/snip-post-mig-hook.adoc[]
+
+[id="oadp-data-mover-prerequisites"]
+== OADP Data Mover prerequisites
+
+* You have a stateful application running in a separate namespace.
+
+* You have installed the OADP Operator by using Operator Lifecycle Manager (OLM).
+
+* You have created an appropriate `VolumeSnapshotClass` and `StorageClass`.
+
+* You have installed the VolSync Operator by using OLM.
diff --git a/backup_and_restore/application_backup_and_restore/installing/installing-oadp-aws.adoc b/backup_and_restore/application_backup_and_restore/installing/installing-oadp-aws.adoc
index 64b8b58df482..ebd66485d057 100644
--- a/backup_and_restore/application_backup_and_restore/installing/installing-oadp-aws.adoc
+++ b/backup_and_restore/application_backup_and_restore/installing/installing-oadp-aws.adoc
@@ -1,6 +1,6 @@
-:_content-type: ASSEMBLY
+:_mod-docs-content-type: ASSEMBLY
[id="installing-oadp-aws"]
-= Installing and configuring the OpenShift API for Data Protection with Amazon Web Services
+= Configuring the OpenShift API for Data Protection with Amazon Web Services
include::_attributes/common-attributes.adoc[]
:context: installing-oadp-aws
:installing-oadp-aws:
@@ -13,11 +13,11 @@ You install the OpenShift API for Data Protection (OADP) with Amazon Web Service
include::snippets/oadp-mtc-operator.adoc[]
-You configure AWS for Velero, create a default `Secret`, and then install the Data Protection Application.
+You configure AWS for Velero, create a default `Secret`, and then install the Data Protection Application. For more details, see xref:../../../backup_and_restore/application_backup_and_restore/installing/oadp-installing-operator.adoc#oadp-installing-operator-doc[Installing the OADP Operator].
To install the OADP Operator in a restricted network environment, you must first disable the default OperatorHub sources and mirror the Operator catalog. See xref:../../../operators/admin/olm-restricted-networks.adoc#olm-restricted-networks[Using Operator Lifecycle Manager on restricted networks] for details.
-include::modules/oadp-installing-operator.adoc[leveloffset=+1]
+//include::modules/oadp-installing-operator.adoc[leveloffset=+1]
include::modules/migration-configuring-aws-s3.adoc[leveloffset=+1]
include::modules/oadp-about-backup-snapshot-locations-secrets.adoc[leveloffset=+1]
include::modules/oadp-creating-default-secret.adoc[leveloffset=+2]
@@ -31,7 +31,8 @@ You can configure the Data Protection Application by setting Velero resource all
include::modules/oadp-setting-resource-limits-and-requests.adoc[leveloffset=+2]
include::modules/oadp-self-signed-certificate.adoc[leveloffset=+2]
-include::modules/oadp-installing-dpa.adoc[leveloffset=+1]
+include::modules/oadp-installing-dpa-1-2-and-earlier.adoc[leveloffset=+1]
+include::modules/oadp-installing-dpa-1-3.adoc[leveloffset=+1]
include::modules/oadp-enabling-csi-dpa.adoc[leveloffset=+2]
:!installing-oadp-aws:
diff --git a/backup_and_restore/application_backup_and_restore/installing/installing-oadp-azure.adoc b/backup_and_restore/application_backup_and_restore/installing/installing-oadp-azure.adoc
index 3077b98b3b71..87cdb9445c87 100644
--- a/backup_and_restore/application_backup_and_restore/installing/installing-oadp-azure.adoc
+++ b/backup_and_restore/application_backup_and_restore/installing/installing-oadp-azure.adoc
@@ -1,6 +1,6 @@
-:_content-type: ASSEMBLY
+:_mod-docs-content-type: ASSEMBLY
[id="installing-oadp-azure"]
-= Installing and configuring the OpenShift API for Data Protection with Microsoft Azure
+= Configuring the OpenShift API for Data Protection with Microsoft Azure
include::_attributes/common-attributes.adoc[]
:context: installing-oadp-azure
:installing-oadp-azure:
@@ -13,11 +13,11 @@ You install the OpenShift API for Data Protection (OADP) with Microsoft Azure by
include::snippets/oadp-mtc-operator.adoc[]
-You configure Azure for Velero, create a default `Secret`, and then install the Data Protection Application.
+You configure Azure for Velero, create a default `Secret`, and then install the Data Protection Application. For more details, see xref:../../../backup_and_restore/application_backup_and_restore/installing/oadp-installing-operator.adoc#oadp-installing-operator-doc[Installing the OADP Operator].
To install the OADP Operator in a restricted network environment, you must first disable the default OperatorHub sources and mirror the Operator catalog. See xref:../../../operators/admin/olm-restricted-networks.adoc#olm-restricted-networks[Using Operator Lifecycle Manager on restricted networks] for details.
-include::modules/oadp-installing-operator.adoc[leveloffset=+1]
+// include::modules/oadp-installing-operator.adoc[leveloffset=+1]
include::modules/migration-configuring-azure.adoc[leveloffset=+1]
include::modules/oadp-about-backup-snapshot-locations-secrets.adoc[leveloffset=+1]
include::modules/oadp-creating-default-secret.adoc[leveloffset=+2]
@@ -31,7 +31,8 @@ You can configure the Data Protection Application by setting Velero resource all
include::modules/oadp-setting-resource-limits-and-requests.adoc[leveloffset=+2]
include::modules/oadp-self-signed-certificate.adoc[leveloffset=+2]
-include::modules/oadp-installing-dpa.adoc[leveloffset=+1]
+include::modules/oadp-installing-dpa-1-2-and-earlier.adoc[leveloffset=+1]
+include::modules/oadp-installing-dpa-1-3.adoc[leveloffset=+1]
include::modules/oadp-enabling-csi-dpa.adoc[leveloffset=+2]
:installing-oadp-azure!:
diff --git a/backup_and_restore/application_backup_and_restore/installing/installing-oadp-gcp.adoc b/backup_and_restore/application_backup_and_restore/installing/installing-oadp-gcp.adoc
index 6b688c72a17a..ce7af5d6f74b 100644
--- a/backup_and_restore/application_backup_and_restore/installing/installing-oadp-gcp.adoc
+++ b/backup_and_restore/application_backup_and_restore/installing/installing-oadp-gcp.adoc
@@ -1,6 +1,6 @@
-:_content-type: ASSEMBLY
+:_mod-docs-content-type: ASSEMBLY
[id="installing-oadp-gcp"]
-= Installing and configuring the OpenShift API for Data Protection with Google Cloud Platform
+= Configuring the OpenShift API for Data Protection with Google Cloud Platform
include::_attributes/common-attributes.adoc[]
:context: installing-oadp-gcp
:installing-oadp-gcp:
@@ -13,11 +13,11 @@ You install the OpenShift API for Data Protection (OADP) with Google Cloud Platf
include::snippets/oadp-mtc-operator.adoc[]
-You configure GCP for Velero, create a default `Secret`, and then install the Data Protection Application.
+You configure GCP for Velero, create a default `Secret`, and then install the Data Protection Application. For more details, see xref:../../../backup_and_restore/application_backup_and_restore/installing/oadp-installing-operator.adoc#oadp-installing-operator-doc[Installing the OADP Operator].
To install the OADP Operator in a restricted network environment, you must first disable the default OperatorHub sources and mirror the Operator catalog. See xref:../../../operators/admin/olm-restricted-networks.adoc#olm-restricted-networks[Using Operator Lifecycle Manager on restricted networks] for details.
-include::modules/oadp-installing-operator.adoc[leveloffset=+1]
+//include::modules/oadp-installing-operator.adoc[leveloffset=+1]
include::modules/migration-configuring-gcp.adoc[leveloffset=+1]
include::modules/oadp-about-backup-snapshot-locations-secrets.adoc[leveloffset=+1]
include::modules/oadp-creating-default-secret.adoc[leveloffset=+2]
@@ -31,7 +31,9 @@ You can configure the Data Protection Application by setting Velero resource all
include::modules/oadp-setting-resource-limits-and-requests.adoc[leveloffset=+2]
include::modules/oadp-self-signed-certificate.adoc[leveloffset=+2]
-include::modules/oadp-installing-dpa.adoc[leveloffset=+1]
+include::modules/oadp-installing-dpa-1-2-and-earlier.adoc[leveloffset=+1]
+include::modules/oadp-gcp-wif-cloud-authentication.adoc[leveloffset=+1]
+include::modules/oadp-installing-dpa-1-3.adoc[leveloffset=+1]
include::modules/oadp-enabling-csi-dpa.adoc[leveloffset=+2]
:installing-oadp-gcp!:
diff --git a/backup_and_restore/application_backup_and_restore/installing/installing-oadp-mcg.adoc b/backup_and_restore/application_backup_and_restore/installing/installing-oadp-mcg.adoc
index ff7180e2410e..750cf600250f 100644
--- a/backup_and_restore/application_backup_and_restore/installing/installing-oadp-mcg.adoc
+++ b/backup_and_restore/application_backup_and_restore/installing/installing-oadp-mcg.adoc
@@ -1,6 +1,6 @@
-:_content-type: ASSEMBLY
+:_mod-docs-content-type: ASSEMBLY
[id="installing-oadp-mcg"]
-= Installing and configuring the OpenShift API for Data Protection with Multicloud Object Gateway
+= Configuring the OpenShift API for Data Protection with Multicloud Object Gateway
include::_attributes/common-attributes.adoc[]
:context: installing-oadp-mcg
:installing-oadp-mcg:
@@ -19,11 +19,11 @@ MCG is a component of {rh-storage}. You configure MCG as a backup location in th
:FeatureName: The `CloudStorage` API, which automates the creation of a bucket for object storage,
include::snippets/technology-preview.adoc[]
-You create a `Secret` for the backup location and then you install the Data Protection Application.
+You create a `Secret` for the backup location and then you install the Data Protection Application. For more details, see xref:../../../backup_and_restore/application_backup_and_restore/installing/oadp-installing-operator.adoc#oadp-installing-operator-doc[Installing the OADP Operator].
To install the OADP Operator in a restricted network environment, you must first disable the default OperatorHub sources and mirror the Operator catalog. For details, see xref:../../../operators/admin/olm-restricted-networks.adoc#olm-restricted-networks[Using Operator Lifecycle Manager on restricted networks].
-include::modules/oadp-installing-operator.adoc[leveloffset=+1]
+//include::modules/oadp-installing-operator.adoc[leveloffset=+1]
include::modules/migration-configuring-mcg.adoc[leveloffset=+1]
include::modules/oadp-about-backup-snapshot-locations-secrets.adoc[leveloffset=+1]
include::modules/oadp-creating-default-secret.adoc[leveloffset=+2]
@@ -37,7 +37,8 @@ You can configure the Data Protection Application by setting Velero resource all
include::modules/oadp-setting-resource-limits-and-requests.adoc[leveloffset=+2]
include::modules/oadp-self-signed-certificate.adoc[leveloffset=+2]
-include::modules/oadp-installing-dpa.adoc[leveloffset=+1]
+include::modules/oadp-installing-dpa-1-2-and-earlier.adoc[leveloffset=+1]
+include::modules/oadp-installing-dpa-1-3.adoc[leveloffset=+1]
include::modules/oadp-enabling-csi-dpa.adoc[leveloffset=+2]
:installing-oadp-mcg!:
diff --git a/backup_and_restore/application_backup_and_restore/installing/installing-oadp-ocs.adoc b/backup_and_restore/application_backup_and_restore/installing/installing-oadp-ocs.adoc
index 651717695045..5fcf863b741f 100644
--- a/backup_and_restore/application_backup_and_restore/installing/installing-oadp-ocs.adoc
+++ b/backup_and_restore/application_backup_and_restore/installing/installing-oadp-ocs.adoc
@@ -1,6 +1,6 @@
-:_content-type: ASSEMBLY
+:_mod-docs-content-type: ASSEMBLY
[id="installing-oadp-ocs"]
-= Installing and configuring the OpenShift API for Data Protection with OpenShift Data Foundation
+= Configuring the OpenShift API for Data Protection with OpenShift Data Foundation
include::_attributes/common-attributes.adoc[]
:context: installing-oadp-ocs
:credentials: cloud-credentials
@@ -12,17 +12,22 @@ You install the OpenShift API for Data Protection (OADP) with {rh-storage} by in
include::snippets/oadp-mtc-operator.adoc[]
-You can configure xref:../../../backup_and_restore/application_backup_and_restore/installing/installing-oadp-mcg.adoc#installing-oadp-mcg[Multicloud Object Gateway] or any S3-compatible object storage as a backup location.
+You can configure xref:../../../backup_and_restore/application_backup_and_restore/installing/installing-oadp-mcg.adoc#installing-oadp-mcg[Multicloud Object Gateway] or any AWS S3-compatible object storage as a backup location.
:FeatureName: The `CloudStorage` API, which automates the creation of a bucket for object storage,
include::snippets/technology-preview.adoc[]
-You create a `Secret` for the backup location and then you install the Data Protection Application.
+You create a `Secret` for the backup location and then you install the Data Protection Application. For more details, see xref:../../../backup_and_restore/application_backup_and_restore/installing/oadp-installing-operator.adoc#oadp-installing-operator-doc[Installing the OADP Operator].
To install the OADP Operator in a restricted network environment, you must first disable the default OperatorHub sources and mirror the Operator catalog. For details, see xref:../../../operators/admin/olm-restricted-networks.adoc#olm-restricted-networks[Using Operator Lifecycle Manager on restricted networks].
-include::modules/oadp-installing-operator.adoc[leveloffset=+1]
+//include::modules/oadp-installing-operator.adoc[leveloffset=+1]
include::modules/oadp-about-backup-snapshot-locations-secrets.adoc[leveloffset=+1]
+
+[role="_additional-resources"]
+.Additional resources
+* https://access.redhat.com/documentation/en-us/red_hat_openshift_data_foundation/4.13/html/managing_hybrid_and_multicloud_resources/object-bucket-claim#creating-an-object-bucket-claim-using-the-openshift-web-console_rhodf[Creating an Object Bucket Claim using the OpenShift Web Console].
+
include::modules/oadp-creating-default-secret.adoc[leveloffset=+2]
include::modules/oadp-secrets-for-different-credentials.adoc[leveloffset=+2]
@@ -32,9 +37,11 @@ include::modules/oadp-secrets-for-different-credentials.adoc[leveloffset=+2]
You can configure the Data Protection Application by setting Velero resource allocations or enabling self-signed CA certificates.
include::modules/oadp-setting-resource-limits-and-requests.adoc[leveloffset=+2]
+include::modules/oadp-odf-cpu-memory-requirements.adoc[leveloffset=+3]
include::modules/oadp-self-signed-certificate.adoc[leveloffset=+2]
-include::modules/oadp-installing-dpa.adoc[leveloffset=+1]
-include::modules/oadp-configuring-noobaa-for-dr.adoc[leveloffset=+2]
+include::modules/oadp-installing-dpa-1-2-and-earlier.adoc[leveloffset=+1]
+include::modules/oadp-installing-dpa-1-3.adoc[leveloffset=+1]
+include::modules/oadp-creating-object-bucket-claim.adoc[leveloffset=+2]
include::modules/oadp-enabling-csi-dpa.adoc[leveloffset=+2]
diff --git a/backup_and_restore/application_backup_and_restore/installing/oadp-12-data-mover-ceph-doc.adoc b/backup_and_restore/application_backup_and_restore/installing/oadp-12-data-mover-ceph-doc.adoc
new file mode 100644
index 000000000000..8b2078fa19c3
--- /dev/null
+++ b/backup_and_restore/application_backup_and_restore/installing/oadp-12-data-mover-ceph-doc.adoc
@@ -0,0 +1,62 @@
+[id="oadp-12-data-mover-ceph-doc"]
+= Using OADP 1.2 Data Mover with Ceph storage
+include::_attributes/common-attributes.adoc[]
+:context: backing-up-applications
+
+toc::[]
+
+You can use OADP 1.2 Data Mover to back up and restore application data for clusters that use CephFS, CephRBD, or both.
+
+OADP 1.2 Data Mover leverages Ceph features that support large-scale environments. One of these is the shallow copy method, which is available for {product-title} 4.12 and later. This feature supports backing up and restoring `StorageClass` and `AccessMode` resources other than what is found on the source persistent volume claim (PVC).
+
+[IMPORTANT]
+====
+The CephFS shallow copy feature is a backup feature. It is not part of restore operations.
+====
+
+include::modules/oadp-ceph-prerequisites.adoc[leveloffset=+1]
+
+[id="defining-crs-for-12-data-mover"]
+== Defining custom resources for use with OADP 1.2 Data Mover
+
+When you install {rh-storage-first}, it automatically creates default CephFS and CephRBD `StorageClass` and `VolumeSnapshotClass` custom resources (CRs). You must define these CRs for use with OpenShift API for Data Protection (OADP) 1.2 Data Mover.
+
+After you define the CRs, you must make several other changes to your environment before you can perform your back up and restore operations.
+
+include::modules/oadp-ceph-preparing-cephfs-crs.adoc[leveloffset=+2]
+include::modules/oadp-ceph-preparing-cephrbd-crs.adoc[leveloffset=+2]
+include::modules/oadp-ceph-preparing-crs-additional.adoc[leveloffset=+2]
+
+[id="oadp-ceph-back-up-restore-cephfs"]
+== Backing up and restoring data using OADP 1.2 Data Mover and CephFS storage
+
+You can use OpenShift API for Data Protection (OADP) 1.2 Data Mover to back up and restore data using CephFS storage by enabling the shallow copy feature of CephFS.
+
+include::snippets/oadp-ceph-cr-prerequisites.adoc[]
+
+:context: !backing-up-applications
+
+:context: cephfs
+
+include::modules/oadp-ceph-cephfs-back-up-dba.adoc[leveloffset=+2]
+include::modules/oadp-ceph-cephfs-back-up.adoc[leveloffset=+2]
+include::modules/oadp-ceph-cephfs-restore.adoc[leveloffset=+2]
+
+[id="oadp-ceph-split"]
+== Backing up and restoring data using OADP 1.2 Data Mover and split volumes (CephFS and Ceph RBD)
+
+You can use OpenShift API for Data Protection (OADP) 1.2 Data Mover to back up and restore data in an environment that has _split volumes_, that is, an environment that uses both CephFS and CephRBD.
+
+include::snippets/oadp-ceph-cr-prerequisites.adoc[]
+
+:context: !cephfs
+
+:context: split
+
+include::modules/oadp-ceph-split-back-up-dba.adoc[leveloffset=+2]
+include::modules/oadp-ceph-cephfs-back-up.adoc[leveloffset=+2]
+include::modules/oadp-ceph-cephfs-restore.adoc[leveloffset=+2]
+
+:context: !split
+
+:context: backing-up-applications
diff --git a/backup_and_restore/application_backup_and_restore/installing/oadp-backup-restore-csi-snapshots.adoc b/backup_and_restore/application_backup_and_restore/installing/oadp-backup-restore-csi-snapshots.adoc
new file mode 100644
index 000000000000..0c3462723f59
--- /dev/null
+++ b/backup_and_restore/application_backup_and_restore/installing/oadp-backup-restore-csi-snapshots.adoc
@@ -0,0 +1,16 @@
+:_mod-docs-content-type: ASSEMBLY
+[id="oadp-backup-restore-csi-snapshots"]
+= Backing up and restoring CSI snapshots
+include::_attributes/common-attributes.adoc[]
+:context: oadp-backup-restore-csi-snapshots
+
+toc::[]
+
+You can back up and restore persistent volumes by using the OADP 1.3 Data Mover.
+
+include::modules/oadp-1-3-backing-csi-snapshots.adoc[leveloffset=+1]
+
+include::modules/oadp-1-3-restoring-csi-snapshots.adoc[leveloffset=+1]
+
+
+
diff --git a/backup_and_restore/application_backup_and_restore/installing/oadp-cleaning-up-after-data-mover-1-1-backup-doc.adoc b/backup_and_restore/application_backup_and_restore/installing/oadp-cleaning-up-after-data-mover-1-1-backup-doc.adoc
new file mode 100644
index 000000000000..83535e5f3a5b
--- /dev/null
+++ b/backup_and_restore/application_backup_and_restore/installing/oadp-cleaning-up-after-data-mover-1-1-backup-doc.adoc
@@ -0,0 +1,26 @@
+[id="oadp-cleaning-up-after-data-mover-1-1-backup-doc"]
+= Cleaning up after a backup using OADP 1.1 Data Mover
+include::_attributes/common-attributes.adoc[]
+:context: datamover11
+
+toc::[]
+
+For OADP 1.1 Data Mover, you must perform a data cleanup after you perform a backup.
+
+The cleanup consists of deleting the following resources:
+
+* Snapshots in a bucket
+* Cluster resources
+* Volume snapshot backups (VSBs), after a backup procedure that is run by a schedule or run repeatedly
+
+include::modules/oadp-cleaning-up-after-data-mover-snapshots.adoc[leveloffset=+1]
+
+[id="deleting-cluster-resources-data-mover"]
+== Deleting cluster resources
+
+OADP 1.1 Data Mover might leave cluster resources whether or not it successfully backs up your Container Storage Interface (CSI) volume snapshots to a remote object store.
+
+include::modules/oadp-deleting-cluster-resources-following-success.adoc[leveloffset=+2]
+include::modules/oadp-deleting-cluster-resources-following-failure.adoc[leveloffset=+2]
+
+include::modules/oadp-vsb-cleanup-after-scheduler.adoc[leveloffset=+1]
diff --git a/backup_and_restore/application_backup_and_restore/installing/oadp-installing-operator.adoc b/backup_and_restore/application_backup_and_restore/installing/oadp-installing-operator.adoc
new file mode 100644
index 000000000000..ba6d54403d8e
--- /dev/null
+++ b/backup_and_restore/application_backup_and_restore/installing/oadp-installing-operator.adoc
@@ -0,0 +1,25 @@
+:_mod-docs-content-type: PROCEDURE
+[id="oadp-installing-operator-doc"]
+= Installing the OADP Operator
+include::_attributes/common-attributes.adoc[]
+:context: installing-oadp-operator
+
+toc::[]
+
+You can install the OpenShift API for Data Protection (OADP) Operator on {product-title} {product-version} by using Operator Lifecycle Manager (OLM).
+
+The OADP Operator installs link:https://{velero-domain}/docs/v{velero-version}/[Velero {velero-version}].
+
+.Prerequisites
+
+* You must be logged in as a user with `cluster-admin` privileges.
+
+.Procedure
+
+. In the {product-title} web console, click *Operators* -> *OperatorHub*.
+. Use the *Filter by keyword* field to find the *OADP Operator*.
+. Select the *OADP Operator* and click *Install*.
+. Click *Install* to install the Operator in the `openshift-adp` project.
+. Click *Operators* -> *Installed Operators* to verify the installation.
+
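+If you prefer the CLI to the web console, you can install the Operator by applying a `Subscription` manifest. The following is a minimal sketch; it assumes that the `openshift-adp` namespace and an `OperatorGroup` for it already exist, and the channel name is an assumption that depends on the OADP version you want:
+
+[source,yaml]
+----
+apiVersion: operators.coreos.com/v1alpha1
+kind: Subscription
+metadata:
+  name: redhat-oadp-operator
+  namespace: openshift-adp
+spec:
+  channel: stable-1.3 # assumed channel name; check the channels available in your catalog
+  name: redhat-oadp-operator
+  source: redhat-operators
+  sourceNamespace: openshift-marketplace
+----
+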
+include::modules/velero-oadp-version-relationship.adoc[leveloffset=+1]
diff --git a/backup_and_restore/application_backup_and_restore/installing/oadp-using-data-mover-for-csi-snapshots-doc.adoc b/backup_and_restore/application_backup_and_restore/installing/oadp-using-data-mover-for-csi-snapshots-doc.adoc
new file mode 100644
index 000000000000..a39a0419065d
--- /dev/null
+++ b/backup_and_restore/application_backup_and_restore/installing/oadp-using-data-mover-for-csi-snapshots-doc.adoc
@@ -0,0 +1,281 @@
+// Module included in the following assemblies:
+//
+// * backup_and_restore/application_backup_and_restore/backing_up_and_restoring/backing-up-applications.adoc
+
+:_mod-docs-content-type: PROCEDURE
+[id="oadp-using-data-mover-for-csi-snapshots-doc"]
+= Using Data Mover for CSI snapshots
+include::_attributes/common-attributes.adoc[]
+:context: backing-up-applications
+
+toc::[]
+
+:FeatureName: Data Mover for CSI snapshots
+
+The OADP Data Mover enables customers to back up Container Storage Interface (CSI) volume snapshots to a remote object store. When Data Mover is enabled, you can restore stateful applications, using CSI volume snapshots pulled from the object store if a failure, accidental deletion, or corruption of the cluster occurs.
+
+The Data Mover solution uses the Restic option of VolSync.
+
+Data Mover supports backup and restore of CSI volume snapshots only.
+
+In OADP 1.2, Data Mover queues `VolumeSnapshotBackup` (VSB) and `VolumeSnapshotRestore` (VSR) custom resources by using the VolumeSnapshotMover (VSM). You can improve VSM performance by specifying the number of VSBs and VSRs that can be `InProgress` concurrently. After all asynchronous plugin operations are complete, the backup is marked as complete.
+
+
+[NOTE]
+====
+The OADP 1.1 Data Mover is a Technology Preview feature.
+
+The OADP 1.2 Data Mover has significantly improved features and performances, but is still a Technology Preview feature.
+====
+:FeatureName: The OADP Data Mover
+include::snippets/technology-preview.adoc[leveloffset=+1]
+
+[NOTE]
+====
+Red Hat recommends that customers who use OADP 1.2 Data Mover to back up and restore ODF CephFS volumes upgrade or install {product-title} version 4.12 or later for improved performance. OADP Data Mover can leverage CephFS shallow volumes in {product-title} version 4.12 or later, which, based on our testing, can improve the performance of backup times.
+
+* https://issues.redhat.com/browse/RHSTOR-4287[CephFS ROX details]
+//* https://github.com/ceph/ceph-csi/blob/devel/docs/cephfs-snapshot-backed-volumes.md[Provisioning and mounting CephFS snapshot-backed volumes]
+
+
+//For more information about OADP 1.2 with CephS [name of topic], see ___.
+
+====
+
+.Prerequisites
+
+* You have verified that the `StorageClass` and `VolumeSnapshotClass` custom resources (CRs) support CSI.
+
+* You have verified that only one `VolumeSnapshotClass` CR has the annotation `snapshot.storage.kubernetes.io/is-default-class: "true"`.
++
+[NOTE]
+====
+In {product-title} version 4.12 or later, verify that this is the only default `VolumeSnapshotClass`.
+====
+
+* You have verified that `deletionPolicy` of the `VolumeSnapshotClass` CR is set to `Retain`.
+
+* You have verified that only one `StorageClass` CR has the annotation `storageclass.kubernetes.io/is-default-class: "true"`.
+
+* You have included the label `{velero-domain}/csi-volumesnapshot-class: "true"` in your `VolumeSnapshotClass` CR.
+
+* You have verified that the OADP namespace has the `volsync.backube/privileged-movers="true"` annotation, which you can set by running the following command: `oc annotate --overwrite namespace/openshift-adp volsync.backube/privileged-movers="true"`.
++
+[NOTE]
+====
+In OADP 1.1 the above setting is mandatory.
+
+In OADP 1.2 the `privileged-movers` setting is not required in most scenarios. The restoring container permissions should be adequate for the VolSync copy. In some user scenarios, there might be permission errors that the `privileged-movers="true"` setting resolves.
+====
+
+* You have installed the VolSync Operator by using the Operator Lifecycle Manager (OLM).
++
+[NOTE]
+====
+The VolSync Operator is required for using OADP Data Mover.
+====
+
+* You have installed the OADP Operator by using OLM.
+
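+For reference, a `VolumeSnapshotClass` CR that satisfies the preceding prerequisites might look like the following sketch. The class name and CSI driver are illustrative assumptions for a Ceph RBD environment:
+
+[source,yaml]
+----
+apiVersion: snapshot.storage.k8s.io/v1
+kind: VolumeSnapshotClass
+metadata:
+  name: example-snapclass # hypothetical name
+  labels:
+    velero.io/csi-volumesnapshot-class: "true"
+  annotations:
+    snapshot.storage.kubernetes.io/is-default-class: "true"
+driver: openshift-storage.rbd.csi.ceph.com # illustrative CSI driver
+deletionPolicy: Retain
+----
+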
+.Procedure
+
+. Configure a Restic secret by creating a `.yaml` file, as in the following example:
++
+[source,yaml]
+----
+apiVersion: v1
+kind: Secret
+metadata:
+ name: <secret_name>
+ namespace: openshift-adp
+type: Opaque
+stringData:
+ RESTIC_PASSWORD: <secure_restic_password>
+----
++
+[NOTE]
+====
+By default, the Operator looks for a secret named `dm-credential`. If you are using a different name, you must specify the name in the Data Protection Application (DPA) CR by using the `dpa.spec.features.dataMover.credentialName` field.
+====
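++
+Equivalently, you can create the secret from the CLI. This sketch assumes the default secret name `dm-credential`; the password value is a placeholder:
++
+[source,terminal]
+----
+# <secure_restic_password> is a placeholder; choose your own value
+$ oc create secret generic dm-credential -n openshift-adp \
+    --from-literal=RESTIC_PASSWORD=<secure_restic_password>
+----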
+
+. Create a DPA CR similar to the following example. The default plugins include CSI.
++
+.Example Data Protection Application (DPA) CR
+[source,yaml]
+----
+apiVersion: oadp.openshift.io/v1alpha1
+kind: DataProtectionApplication
+metadata:
+ name: velero-sample
+ namespace: openshift-adp
+spec:
+ backupLocations:
+ - velero:
+ config:
+ profile: default
+ region: us-east-1
+ credential:
+ key: cloud
+ name: cloud-credentials
+ default: true
+ objectStorage:
+ bucket: <bucket_name>
+ prefix: <bucket_prefix>
+ provider: aws
+ configuration:
+ restic:
+ enable: <true_or_false>
+ velero:
+ itemOperationSyncFrequency: "10s"
+ defaultPlugins:
+ - openshift
+ - aws
+ - csi
+ - vsm <1>
+ features:
+ dataMover:
+ credentialName: restic-secret
+ enable: true
+ maxConcurrentBackupVolumes: "3" <2>
+ maxConcurrentRestoreVolumes: "3" <3>
+ pruneInterval: "14" <4>
+ volumeOptions: <5>
+ sourceVolumeOptions:
+ accessMode: ReadOnlyMany
+ cacheAccessMode: ReadWriteOnce
+ cacheCapacity: 2Gi
+ destinationVolumeOptions:
+ storageClass: other-storageclass-name
+ cacheAccessMode: ReadWriteMany
+ snapshotLocations:
+ - velero:
+ config:
+ profile: default
+ region: us-west-2
+ provider: aws
+
+----
+<1> OADP 1.2 only.
+<2> OADP 1.2 only. Optional: Specify the upper limit of the number of snapshots allowed to be queued for backup. The default value is 10.
+<3> OADP 1.2 only. Optional: Specify the upper limit of the number of snapshots allowed to be queued for restore. The default value is 10.
+<4> OADP 1.2 only. Optional: Specify the number of days between running Restic pruning on the repository. The prune operation repacks the data to free space, but it can also generate significant I/O traffic as a part of the process. Setting this option allows a trade-off between storage consumption from no-longer-referenced data and access costs.
+<5> OADP 1.2 only. Optional: Specify VolSync volume options for backup and restore.
+
++
+The OADP Operator installs two custom resource definitions (CRDs), `VolumeSnapshotBackup` and `VolumeSnapshotRestore`.
++
+.Example `VolumeSnapshotBackup` CRD
+[source,yaml]
+----
+apiVersion: datamover.oadp.openshift.io/v1alpha1
+kind: VolumeSnapshotBackup
+metadata:
+ name: <vsb_name>
+ namespace: <namespace> <1>
+spec:
+ volumeSnapshotContent:
+ name: <snapcontent_name>
+ protectedNamespace: <adp_namespace> <2>
+ resticSecretRef:
+ name: <restic_secret_name>
+----
+<1> Specify the namespace where the volume snapshot exists.
+<2> Specify the namespace where the OADP Operator is installed. The default is `openshift-adp`.
++
+.Example `VolumeSnapshotRestore` CRD
+[source,yaml]
+----
+apiVersion: datamover.oadp.openshift.io/v1alpha1
+kind: VolumeSnapshotRestore
+metadata:
+ name: <vsr_name>
+ namespace: <namespace> <1>
+spec:
+ protectedNamespace: <protected_ns> <2>
+ resticSecretRef:
+ name: <restic_secret_name>
+ volumeSnapshotMoverBackupRef:
+ sourcePVCData:
+ name: <source_pvc_name>
+ size: <source_pvc_size>
+ resticrepository: <restic_repository_url>
+ volumeSnapshotClassName: <volume_snapshot_class_name>
+----
+<1> Specify the namespace where the volume snapshot exists.
+<2> Specify the namespace where the OADP Operator is installed. The default is `openshift-adp`.
+
+. You can back up a volume snapshot by performing the following steps:
+
+.. Create a backup CR:
++
+[source,yaml]
+----
+apiVersion: velero.io/v1
+kind: Backup
+metadata:
+ name: <backup_name>
+ namespace: <protected_ns> <1>
+spec:
+ includedNamespaces:
+ - <app_ns> <2>
+ storageLocation: velero-sample-1
+----
+<1> Specify the namespace where the Operator is installed. The default namespace is `openshift-adp`.
+<2> Specify the application namespace or namespaces to be backed up.
+
+.. Wait up to 10 minutes and check whether the `VolumeSnapshotBackup` CR status is `Completed` by entering the following commands:
++
+[source,terminal]
+----
+$ oc get vsb -n <app_ns>
+----
++
+[source,terminal]
+----
+$ oc get vsb <vsb_name> -n <app_ns> -o jsonpath="{.status.phase}"
+----
++
+A snapshot is created in the object store that was configured in the DPA.
++
+[NOTE]
+====
+If the status of the `VolumeSnapshotBackup` CR becomes `Failed`, refer to the Velero logs for troubleshooting.
+====
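++
+To review the Velero logs, you can read the log of the Velero deployment. This assumes the deployment is named `velero` and runs in the OADP namespace:
++
+[source,terminal]
+----
+$ oc logs deployment/velero -n openshift-adp
+----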
+
+. You can restore a volume snapshot by performing the following steps:
+
+.. Delete the application namespace and the `VolumeSnapshotContent` that was created by the Velero CSI plugin.
+
+.. Create a `Restore` CR and set `restorePVs` to `true`.
++
+.Example `Restore` CR
+[source,yaml]
+----
+apiVersion: velero.io/v1
+kind: Restore
+metadata:
+ name: <restore_name>
+ namespace: <protected_ns>
+spec:
+ backupName: <previous_backup_name>
+ restorePVs: true
+----
+
+.. Wait up to 10 minutes and check whether the `VolumeSnapshotRestore` CR status is `Completed` by entering the following command:
++
+[source,terminal]
+----
+$ oc get vsr -n <app_ns>
+----
++
+[source,terminal]
+----
+$ oc get vsr <vsr_name> -n <app_ns> -o jsonpath="{.status.phase}"
+----
+
+.. Check whether your application data and resources have been restored.
++
+[NOTE]
+====
+If the status of the `VolumeSnapshotRestore` CR becomes `Failed`, refer to the Velero logs for troubleshooting.
+====
diff --git a/backup_and_restore/application_backup_and_restore/installing/uninstalling-oadp.adoc b/backup_and_restore/application_backup_and_restore/installing/uninstalling-oadp.adoc
index 49f3c9b02f2e..7cf49e56d54e 100644
--- a/backup_and_restore/application_backup_and_restore/installing/uninstalling-oadp.adoc
+++ b/backup_and_restore/application_backup_and_restore/installing/uninstalling-oadp.adoc
@@ -1,4 +1,4 @@
-:_content-type: ASSEMBLY
+:_mod-docs-content-type: ASSEMBLY
[id="uninstalling-oadp"]
= Uninstalling the OpenShift API for Data Protection
include::_attributes/common-attributes.adoc[]
diff --git a/backup_and_restore/application_backup_and_restore/oadp-advanced-topics.adoc b/backup_and_restore/application_backup_and_restore/oadp-advanced-topics.adoc
index 26d17d149167..fcd0124642c4 100644
--- a/backup_and_restore/application_backup_and_restore/oadp-advanced-topics.adoc
+++ b/backup_and_restore/application_backup_and_restore/oadp-advanced-topics.adoc
@@ -1,4 +1,4 @@
-:_content-type: ASSEMBLY
+:_mod-docs-content-type: ASSEMBLY
[id="oadp-advanced-topics"]
= Advanced OADP features and functionalities
include::_attributes/common-attributes.adoc[]
@@ -19,6 +19,10 @@ include::modules/oadp-using-enable-api-group-versions.adoc[leveloffset=+2]
== Backing up data from one cluster and restoring it to another cluster
include::modules/oadp-about-backing-and-restoring-from-cluster-to-cluster.adoc[leveloffset=+2]
+include::modules/oadp-pod-volume-backup.adoc[leveloffset=+2]
+include::modules/oadp-backing-up-opt-in.adoc[leveloffset=+3]
+include::modules/oadp-backing-up-opt-out.adoc[leveloffset=+3]
+include::modules/oadp-cluster-to-cluster-uid-and-gid-ranges.adoc[leveloffset=+2]
include::modules/oadp-backing-and-restoring-from-cluster-to-cluster.adoc[leveloffset=+2]
[role="_additional-resources"]
@@ -27,8 +31,8 @@ include::modules/oadp-backing-and-restoring-from-cluster-to-cluster.adoc[levelof
For more information about API group versions, see xref:../../backup_and_restore/application_backup_and_restore/oadp-advanced-topics.adoc#oadp-different-kubernetes-api-versions[Working with different Kubernetes API versions on the same cluster].
-For more information about OADP Data Mover, see xref:../../backup_and_restore/application_backup_and_restore/backing_up_and_restoring/backing-up-applications.adoc#oadp-using-data-mover-for-csi-snapshots_backing-up-applications[Using Data Mover for CSI snapshots].
+For more information about OADP Data Mover, see xref:../../backup_and_restore/application_backup_and_restore/installing/oadp-using-data-mover-for-csi-snapshots-doc.adoc#backing-up-applications[Using Data Mover for CSI snapshots].
-For more information about using Restic with OADP, see xref:../../backup_and_restore/application_backup_and_restore/backing_up_and_restoring/backing-up-applications.adoc#oadp-backing-up-applications-restic_backing-up-applications[Backing up applications with Restic].
+For more information about using Restic with OADP, see xref:../../backup_and_restore/application_backup_and_restore/backing_up_and_restoring/oadp-backing-up-applications-restic-doc.adoc#backing-up-applications[Backing up applications with File System Backup: Kopia or Restic].
:!oadp-advanced-topics:
diff --git a/backup_and_restore/application_backup_and_restore/oadp-api.adoc b/backup_and_restore/application_backup_and_restore/oadp-api.adoc
index 6ac2bd278c3f..ceb99eeef8c7 100644
--- a/backup_and_restore/application_backup_and_restore/oadp-api.adoc
+++ b/backup_and_restore/application_backup_and_restore/oadp-api.adoc
@@ -1,4 +1,4 @@
-:_content-type: ASSEMBLY
+:_mod-docs-content-type: ASSEMBLY
[id="oadp-api"]
= APIs used with OADP
include::_attributes/common-attributes.adoc[]
diff --git a/backup_and_restore/application_backup_and_restore/oadp-features-plugins.adoc b/backup_and_restore/application_backup_and_restore/oadp-features-plugins.adoc
index 417275375f02..97b2dc77984c 100644
--- a/backup_and_restore/application_backup_and_restore/oadp-features-plugins.adoc
+++ b/backup_and_restore/application_backup_and_restore/oadp-features-plugins.adoc
@@ -1,4 +1,4 @@
-:_content-type: ASSEMBLY
+:_mod-docs-content-type: ASSEMBLY
[id="oadp-features-plugins"]
= OADP features and plugins
include::_attributes/common-attributes.adoc[]
@@ -13,15 +13,19 @@ The default plugins enable Velero to integrate with certain cloud providers and
include::modules/oadp-features.adoc[leveloffset=+1]
include::modules/oadp-plugins.adoc[leveloffset=+1]
include::modules/oadp-configuring-velero-plugins.adoc[leveloffset=+1]
+include::modules/oadp-plugins-receiving-eof-message.adoc[leveloffset=+2]
+include::modules/oadp-supported-architecture.adoc[leveloffset=+1]
[id="oadp-support-for-ibm-power-and-ibm-z"]
-== OADP support for IBM Power and {ibmzProductName}
+== OADP support for {ibm-power-title} and {ibm-z-title}
-OpenShift API for Data Protection (OADP) is platform neutral. The information that follows relates only to IBM Power and to {ibmzProductName}.
+OpenShift API for Data Protection (OADP) is platform neutral. The information that follows relates only to {ibm-power-name} and to {ibm-z-name}.
-OADP 1.1.0 was tested successfully against {product-title} 4.11 for both IBM Power and {ibmzProductName}. The sections that follow give testing and support information for OADP 1.1.0 in terms of backup locations for these systems.
+OADP 1.1.0 was tested successfully against {product-title} 4.11 for both {ibm-power-name} and {ibm-z-name}. The sections that follow give testing and support information for OADP 1.1.0 in terms of backup locations for these systems.
include::modules/oadp-ibm-power-test-support.adoc[leveloffset=+2]
include::modules/oadp-ibm-z-test-support.adoc[leveloffset=+2]
+include::modules/oadp-fips.adoc[leveloffset=+1]
+
:!oadp-features-plugins:
diff --git a/backup_and_restore/application_backup_and_restore/oadp-intro.adoc b/backup_and_restore/application_backup_and_restore/oadp-intro.adoc
new file mode 100644
index 000000000000..ab17fc13ff62
--- /dev/null
+++ b/backup_and_restore/application_backup_and_restore/oadp-intro.adoc
@@ -0,0 +1,34 @@
+:_mod-docs-content-type: ASSEMBLY
+[id="oadp-introduction"]
+= Introduction to {oadp-full}
+include::_attributes/common-attributes.adoc[]
+:context: oadp-api
+:namespace: openshift-adp
+:local-product: OADP
+
+toc::[]
+
+The {oadp-first} product safeguards customer applications on {product-title}. It offers comprehensive disaster recovery protection, covering {product-title} applications, application-related cluster resources, persistent volumes, and internal images. OADP is also capable of backing up both containerized applications and virtual machines (VMs).
+
+However, OADP does not serve as a disaster recovery solution for xref:../../backup_and_restore/control_plane_backup_and_restore/backing-up-etcd.adoc#backup-etcd[etcd] or OpenShift Operators.
+
+
+[id="oadp-apis_{context}"]
+== {oadp-full} APIs
+
+{oadp-first} provides APIs that enable multiple approaches to customizing backups and preventing the inclusion of unnecessary or inappropriate resources.
+
+OADP provides the following APIs:
+
+* xref:../../backup_and_restore/application_backup_and_restore/backing_up_and_restoring/backing-up-applications.adoc#backing-up-applications[Backup]
+* xref:../../backup_and_restore/application_backup_and_restore/backing_up_and_restoring/restoring-applications.adoc#restoring-applications[Restore]
+* xref:../../backup_and_restore/application_backup_and_restore/backing_up_and_restoring/oadp-scheduling-backups-doc.adoc#oadp-scheduling-backups-doc[Schedule]
+* xref:../../backup_and_restore/application_backup_and_restore/installing/installing-oadp-aws.adoc#oadp-about-backup-snapshot-locations_installing-oadp-aws[BackupStorageLocation]
+* xref:../../backup_and_restore/application_backup_and_restore/backing_up_and_restoring/oadp-backing-up-pvs-csi-doc.adoc#oadp-backing-up-pvs-csi-doc[VolumeSnapshotLocation]
+
+[role="_additional-resources"]
+.Additional resources
+
+* xref:../../backup_and_restore/control_plane_backup_and_restore/backing-up-etcd.adoc#backup-etcd[Backing up etcd]
+// once finished re-work come back and add doc links to the APIs
+
diff --git a/backup_and_restore/application_backup_and_restore/oadp-release-notes.adoc b/backup_and_restore/application_backup_and_restore/oadp-release-notes.adoc
deleted file mode 100644
index 50fff6ad65a5..000000000000
--- a/backup_and_restore/application_backup_and_restore/oadp-release-notes.adoc
+++ /dev/null
@@ -1,20 +0,0 @@
-:_content-type: ASSEMBLY
-[id="oadp-release-notes"]
-= OADP release notes
-include::_attributes/common-attributes.adoc[]
-:context: oadp-release-notes
-
-toc::[]
-
-The release notes for OpenShift API for Data Protection (OADP) describe new features and enhancements, deprecated features, product recommendations, known issues, and resolved issues.
-
-
-include::modules/oadp-release-notes-1-2-0.adoc[leveloffset=+1]
-
-include::modules/oadp-release-notes-1-1-4.adoc[leveloffset=+1]
-
-include::modules/oadp-release-notes-1-1-2.adoc[leveloffset=+1]
-
-include::modules/oadp-release-notes-1-1-1.adoc[leveloffset=+1]
-
-:!oadp-release-notes:
diff --git a/virt/virtual_machines/cloning_vms/_attributes b/backup_and_restore/application_backup_and_restore/oadp-rosa/_attributes
similarity index 100%
rename from virt/virtual_machines/cloning_vms/_attributes
rename to backup_and_restore/application_backup_and_restore/oadp-rosa/_attributes
diff --git a/backup_and_restore/application_backup_and_restore/oadp-rosa/images b/backup_and_restore/application_backup_and_restore/oadp-rosa/images
new file mode 120000
index 000000000000..4399cbb3c0f3
--- /dev/null
+++ b/backup_and_restore/application_backup_and_restore/oadp-rosa/images
@@ -0,0 +1 @@
+../../../images/
\ No newline at end of file
diff --git a/virt/virtual_machines/cloning_vms/modules b/backup_and_restore/application_backup_and_restore/oadp-rosa/modules
similarity index 100%
rename from virt/virtual_machines/cloning_vms/modules
rename to backup_and_restore/application_backup_and_restore/oadp-rosa/modules
diff --git a/backup_and_restore/application_backup_and_restore/oadp-rosa/oadp-rosa-backing-up-applications.adoc b/backup_and_restore/application_backup_and_restore/oadp-rosa/oadp-rosa-backing-up-applications.adoc
new file mode 100644
index 000000000000..0508483d774c
--- /dev/null
+++ b/backup_and_restore/application_backup_and_restore/oadp-rosa/oadp-rosa-backing-up-applications.adoc
@@ -0,0 +1,42 @@
+:_mod-docs-content-type: ASSEMBLY
+[id="oadp-rosa-backing-up-applications"]
+= Backing up applications on ROSA clusters using OADP
+include::_attributes/common-attributes.adoc[]
+:context: oadp-rosa-backing-up-applications
+
+toc::[]
+
+You can use {oadp-first} with {product-rosa} (ROSA) clusters to back up and restore application data.
+
+ROSA is a fully managed, turnkey application platform that allows you to deliver value to your customers by building and deploying applications.
+
+ROSA provides seamless integration with a wide range of {aws-first} compute, database, analytics, machine learning, networking, mobile, and other services to speed up the building and delivery of differentiating experiences to your customers.
+
+You can subscribe to the service directly from your {aws-short} account.
+
+After you create your clusters, you can operate your clusters with the {product-title} web console or through link:https://docs.openshift.com/dedicated/ocm/ocm-overview.html[{cluster-manager-first}]. You can also use ROSA with OpenShift APIs and command-line interface (CLI) tools.
+
+For additional information about ROSA installation, see link:https://www.redhat.com/en/products/interactive-walkthrough/install-rosa[Installing Red Hat OpenShift Service on AWS (ROSA) interactive walkthrough].
+
+Before installing {oadp-first}, you must set up role and policy credentials for OADP so that it can use the {aws-full} API.
+
+This process is performed in the following two stages:
+
+. Prepare {aws-short} credentials
+. Install the OADP Operator and give it an IAM role
+
+include::modules/preparing-aws-credentials-for-oadp.adoc[leveloffset=+1]
+
+include::modules/installing-oadp-rosa-sts.adoc[leveloffset=+1]
+[role="_additional-resources"]
+.Additional resources
+
+* link:https://access.redhat.com/documentation/en-us/openshift_container_platform/4.14/html/operators/user-tasks#olm-installing-from-operatorhub-using-web-console_olm-installing-operators-in-namespace[Installing from OperatorHub using the web console].
+* link:https://docs.openshift.com/container-platform/4.14/backup_and_restore/application_backup_and_restore/backing_up_and_restoring/backing-up-applications.html[Backing up applications]
+
+[id="oadp-rosa-backing-up-and-cleaning"]
+== Example: Backing up workload on OADP ROSA STS, with an optional cleanup
+
+include::modules/performing-a-backup-oadp-rosa-sts.adoc[leveloffset=+2]
+include::modules/cleanup-a-backup-oadp-rosa-sts.adoc[leveloffset=+2]
+
diff --git a/virt/virtual_machines/cloning_vms/snippets b/backup_and_restore/application_backup_and_restore/oadp-rosa/snippets
similarity index 100%
rename from virt/virtual_machines/cloning_vms/snippets
rename to backup_and_restore/application_backup_and_restore/oadp-rosa/snippets
diff --git a/virt/virtual_machines/importing_vms/_attributes b/backup_and_restore/application_backup_and_restore/release-notes/_attributes
similarity index 100%
rename from virt/virtual_machines/importing_vms/_attributes
rename to backup_and_restore/application_backup_and_restore/release-notes/_attributes
diff --git a/backup_and_restore/application_backup_and_restore/release-notes/images b/backup_and_restore/application_backup_and_restore/release-notes/images
new file mode 120000
index 000000000000..4399cbb3c0f3
--- /dev/null
+++ b/backup_and_restore/application_backup_and_restore/release-notes/images
@@ -0,0 +1 @@
+../../../images/
\ No newline at end of file
diff --git a/virt/virtual_machines/importing_vms/modules b/backup_and_restore/application_backup_and_restore/release-notes/modules
similarity index 100%
rename from virt/virtual_machines/importing_vms/modules
rename to backup_and_restore/application_backup_and_restore/release-notes/modules
diff --git a/backup_and_restore/application_backup_and_restore/release-notes/oadp-release-notes-1-1.adoc b/backup_and_restore/application_backup_and_restore/release-notes/oadp-release-notes-1-1.adoc
new file mode 100644
index 000000000000..ec1f49954abb
--- /dev/null
+++ b/backup_and_restore/application_backup_and_restore/release-notes/oadp-release-notes-1-1.adoc
@@ -0,0 +1,25 @@
+:_mod-docs-content-type: ASSEMBLY
+[id="oadp-release-notes-1-1"]
+= OADP 1.1 release notes
+include::_attributes/common-attributes.adoc[]
+include::_attributes/attributes-openshift-dedicated.adoc[]
+:context: oadp-release-notes
+
+toc::[]
+
+The release notes for OpenShift API for Data Protection (OADP) 1.1 describe new features and enhancements, deprecated features, product recommendations, known issues, and resolved issues.
+
+include::modules/oadp-release-notes-1-1-7.adoc[leveloffset=+1]
+
+include::modules/oadp-release-notes-1-1-6.adoc[leveloffset=+1]
+
+include::modules/oadp-release-notes-1-1-5.adoc[leveloffset=+1]
+
+include::modules/oadp-release-notes-1-1-4.adoc[leveloffset=+1]
+
+include::modules/oadp-release-notes-1-1-3.adoc[leveloffset=+1]
+
+include::modules/oadp-release-notes-1-1-2.adoc[leveloffset=+1]
+
+include::modules/oadp-release-notes-1-1-1.adoc[leveloffset=+1]
diff --git a/backup_and_restore/application_backup_and_restore/release-notes/oadp-release-notes-1-2.adoc b/backup_and_restore/application_backup_and_restore/release-notes/oadp-release-notes-1-2.adoc
new file mode 100644
index 000000000000..4bba009be9b8
--- /dev/null
+++ b/backup_and_restore/application_backup_and_restore/release-notes/oadp-release-notes-1-2.adoc
@@ -0,0 +1,29 @@
+:_mod-docs-content-type: ASSEMBLY
+[id="oadp-release-notes-1-2"]
+= OADP 1.2 release notes
+include::_attributes/common-attributes.adoc[]
+include::_attributes/attributes-openshift-dedicated.adoc[]
+:context: oadp-release-notes
+
+toc::[]
+
+The release notes for OpenShift API for Data Protection (OADP) 1.2 describe new features and enhancements, deprecated features, product recommendations, known issues, and resolved issues.
+
+include::modules/oadp-release-notes-1-2-3.adoc[leveloffset=+1]
+
+include::modules/oadp-release-notes-1-2-2.adoc[leveloffset=+1]
+
+include::modules/oadp-release-notes-1-2-1.adoc[leveloffset=+1]
+
+include::modules/oadp-release-notes-1-2-0.adoc[leveloffset=+1]
+include::modules/oadp-backing-up-dpa-configuration-1-2-0.adoc[leveloffset=+3]
+include::modules/oadp-upgrading-oadp-operator-1-2-0.adoc[leveloffset=+3]
+[role="_additional-resources"]
+.Additional resources
+
+* xref:../../../backup_and_restore/application_backup_and_restore/installing/installing-oadp-aws.adoc#migration-configuring-aws-s3_installing-oadp-aws[Configuring Amazon Web Services]
+* xref:../../../backup_and_restore/application_backup_and_restore/installing/oadp-using-data-mover-for-csi-snapshots-doc.adoc#oadp-using-data-mover-for-csi-snapshots-doc[Using Data Mover for CSI snapshots]
+* xref:../../../operators/admin/olm-upgrading-operators.adoc#olm-changing-update-channel_olm-upgrading-operators[Updating installed Operators]
+
+include::modules/oadp-converting-to-new-dpa-1-2-0.adoc[leveloffset=+3]
+include::modules/oadp-verifying-upgrade-1-2-0.adoc[leveloffset=+3]
diff --git a/backup_and_restore/application_backup_and_restore/release-notes/oadp-release-notes-1-3.adoc b/backup_and_restore/application_backup_and_restore/release-notes/oadp-release-notes-1-3.adoc
new file mode 100644
index 000000000000..0f48f8ba3b96
--- /dev/null
+++ b/backup_and_restore/application_backup_and_restore/release-notes/oadp-release-notes-1-3.adoc
@@ -0,0 +1,21 @@
+:_mod-docs-content-type: ASSEMBLY
+[id="oadp-release-notes"]
+= OADP 1.3 release notes
+include::_attributes/common-attributes.adoc[]
+include::_attributes/attributes-openshift-dedicated.adoc[]
+:context: oadp-release-notes
+
+toc::[]
+
+The release notes for OpenShift API for Data Protection (OADP) 1.3 describe new features and enhancements, deprecated features, product recommendations, known issues, and resolved issues.
+
+include::modules/oadp-release-notes-1-3-0.adoc[leveloffset=+1]
+include::modules/oadp-upgrade-from-oadp-data-mover-1-2-0.adoc[leveloffset=+3]
+include::modules/oadp-backing-up-dpa-configuration-1-3-0.adoc[leveloffset=+3]
+include::modules/oadp-upgrading-oadp-operator-1-3-0.adoc[leveloffset=+3]
+[role="_additional-resources"]
+.Additional resources
+* xref:../../../operators/admin/olm-upgrading-operators.adoc#olm-changing-update-channel_olm-upgrading-operators[Updating installed Operators]
+
+include::modules/oadp-converting-dpa-to-new-version-1-3-0.adoc[leveloffset=+3]
+include::modules/oadp-verifying-upgrade-1-3-0.adoc[leveloffset=+3]
diff --git a/virt/virtual_machines/importing_vms/snippets b/backup_and_restore/application_backup_and_restore/release-notes/snippets
similarity index 100%
rename from virt/virtual_machines/importing_vms/snippets
rename to backup_and_restore/application_backup_and_restore/release-notes/snippets
diff --git a/backup_and_restore/application_backup_and_restore/troubleshooting.adoc b/backup_and_restore/application_backup_and_restore/troubleshooting.adoc
index 41b79da4646a..5b533fc03215 100644
--- a/backup_and_restore/application_backup_and_restore/troubleshooting.adoc
+++ b/backup_and_restore/application_backup_and_restore/troubleshooting.adoc
@@ -1,7 +1,8 @@
-:_content-type: ASSEMBLY
+:_mod-docs-content-type: ASSEMBLY
[id="troubleshooting"]
= Troubleshooting
include::_attributes/common-attributes.adoc[]
+include::_attributes/attributes-openshift-dedicated.adoc[]
:context: oadp-troubleshooting
:namespace: openshift-adp
:local-product: OADP
@@ -13,7 +14,7 @@ You can debug Velero custom resources (CRs) by using the xref:../../backup_and_r
You can check xref:../../backup_and_restore/application_backup_and_restore/troubleshooting.adoc#oadp-installation-issues_oadp-troubleshooting[installation issues], xref:../../backup_and_restore/application_backup_and_restore/troubleshooting.adoc#oadp-backup-restore-cr-issues_oadp-troubleshooting[backup and restore CR issues], and xref:../../backup_and_restore/application_backup_and_restore/troubleshooting.adoc#oadp-restic-issues_oadp-troubleshooting[Restic issues].
-You can collect logs, CR information, and Prometheus metric data by using the xref:../../backup_and_restore/application_backup_and_restore/troubleshooting.adoc#migration-using-must-gather_oadp-troubleshooting[`must-gather` tool].
+You can collect logs and CR information by using the xref:../../backup_and_restore/application_backup_and_restore/troubleshooting.adoc#migration-using-must-gather_oadp-troubleshooting[`must-gather` tool].
You can obtain the Velero CLI tool by:
@@ -21,6 +22,7 @@ You can obtain the Velero CLI tool by:
* Accessing the Velero binary in the Velero deployment in the cluster
include::modules/velero-obtaining-by-downloading.adoc[leveloffset=+1]
+include::modules/velero-oadp-version-relationship.adoc[leveloffset=+2]
include::modules/velero-obtaining-by-accessing-binary.adoc[leveloffset=+1]
include::modules/oadp-debugging-oc-cli.adoc[leveloffset=+1]
@@ -32,6 +34,9 @@ include::modules/migration-debugging-velero-resources.adoc[leveloffset=+1]
== Pods crash or restart due to lack of memory or CPU
If a Velero or Restic pod crashes due to a lack of memory or CPU, you can set specific resource requests for either of those resources.
+[role="_additional-resources"]
+.Additional resources
+* xref:../../backup_and_restore/application_backup_and_restore/installing/about-installing-oadp.adoc#oadp-velero-cpu-memory-requirements_about-installing-oadp[CPU and memory requirements]
include::modules/oadp-pod-crash-set-resource-request-velero.adoc[leveloffset=+2]
include::modules/oadp-pod-crash-set-resource-request-restic.adoc[leveloffset=+2]
@@ -65,6 +70,7 @@ This section describes the additional steps required to restore resources for se
include::modules/migration-debugging-velero-admission-webhooks-knative.adoc[leveloffset=+3]
include::modules/migration-debugging-velero-admission-webhooks-ibm-appconnect.adoc[leveloffset=+3]
+include::modules/oadp-plugins-receiving-eof-message.adoc[leveloffset=+2]
[role="_additional-resources"]
.Additional resources
@@ -74,9 +80,34 @@ include::modules/migration-debugging-velero-admission-webhooks-ibm-appconnect.ad
* xref:../../architecture/admission-plug-ins.adoc#admission-webhook-types_admission-plug-ins[Types of webhook admission plugins]
include::modules/oadp-installation-issues.adoc[leveloffset=+1]
+include::modules/oadp-operator-issues.adoc[leveloffset=+1]
+include::modules/oadp-timeouts.adoc[leveloffset=+1]
+include::modules/oadp-restic-timeouts.adoc[leveloffset=+2]
+include::modules/oadp-velero-timeouts.adoc[leveloffset=+2]
+include::modules/oadp-datamover-timeouts.adoc[leveloffset=+2]
+include::modules/oadp-csi-snapshot-timeouts.adoc[leveloffset=+2]
+include::modules/oadp-velero-default-timeouts.adoc[leveloffset=+2]
+include::modules/oadp-item-restore-timeouts.adoc[leveloffset=+2]
+include::modules/oadp-item-backup-timeouts.adoc[leveloffset=+2]
include::modules/oadp-backup-restore-cr-issues.adoc[leveloffset=+1]
include::modules/oadp-restic-issues.adoc[leveloffset=+1]
+include::modules/oadp-restic-restore-failing-psa-policy.adoc[leveloffset=+2]
include::modules/migration-using-must-gather.adoc[leveloffset=+1]
+include::modules/migration-combining-must-gather.adoc[leveloffset=+2]
+include::modules/oadp-monitoring.adoc[leveloffset=+1]
+[role="_additional-resources"]
+.Additional resources
+* xref:../../monitoring/monitoring-overview.adoc#about-openshift-monitoring[Monitoring stack]
+
+include::modules/oadp-monitoring-setup.adoc[leveloffset=+2]
+include::modules/oadp-creating-service-monitor.adoc[leveloffset=+2]
+include::modules/oadp-creating-alerting-rule.adoc[leveloffset=+2]
+[role="_additional-resources"]
+.Additional resources
+* xref:../../monitoring/managing-alerts.adoc#managing-alerts[Managing alerts]
+
+include::modules/oadp-list-of-metrics.adoc[leveloffset=+2]
+include::modules/oadp-viewing-metrics-ui.adoc[leveloffset=+2]
:!oadp-troubleshooting:
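
Because the troubleshooting assembly above relies on `must-gather` for collecting logs and CR information, a small wrapper can make that step repeatable in scripts. A minimal sketch, assuming the `oc` CLI is on `PATH`; the image tag is illustrative, so substitute the image documented for your OADP version.

[source,python]
----
import subprocess


def collect_oadp_must_gather(image="registry.redhat.io/oadp/oadp-mustgather-rhel8:v1.2"):
    """Run `oc adm must-gather` with an OADP image and return the exit code."""
    return subprocess.run(["oc", "adm", "must-gather", f"--image={image}"]).returncode


if __name__ == "__main__":
    raise SystemExit(collect_oadp_must_gather())
----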
diff --git a/backup_and_restore/control_plane_backup_and_restore/backing-up-etcd.adoc b/backup_and_restore/control_plane_backup_and_restore/backing-up-etcd.adoc
index a8bc6819d7a5..09754659df3e 100644
--- a/backup_and_restore/control_plane_backup_and_restore/backing-up-etcd.adoc
+++ b/backup_and_restore/control_plane_backup_and_restore/backing-up-etcd.adoc
@@ -1,4 +1,4 @@
-:_content-type: ASSEMBLY
+:_mod-docs-content-type: ASSEMBLY
[id="backup-etcd"]
= Backing up etcd
include::_attributes/common-attributes.adoc[]
@@ -26,3 +26,6 @@ include::modules/backup-etcd.adoc[leveloffset=+1]
[id="additional-resources_backup-etcd"]
== Additional resources
* xref:../../hosted_control_planes/hcp-backup-restore-dr.adoc#hcp-backup-restore[Backing up and restoring etcd on a hosted cluster]
+
+// Creating automated etcd backups
+include::modules/etcd-creating-automated-backups.adoc[leveloffset=+1]
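
The module included above documents the automated backups procedure itself. Purely as a sketch, assuming the Technology Preview `Backup` API in the `config.openshift.io/v1alpha1` group, a recurring backup request might look like the following; the schedule, retention, and PVC values are illustrative assumptions, not defaults.

[source,python]
----
import yaml  # PyYAML

# Hypothetical recurring etcd backup request; verify field names against
# the installed API version before using anything like this.
backup = {
    "apiVersion": "config.openshift.io/v1alpha1",
    "kind": "Backup",
    "metadata": {"name": "example-etcd-backup"},
    "spec": {
        "etcd": {
            "schedule": "0 2 * * *",  # daily at 02:00
            "retentionPolicy": {
                "retentionType": "RetentionNumber",
                "retentionNumber": {"maxNumberOfBackups": 5},
            },
            "pvcName": "etcd-backup-pvc",  # hypothetical PVC
        },
    },
}
print(yaml.safe_dump(backup, sort_keys=False))
----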
diff --git a/backup_and_restore/control_plane_backup_and_restore/disaster_recovery/about-disaster-recovery.adoc b/backup_and_restore/control_plane_backup_and_restore/disaster_recovery/about-disaster-recovery.adoc
index 38baebe6e0c6..28136968142d 100644
--- a/backup_and_restore/control_plane_backup_and_restore/disaster_recovery/about-disaster-recovery.adoc
+++ b/backup_and_restore/control_plane_backup_and_restore/disaster_recovery/about-disaster-recovery.adoc
@@ -1,4 +1,4 @@
-:_content-type: ASSEMBLY
+:_mod-docs-content-type: ASSEMBLY
[id="about-dr"]
= About disaster recovery
include::_attributes/common-attributes.adoc[]
diff --git a/backup_and_restore/control_plane_backup_and_restore/disaster_recovery/scenario-2-restoring-cluster-state.adoc b/backup_and_restore/control_plane_backup_and_restore/disaster_recovery/scenario-2-restoring-cluster-state.adoc
index 4400fc6492a2..32bca8c77fd6 100644
--- a/backup_and_restore/control_plane_backup_and_restore/disaster_recovery/scenario-2-restoring-cluster-state.adoc
+++ b/backup_and_restore/control_plane_backup_and_restore/disaster_recovery/scenario-2-restoring-cluster-state.adoc
@@ -1,4 +1,4 @@
-:_content-type: ASSEMBLY
+:_mod-docs-content-type: ASSEMBLY
[id="dr-restoring-cluster-state"]
= Restoring to a previous cluster state
include::_attributes/common-attributes.adoc[]
@@ -20,7 +20,7 @@ include::modules/dr-restoring-cluster-state.adoc[leveloffset=+1]
* xref:../../../installing/installing_bare_metal/installing-bare-metal.adoc#installing-bare-metal[Installing a user-provisioned cluster on bare metal]
* xref:../../../networking/accessing-hosts.adoc#accessing-hosts[Creating a bastion host to access {product-title} instances and the control plane nodes with SSH]
-* xref:../../../installing/installing_bare_metal_ipi/ipi-install-expanding-the-cluster.adoc#replacing-a-bare-metal-control-plane-node_ipi-install-expanding[Replacing a bare-metal control plane node]
+* xref:../../../installing/installing_bare_metal_ipi/ipi-install-expanding-the-cluster.adoc#replacing-a-bare-metal-control-plane-node_ipi-install-expanding[Replacing a bare-metal control plane node]
include::modules/dr-scenario-cluster-state-issues.adoc[leveloffset=+1]
diff --git a/backup_and_restore/control_plane_backup_and_restore/disaster_recovery/scenario-3-expired-certs.adoc b/backup_and_restore/control_plane_backup_and_restore/disaster_recovery/scenario-3-expired-certs.adoc
index a15d6765d198..907c61922af3 100644
--- a/backup_and_restore/control_plane_backup_and_restore/disaster_recovery/scenario-3-expired-certs.adoc
+++ b/backup_and_restore/control_plane_backup_and_restore/disaster_recovery/scenario-3-expired-certs.adoc
@@ -1,4 +1,4 @@
-:_content-type: ASSEMBLY
+:_mod-docs-content-type: ASSEMBLY
[id="dr-recovering-expired-certs"]
= Recovering from expired control plane certificates
include::_attributes/common-attributes.adoc[]
diff --git a/backup_and_restore/control_plane_backup_and_restore/replacing-unhealthy-etcd-member.adoc b/backup_and_restore/control_plane_backup_and_restore/replacing-unhealthy-etcd-member.adoc
index 5ad22219dc1b..95c74001add0 100644
--- a/backup_and_restore/control_plane_backup_and_restore/replacing-unhealthy-etcd-member.adoc
+++ b/backup_and_restore/control_plane_backup_and_restore/replacing-unhealthy-etcd-member.adoc
@@ -1,4 +1,4 @@
-:_content-type: ASSEMBLY
+:_mod-docs-content-type: ASSEMBLY
[id="replacing-unhealthy-etcd-member"]
= Replacing an unhealthy etcd member
include::_attributes/common-attributes.adoc[]
diff --git a/backup_and_restore/graceful-cluster-restart.adoc b/backup_and_restore/graceful-cluster-restart.adoc
index da115c5bf6e8..e11d5f5e93ee 100644
--- a/backup_and_restore/graceful-cluster-restart.adoc
+++ b/backup_and_restore/graceful-cluster-restart.adoc
@@ -1,4 +1,4 @@
-:_content-type: ASSEMBLY
+:_mod-docs-content-type: ASSEMBLY
[id="graceful-restart-cluster"]
= Restarting the cluster gracefully
include::_attributes/common-attributes.adoc[]
diff --git a/backup_and_restore/graceful-cluster-shutdown.adoc b/backup_and_restore/graceful-cluster-shutdown.adoc
index d5fc8860f78d..9425a2270607 100644
--- a/backup_and_restore/graceful-cluster-shutdown.adoc
+++ b/backup_and_restore/graceful-cluster-shutdown.adoc
@@ -1,4 +1,4 @@
-:_content-type: ASSEMBLY
+:_mod-docs-content-type: ASSEMBLY
[id="graceful-shutdown-cluster"]
= Shutting down the cluster gracefully
include::_attributes/common-attributes.adoc[]
@@ -11,6 +11,19 @@ This document describes the process to gracefully shut down your cluster. You mi
== Prerequisites
* Take an xref:../backup_and_restore/control_plane_backup_and_restore/backing-up-etcd.adoc#backing-up-etcd-data_backup-etcd[etcd backup] prior to shutting down the cluster.
++
+[IMPORTANT]
+====
+It is important to take an etcd backup before performing this procedure so that your cluster can be restored if you encounter any issues when restarting the cluster.
+
+For example, the following conditions can cause the restarted cluster to malfunction:
+
+* etcd data corruption during shutdown
+* Node failure due to hardware faults
+* Network connectivity issues
+
+If your cluster fails to recover, follow the steps to xref:../backup_and_restore/control_plane_backup_and_restore/disaster_recovery/scenario-2-restoring-cluster-state.adoc#dr-restoring-cluster-state[restore to a previous cluster state].
+====
// Shutting down the cluster
include::modules/graceful-shutdown.adoc[leveloffset=+1]
@@ -20,5 +33,3 @@ include::modules/graceful-shutdown.adoc[leveloffset=+1]
== Additional resources
* xref:../backup_and_restore/graceful-cluster-restart.adoc#graceful-restart-cluster[Restarting the cluster gracefully]
-
-* xref:../backup_and_restore/control_plane_backup_and_restore/disaster_recovery/scenario-2-restoring-cluster-state.adoc#dr-restoring-cluster-state[Restore to a previous cluster state]
\ No newline at end of file
diff --git a/backup_and_restore/index.adoc b/backup_and_restore/index.adoc
index 4784c633da6d..5bd4be21a632 100644
--- a/backup_and_restore/index.adoc
+++ b/backup_and_restore/index.adoc
@@ -1,4 +1,4 @@
-:_content-type: ASSEMBLY
+:_mod-docs-content-type: ASSEMBLY
[id="backup-restore-overview"]
= Backup and restore
include::_attributes/common-attributes.adoc[]
@@ -23,7 +23,7 @@ A cluster's certificates expire one year after the installation date. You can sh
You might run into several situations where {product-title} does not work as expected, such as:
-* You have a cluster that is not functional after the restart because of unexpected conditions, such as node failure, or network connectivity issues.
+* You have a cluster that is not functional after the restart because of unexpected conditions, such as node failure or network connectivity issues.
* You have deleted something critical in the cluster by mistake.
* You have lost the majority of your control plane hosts, leading to etcd quorum loss.
@@ -74,12 +74,15 @@ If you do not want to back up PVs by using snapshots, you can use link:https://r
[id="backing-up-and-restoring-applications"]
=== Backing up and restoring applications
-You back up applications by creating a `Backup` custom resource (CR). See xref:../backup_and_restore/application_backup_and_restore/backing_up_and_restoring/backing-up-applications.adoc#oadp-creating-backup-cr_backing-up-applications[Creating a Backup CR].You can configure the following backup options:
+You back up applications by creating a `Backup` custom resource (CR). See xref:../backup_and_restore/application_backup_and_restore/backing_up_and_restoring/oadp-creating-backup-cr.adoc#backing-up-applications[Creating a Backup CR]. You can configure the following backup options:
-* xref:../backup_and_restore/application_backup_and_restore/backing_up_and_restoring/backing-up-applications.adoc#oadp-creating-backup-hooks_backing-up-applications[Backup hooks] to run commands before or after the backup operation
-* xref:../backup_and_restore/application_backup_and_restore/backing_up_and_restoring/backing-up-applications.adoc#oadp-scheduling-backups_backing-up-applications[Scheduled backups]
-* xref:../backup_and_restore/application_backup_and_restore/backing_up_and_restoring/backing-up-applications.adoc#oadp-backing-up-applications-restic_backing-up-applications[Restic backups]
+* xref:../backup_and_restore/application_backup_and_restore/backing_up_and_restoring/oadp-creating-backup-hooks-doc.adoc#backing-up-applications[Creating backup hooks] to run commands before or after the backup operation
-You restore application backups by creating a `Restore` (CR). See xref:../backup_and_restore/application_backup_and_restore/backing_up_and_restoring/restoring-applications.adoc#oadp-creating-restore-cr_restoring-applications[Creating a Restore CR]. You can configure xref:../backup_and_restore/application_backup_and_restore/backing_up_and_restoring/restoring-applications.adoc#oadp-creating-restore-hooks_restoring-applications[restore hooks] to run commands in init containers or in the application container during the restore operation.
+* xref:../backup_and_restore/application_backup_and_restore/backing_up_and_restoring/oadp-scheduling-backups-doc.adoc#backing-up-applications[Scheduling backups]
+
+* xref:../backup_and_restore/application_backup_and_restore/backing_up_and_restoring/oadp-backing-up-applications-restic-doc.adoc#backing-up-applications[Backing up applications with File System Backup: Kopia or Restic]
+
+* You restore application backups by creating a `Restore` CR. See xref:../backup_and_restore/application_backup_and_restore/backing_up_and_restoring/restoring-applications.adoc#oadp-creating-restore-cr_restoring-applications[Creating a Restore CR].
+* You can configure xref:../backup_and_restore/application_backup_and_restore/backing_up_and_restoring/restoring-applications.adoc#oadp-creating-restore-hooks_restoring-applications[restore hooks] to run commands in init containers or in the application container during the restore operation.
:backup-restore-overview!:
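
For orientation, the `Backup` CR referenced above is a small Velero object. The following sketch prints a minimal example as YAML; the application namespace, storage location, and TTL are placeholder assumptions rather than OADP defaults.

[source,python]
----
import yaml  # PyYAML

backup_cr = {
    "apiVersion": "velero.io/v1",
    "kind": "Backup",
    "metadata": {"name": "example-backup", "namespace": "openshift-adp"},
    "spec": {
        "includedNamespaces": ["my-app"],  # hypothetical application namespace
        "storageLocation": "default",
        "ttl": "720h0m0s",  # keep the backup for 30 days
    },
}
print(yaml.safe_dump(backup_cr, sort_keys=False))
----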
diff --git a/build_for_portal.py b/build_for_portal.py
index e33a6b58fe71..d4a2f5359098 100644
--- a/build_for_portal.py
+++ b/build_for_portal.py
@@ -46,7 +46,6 @@
)
CMP_IGNORE_FILES = [".git", ".gitignore", "README.md", "build.cfg"]
DEVNULL = open(os.devnull, "wb")
-LIST_OF_HUGE_BOOKS: list = ["Installing", "API reference"]
MASTER_FILE_BASE = "= {title}\n\
:product-author: {product-author}\n\
@@ -269,7 +268,37 @@ def ensure_directory(directory):
Creates DIRECTORY if it does not exist.
"""
if not os.path.exists(directory):
- os.mkdir(directory)
+ os.makedirs(directory)
+
+def expand_huge_books(info):
+ """
+ Finds nodes for huge books, creates new nodes for books from their top-level topics,
+ and then removes the nodes for huge books
+ """
+
+    # find all the huge books, as flagged by a hugeBook.flag file in the book's source directory
+    huge_book_nodes = [book for book in info["book_nodes"]
+                       if os.path.exists(os.path.join(info["src_dir"], book["Dir"], "hugeBook.flag"))]
+
+ for book in huge_book_nodes:
+ # save the directory in info
+ huge_book_dir = book["Dir"]
+ info["huge_book_dirs"].append(huge_book_dir)
+ # create the flag file in the book destination directory
+ book_dest_dir = os.path.join(info["dest_dir"], book["Dir"])
+ ensure_directory(book_dest_dir)
+        with open(os.path.join(book_dest_dir, "hugeBook.flag"), "w") as flag_file:
+            flag_file.write("hugebook")
+        # make new book nodes from the huge book's top-level topics
+ for topic in book["Topics"]:
+ if "Dir" in topic.keys():
+ info["book_nodes"].append(topic)
+ topic["Dir"] = huge_book_dir + "/" + topic["Dir"]
+
+ # remove book nodes for huge books
+ for node_to_remove in huge_book_nodes:
+ info["book_nodes"].remove(node_to_remove)
def build_master_files(info):
@@ -277,11 +306,15 @@ def build_master_files(info):
Builds the master.adoc and docinfo.xml files for each guide specified in the config.
"""
+ # change the huge books into sub-books
+ expand_huge_books(info)
+
# TODO: Refactor. This does too much.
dest_dir = info["dest_dir"]
all_in_one = info["all_in_one"]
all_in_one_text = ""
+
for book in info["book_nodes"]:
book_dest_dir = os.path.join(dest_dir, book["Dir"])
@@ -328,40 +361,6 @@ def build_master_files(info):
info["preface-title"] = ":preface-title: " + preface_title
all_in_one_text += master
- if book["Name"] in LIST_OF_HUGE_BOOKS:
- huge_book_topics = book["Topics"]
-
- for topic in huge_book_topics:
- if "Dir" in topic.keys():
- topic_master_file = os.path.join(
- book_dest_dir, topic["Dir"], "master.adoc"
- )
- topic_docinfo_file = os.path.join(
- book_dest_dir, topic["Dir"], "docinfo.xml"
- )
-
- # TODO: Make less hacky.
- book_info["title"] = topic["Name"]
- info["title"] = topic["Name"]
-
- master_base = MASTER_FILE_BASE.format(**book_info)
- docinfo_node = topic["Name"]
-
- ensure_directory(os.path.join(book_dest_dir, topic["Dir"]))
- sub_master = generate_master_entry(
- topic,
- topic["Dir"],
- info["distro"],
- all_in_one,
- all_in_one=all_in_one,
- )
-
- log.debug("Writing " + topic_master_file)
- with open(topic_master_file, "w") as f:
- f.write(master_base + sub_master)
- log.debug("Writing " + topic_docinfo_file)
- with open(topic_docinfo_file, "w") as f:
- f.write(DOCINFO_BASE.format(**info))
# TODO: And is this ever used?
if all_in_one:
master_file = os.path.join(dest_dir, "master.adoc")
@@ -515,6 +514,7 @@ def copy_file(
Copies a source file to destination, making sure to scrub the content, add id's where the content is referenced elsewhere and fix any
links that should be cross references. Also copies any includes that are referenced, since they aren't included in _build_cfg.yml.
"""
+
# It's possible that the file might have been created by another include, if so then just return
if os.path.isfile(dest_file):
return
@@ -544,15 +544,17 @@ def copy_file(
key, value = re.split("\s*=\s*", meta, 2)
include_vars[key] = value
+
# Determine the include src/dest paths
include_file = os.path.join(os.path.dirname(book_src_dir), include_path)
relative_path = os.path.relpath(include_file, os.path.dirname(src_file))
# If the path is in another book, copy it into this one
relative_book_path = os.path.relpath(include_file, book_src_dir)
+
if relative_book_path.startswith("../"):
- path, src_book_name = os.path.split(book_src_dir)
- dest_include_dir = os.path.join(dest_dir, src_book_name, "includes")
+        src_book_relative_dir = os.path.relpath(book_src_dir, info["src_dir"])
+ dest_include_dir = os.path.join(dest_dir, src_book_relative_dir, "includes")
relative_path = os.path.join(
os.path.relpath(dest_include_dir, parent_dir),
os.path.basename(include_file),
@@ -720,7 +722,7 @@ def fix_links(content, info, book_src_dir, src_file, tag=None, cwd=None):
Fix any links that were done incorrectly and reference the output instead of the source content.
"""
if info["all_in_one"]:
- content = fix_links(content, info["src_dir"], src_file, info)
+ content = _fix_links(content, info["src_dir"], src_file, info)
else:
# Determine if the tag should be passed when fixing the links. If it's in the same book, then process the entire file. If it's
# outside the book then don't process it.
@@ -733,11 +735,27 @@ def fix_links(content, info, book_src_dir, src_file, tag=None, cwd=None):
return content
+def dir_to_book_name(target_dir, src_file, info):
+    # find a book name by its directory; fall back to the directory name itself
+    global has_errors
+    for book in info["book_nodes"]:
+        if book["Dir"] == target_dir:
+            return book["Name"]
+
+    has_errors = True
+    log.error(
+        'ERROR (%s): book not found for the directory %s',
+        src_file,
+        target_dir)
+    return target_dir
+
def _fix_links(content, book_dir, src_file, info, tag=None, cwd=None):
"""
Fix any links that were done incorrectly and reference the output instead of the source content.
"""
+    current_book_name = dir_to_book_name(os.path.relpath(book_dir, info["src_dir"]), src_file, info)
+
# TODO Deal with xref so that they keep the proper path. Atm it'll just strip the path and leave only the id
file_to_id_map = info["file_to_id_map"]
current_dir = cwd or os.path.dirname(src_file)
@@ -750,6 +768,14 @@ def _fix_links(content, book_dir, src_file, info, tag=None, cwd=None):
link_anchor = link.group(2)
link_title = link.group(3)
+    # sanity check: is this a link to an external site?
+    # the link macro CAN also be used for internal links, so just test for http(s)
+    # NOTE: a docs.openshift.com link would not be processed correctly here anyway, so let it pass through
+    if ("http:" in link_text) or ("https:" in link_text):
+        continue
+
+ fixed_link = "" # setting the scope of fixed_link outside the if statements
+
if link_file is not None:
fixed_link_file = link_file.replace(".html", ".adoc")
fixed_link_file_abs = os.path.abspath(
@@ -757,32 +783,41 @@ def _fix_links(content, book_dir, src_file, info, tag=None, cwd=None):
)
if fixed_link_file_abs in file_to_id_map:
- # We are dealing with a cross reference to another book here
- external_link = EXTERNAL_LINK_RE.search(link_file)
- book_dir_name = external_link.group(1)
+ # We are dealing with a cross reference to a book here
+ full_relative_path = os.path.relpath(fixed_link_file_abs,info["src_dir"])
+
+            if full_relative_path.startswith(".."):
+ log.error(
+ 'ERROR (%s): link pointing outside source directory? %s',
+ src_file,
+ link_file)
+ continue
+ split_relative_path = full_relative_path.split("/")
+ book_dir_name = split_relative_path[0]
+ if book_dir_name in info["huge_book_dirs"]:
+                book_dir_name = split_relative_path[0] + "/" + split_relative_path[1]
# Find the book name
- book_name = book_dir_name
- for book in info["data"]:
- if (
- check_node_distro_matches(book, info["distro"])
- and book["Dir"] == book_dir_name
- ):
- book_name = book["Name"]
- break
-
- fixed_link_file = BASE_PORTAL_URL + build_portal_url(info, book_name)
-
- if link_anchor is None:
- fixed_link = (
- "link:"
- + fixed_link_file
- + "#"
- + file_to_id_map[fixed_link_file_abs]
- + link_title
- )
+            book_name = dir_to_book_name(book_dir_name, src_file, info)
+
+            if book_name == current_book_name:
+ if link_anchor is None:
+ fixed_link = "xref:" + file_to_id_map[fixed_link_file_abs] + link_title
+ else:
+ fixed_link = "xref:" + link_anchor.replace("#", "") + link_title
else:
- fixed_link = "link:" + fixed_link_file + link_anchor + link_title
+ fixed_link_file = BASE_PORTAL_URL + build_portal_url(info, book_name)
+ if link_anchor is None:
+ fixed_link = (
+ "link:"
+ + fixed_link_file
+ + "#"
+ + file_to_id_map[fixed_link_file_abs]
+ + link_title
+ )
+ else:
+ fixed_link = "link:" + fixed_link_file + link_anchor + link_title
else:
# Cross reference or link that isn't in the docs suite
fixed_link = link_text
@@ -1132,6 +1167,7 @@ def main():
"all_in_one": args.all_in_one,
"preface-title": "",
"upstream_branch": args.upstream_branch,
+ "huge_book_dirs": []
}
# Build the master files
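
To see what `expand_huge_books()` does to the topic map, here is a self-contained toy run. It reproduces the transformation but substitutes an in-memory `huge` flag for the `hugeBook.flag` file check that the real function performs on disk.

[source,python]
----
def expand(info):
    # Same shape as expand_huge_books(): promote each huge book's top-level
    # topics to books of their own, prefixing their directories, then drop
    # the huge book node itself.
    huge = [b for b in info["book_nodes"] if b.get("huge")]
    for book in huge:
        info["huge_book_dirs"].append(book["Dir"])
        for topic in book["Topics"]:
            if "Dir" in topic:
                topic["Dir"] = book["Dir"] + "/" + topic["Dir"]
                info["book_nodes"].append(topic)
    for book in huge:
        info["book_nodes"].remove(book)


info = {
    "huge_book_dirs": [],
    "book_nodes": [
        {"Name": "Installing", "Dir": "installing", "huge": True,
         "Topics": [{"Name": "Installing on AWS", "Dir": "installing_aws"}]},
        {"Name": "Backup and restore", "Dir": "backup_and_restore", "Topics": []},
    ],
}
expand(info)
# book_nodes now holds "Backup and restore" plus "Installing on AWS" with
# Dir "installing/installing_aws"; "installing" is recorded in huge_book_dirs,
# which is what _fix_links() later uses to rebuild cross-book links.
print([b["Dir"] for b in info["book_nodes"]], info["huge_book_dirs"])
----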
diff --git a/cicd/builds/advanced-build-operations.adoc b/cicd/builds/advanced-build-operations.adoc
index c8a9279320d0..3a26aedd3944 100644
--- a/cicd/builds/advanced-build-operations.adoc
+++ b/cicd/builds/advanced-build-operations.adoc
@@ -1,4 +1,4 @@
-:_content-type: ASSEMBLY
+:_mod-docs-content-type: ASSEMBLY
[id="advanced-build-operations"]
= Performing advanced builds
include::_attributes/common-attributes.adoc[]
diff --git a/cicd/builds/basic-build-operations.adoc b/cicd/builds/basic-build-operations.adoc
index 5e63cd49638b..ebdb8b93538a 100644
--- a/cicd/builds/basic-build-operations.adoc
+++ b/cicd/builds/basic-build-operations.adoc
@@ -1,4 +1,4 @@
-:_content-type: ASSEMBLY
+:_mod-docs-content-type: ASSEMBLY
[id="basic-build-operations"]
= Performing and configuring basic builds
include::_attributes/common-attributes.adoc[]
diff --git a/cicd/builds/build-configuration.adoc b/cicd/builds/build-configuration.adoc
index a73773137d95..5ffc4ae73652 100644
--- a/cicd/builds/build-configuration.adoc
+++ b/cicd/builds/build-configuration.adoc
@@ -1,4 +1,4 @@
-:_content-type: ASSEMBLY
+:_mod-docs-content-type: ASSEMBLY
[id="build-configuration"]
= Build configuration resources
include::_attributes/common-attributes.adoc[]
diff --git a/cicd/builds/build-strategies.adoc b/cicd/builds/build-strategies.adoc
index 3bc1fd41cb51..2b66d1fd45ce 100644
--- a/cicd/builds/build-strategies.adoc
+++ b/cicd/builds/build-strategies.adoc
@@ -1,4 +1,4 @@
-:_content-type: ASSEMBLY
+:_mod-docs-content-type: ASSEMBLY
[id="build-strategies"]
= Using build strategies
include::_attributes/common-attributes.adoc[]
diff --git a/cicd/builds/creating-build-inputs.adoc b/cicd/builds/creating-build-inputs.adoc
index 32354cd232c9..14a54d93b055 100644
--- a/cicd/builds/creating-build-inputs.adoc
+++ b/cicd/builds/creating-build-inputs.adoc
@@ -1,4 +1,4 @@
-:_content-type: ASSEMBLY
+:_mod-docs-content-type: ASSEMBLY
[id="creating-build-inputs"]
= Creating build inputs
include::_attributes/common-attributes.adoc[]
diff --git a/cicd/builds/custom-builds-buildah.adoc b/cicd/builds/custom-builds-buildah.adoc
index 9ea928151cb0..56eb739a0908 100644
--- a/cicd/builds/custom-builds-buildah.adoc
+++ b/cicd/builds/custom-builds-buildah.adoc
@@ -1,4 +1,4 @@
-:_content-type: ASSEMBLY
+:_mod-docs-content-type: ASSEMBLY
[id="custom-builds-buildah"]
= Custom image builds with Buildah
include::_attributes/common-attributes.adoc[]
diff --git a/cicd/builds/managing-build-output.adoc b/cicd/builds/managing-build-output.adoc
index 1378cd27f6e5..486a103c3dc9 100644
--- a/cicd/builds/managing-build-output.adoc
+++ b/cicd/builds/managing-build-output.adoc
@@ -1,4 +1,4 @@
-:_content-type: ASSEMBLY
+:_mod-docs-content-type: ASSEMBLY
[id="managing-build-output"]
= Managing build output
include::_attributes/common-attributes.adoc[]
diff --git a/cicd/builds/running-entitled-builds.adoc b/cicd/builds/running-entitled-builds.adoc
index 4eef8f5985dc..6b38fed5181e 100644
--- a/cicd/builds/running-entitled-builds.adoc
+++ b/cicd/builds/running-entitled-builds.adoc
@@ -1,4 +1,4 @@
-:_content-type: ASSEMBLY
+:_mod-docs-content-type: ASSEMBLY
[id="running-entitled-builds"]
= Using Red Hat subscriptions in builds
include::_attributes/common-attributes.adoc[]
diff --git a/cicd/builds/securing-builds-by-strategy.adoc b/cicd/builds/securing-builds-by-strategy.adoc
index 9809d2327602..74cb3602327c 100644
--- a/cicd/builds/securing-builds-by-strategy.adoc
+++ b/cicd/builds/securing-builds-by-strategy.adoc
@@ -1,4 +1,4 @@
-:_content-type: ASSEMBLY
+:_mod-docs-content-type: ASSEMBLY
[id="securing-builds-by-strategy"]
= Securing builds by strategy
include::_attributes/common-attributes.adoc[]
diff --git a/cicd/builds/setting-up-trusted-ca.adoc b/cicd/builds/setting-up-trusted-ca.adoc
index 6adc4e59cb6e..de6612efd9a8 100644
--- a/cicd/builds/setting-up-trusted-ca.adoc
+++ b/cicd/builds/setting-up-trusted-ca.adoc
@@ -1,4 +1,4 @@
-:_content-type: ASSEMBLY
+:_mod-docs-content-type: ASSEMBLY
[id="setting-up-trusted-ca"]
= Setting up additional trusted certificate authorities for builds
ifndef::openshift-dedicated,openshift-rosa[]
diff --git a/cicd/builds/triggering-builds-build-hooks.adoc b/cicd/builds/triggering-builds-build-hooks.adoc
index adca76892240..123549e06b44 100644
--- a/cicd/builds/triggering-builds-build-hooks.adoc
+++ b/cicd/builds/triggering-builds-build-hooks.adoc
@@ -1,4 +1,4 @@
-:_content-type: ASSEMBLY
+:_mod-docs-content-type: ASSEMBLY
[id="triggering-builds-build-hooks"]
= Triggering and modifying builds
include::_attributes/common-attributes.adoc[]
diff --git a/cicd/builds/troubleshooting-builds.adoc b/cicd/builds/troubleshooting-builds.adoc
index 92cd14bfd15d..ba5bbfc23a38 100644
--- a/cicd/builds/troubleshooting-builds.adoc
+++ b/cicd/builds/troubleshooting-builds.adoc
@@ -1,4 +1,4 @@
-:_content-type: ASSEMBLY
+:_mod-docs-content-type: ASSEMBLY
[id="troubleshooting-builds_{context}"]
= Troubleshooting builds
include::_attributes/common-attributes.adoc[]
diff --git a/cicd/builds/understanding-buildconfigs.adoc b/cicd/builds/understanding-buildconfigs.adoc
index bf87540e52ac..498c74fb9f15 100644
--- a/cicd/builds/understanding-buildconfigs.adoc
+++ b/cicd/builds/understanding-buildconfigs.adoc
@@ -1,4 +1,4 @@
-:_content-type: ASSEMBLY
+:_mod-docs-content-type: ASSEMBLY
[id="understanding-buildconfigs"]
= Understanding build configurations
include::_attributes/common-attributes.adoc[]
diff --git a/cicd/builds/understanding-image-builds.adoc b/cicd/builds/understanding-image-builds.adoc
index 6483bac9c0b7..b2875d6937d3 100644
--- a/cicd/builds/understanding-image-builds.adoc
+++ b/cicd/builds/understanding-image-builds.adoc
@@ -1,4 +1,4 @@
-:_content-type: ASSEMBLY
+:_mod-docs-content-type: ASSEMBLY
[id="understanding-image-builds"]
= Understanding image builds
include::_attributes/common-attributes.adoc[]
diff --git a/distr_tracing/distr_tracing_install/_attributes b/cicd/builds_using_shipwright/_attributes
similarity index 100%
rename from distr_tracing/distr_tracing_install/_attributes
rename to cicd/builds_using_shipwright/_attributes
diff --git a/logging/v5_5/images b/cicd/builds_using_shipwright/images
similarity index 100%
rename from logging/v5_5/images
rename to cicd/builds_using_shipwright/images
diff --git a/logging/v5_6/modules b/cicd/builds_using_shipwright/modules
similarity index 100%
rename from logging/v5_6/modules
rename to cicd/builds_using_shipwright/modules
diff --git a/cicd/builds_using_shipwright/overview-openshift-builds.adoc b/cicd/builds_using_shipwright/overview-openshift-builds.adoc
new file mode 100644
index 000000000000..a874af3d3952
--- /dev/null
+++ b/cicd/builds_using_shipwright/overview-openshift-builds.adoc
@@ -0,0 +1,26 @@
+:_mod-docs-content-type: ASSEMBLY
+[id="overview-openshift-builds"]
+= Overview of Builds
+:context: overview-openshift-builds
+include::_attributes/common-attributes.adoc[]
+
+toc::[]
+
+Builds is an extensible build framework, based on the link:https://shipwright.io/[Shipwright project], that you can use to build container images on an {product-title} cluster. You can build container images from source code and Dockerfiles by using image build tools, such as Source-to-Image (S2I) and Buildah. You can create and apply build resources, view logs of build runs, and manage builds in your {product-title} namespaces.
+
+Builds includes the following capabilities:
+
+* Standard Kubernetes-native API for building container images from source code and Dockerfiles
+* Support for Source-to-Image (S2I) and Buildah build strategies
+* Extensibility with your own custom build strategies
+* Execution of builds from source code in a local directory
+* Shipwright CLI for creating builds, viewing logs, and managing builds on the cluster
+* Integrated user experience with the *Developer* perspective of the {product-title} web console
+
+Builds consists of the following custom resources (CRs):
+
+* `Build`
+* `BuildStrategy` and `ClusterBuildStrategy`
+* `BuildRun`
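
As a concrete companion to the list of CRs above, the following sketch prints a minimal Shipwright `Build` manifest. The API version, repository URL, and output image reference are assumptions to verify against the installed Builds version.

[source,python]
----
import yaml  # PyYAML

build = {
    "apiVersion": "shipwright.io/v1beta1",  # assumed API version
    "kind": "Build",
    "metadata": {"name": "example-build"},
    "spec": {
        "source": {
            "type": "Git",
            "git": {"url": "https://github.com/example/app"},  # hypothetical repo
        },
        "strategy": {"name": "buildah", "kind": "ClusterBuildStrategy"},
        "output": {
            # hypothetical in-cluster registry target
            "image": "image-registry.openshift-image-registry.svc:5000/my-ns/app:latest",
        },
    },
}
print(yaml.safe_dump(build, sort_keys=False))
----

A `BuildRun` that references this `Build` by name is what actually executes the build.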
diff --git a/distr_tracing/distr_tracing_config/snippets b/cicd/builds_using_shipwright/snippets
similarity index 100%
rename from distr_tracing/distr_tracing_config/snippets
rename to cicd/builds_using_shipwright/snippets
diff --git a/cicd/gitops/about-redhat-openshift-gitops.adoc b/cicd/gitops/about-redhat-openshift-gitops.adoc
new file mode 100644
index 000000000000..799de0f2d9c3
--- /dev/null
+++ b/cicd/gitops/about-redhat-openshift-gitops.adoc
@@ -0,0 +1,54 @@
+:_mod-docs-content-type: ASSEMBLY
+include::_attributes/common-attributes.adoc[]
+[id="about-redhat-openshift-gitops"]
+= About {gitops-title}
+:context: about-redhat-openshift-gitops
+
+toc::[]
+
+{gitops-title} is an Operator that uses Argo CD as the declarative GitOps engine. It enables GitOps workflows across multicluster OpenShift and Kubernetes infrastructure. Using {gitops-title}, administrators can consistently configure and deploy Kubernetes-based infrastructure and applications across clusters and development lifecycles. {gitops-title} is based on the open source project link:https://argoproj.github.io/cd/[Argo CD] and provides a feature set similar to the upstream project, with additional automation, integration into Red Hat {OCP}, and the benefits of Red Hat’s enterprise support, quality assurance, and focus on enterprise security.
+
+[NOTE]
+====
+Because {gitops-title} releases on a different cadence from {OCP}, the {gitops-title} documentation is now available as separate documentation sets for each minor version of the product.
+
+The {gitops-title} documentation is available at link:https://docs.openshift.com/gitops/[].
+
+Documentation for specific versions is available using the version selector dropdown, or directly by adding the version to the URL, for example, link:https://docs.openshift.com/gitops/1.8[].
+
+In addition, the {gitops-title} documentation is also available on the Red Hat Portal at https://access.redhat.com/documentation/en-us/red_hat_openshift_gitops/[].
+
+For additional information about the {gitops-title} life cycle and supported platforms, refer to the link:https://access.redhat.com/support/policy/updates/openshift#gitops[Platform Life Cycle Policy].
+====
+
+{gitops-title} ensures consistency in applications when you deploy them to different clusters in different environments, such as development, staging, and production. {gitops-title} organizes the deployment process around the configuration repositories and makes them the central element. It always has at least two repositories:
+
+. Application repository with the source code
+. Environment configuration repository that defines the desired state of the application
+
+These repositories contain a declarative description of the infrastructure you need in your specified environment. They also contain an automated process to make your environment match the described state.
+
+{gitops-title} uses Argo CD to maintain cluster resources. Argo CD is an open-source declarative tool for the continuous integration and continuous deployment (CI/CD) of applications. {gitops-title} implements Argo CD as a controller so that it continuously monitors application definitions and configurations defined in a Git repository. Then, Argo CD compares the specified state of these configurations with their live state on the cluster.
+
+Argo CD reports any configurations that deviate from their specified state. These reports allow administrators to automatically or manually resync configurations to the defined state. Therefore, Argo CD enables you to deliver global custom resources, like the resources that are used to configure {OCP} clusters.
+
+[id="key-features"]
+== Key features
+
+{gitops-title} helps you automate the following tasks:
+
+* Ensure that the clusters have similar states for configuration, monitoring, and storage
+* Apply or revert configuration changes to multiple {OCP} clusters
+* Associate templated configuration with different environments
+* Promote applications across clusters, from staging to production
+
+// add something about CLI tools?
+
+[id="additional-resources_about-op-gitops"]
+[role="_additional-resources"]
+== Additional resources
+
+* link:https://docs.openshift.com/container-platform/latest/operators/understanding/crds/crd-extending-api-with-crds.html#crd-extending-api-with-crds[Extending the Kubernetes API with custom resource definitions]
+* link:https://docs.openshift.com/container-platform/latest/operators/understanding/crds/crd-managing-resources-from-crds.html#crd-managing-resources-from-crds[Managing resources from custom resource definitions]
+* link:https://docs.openshift.com/gitops/latest/understanding_openshift_gitops/what-is-gitops.html#what-is-gitops[What is GitOps?]
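
To make the two-repository model above concrete, the following sketch prints a minimal Argo CD `Application` that points at an environment configuration repository and opts in to automated resync when the live state drifts. The repository URL, path, and namespaces are placeholders.

[source,python]
----
import yaml  # PyYAML

application = {
    "apiVersion": "argoproj.io/v1alpha1",
    "kind": "Application",
    "metadata": {"name": "example-app", "namespace": "openshift-gitops"},
    "spec": {
        "project": "default",
        "source": {
            "repoURL": "https://github.com/example/env-config",  # config repository
            "path": "overlays/production",
            "targetRevision": "main",
        },
        "destination": {
            "server": "https://kubernetes.default.svc",
            "namespace": "example-app",
        },
        # Resync automatically when the live state drifts from the declared state
        "syncPolicy": {"automated": {"prune": True, "selfHeal": True}},
    },
}
print(yaml.safe_dump(application, sort_keys=False))
----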
diff --git a/cicd/gitops/about-sizing-requirements-gitops.adoc b/cicd/gitops/about-sizing-requirements-gitops.adoc
index 1907bf67f9b4..97833df23946 100644
--- a/cicd/gitops/about-sizing-requirements-gitops.adoc
+++ b/cicd/gitops/about-sizing-requirements-gitops.adoc
@@ -1,4 +1,4 @@
-:_content-type: ASSEMBLY
+:_mod-docs-content-type: ASSEMBLY
[id="about-sizing-requirements-gitops"]
= Sizing requirements for GitOps Operator
include::_attributes/common-attributes.adoc[]
diff --git a/cicd/gitops/argo-cd-custom-resource-properties.adoc b/cicd/gitops/argo-cd-custom-resource-properties.adoc
index ad6e49faa176..bb0a41497c79 100644
--- a/cicd/gitops/argo-cd-custom-resource-properties.adoc
+++ b/cicd/gitops/argo-cd-custom-resource-properties.adoc
@@ -1,4 +1,4 @@
-:_content-type: ASSEMBLY
+:_mod-docs-content-type: ASSEMBLY
[id="argo-cd-custom-resource-properties"]
= Argo CD Operator
include::_attributes/common-attributes.adoc[]
diff --git a/cicd/gitops/collecting-debugging-data-for-support.adoc b/cicd/gitops/collecting-debugging-data-for-support.adoc
index c2db643ed117..b9c38514d0bf 100644
--- a/cicd/gitops/collecting-debugging-data-for-support.adoc
+++ b/cicd/gitops/collecting-debugging-data-for-support.adoc
@@ -1,4 +1,4 @@
-:_content-type: ASSEMBLY
+:_mod-docs-content-type: ASSEMBLY
[id="collecting-debugging-data-for-support"]
= Collecting debugging data for a support case
include::_attributes/common-attributes.adoc[]
diff --git a/cicd/gitops/configuring-an-openshift-cluster-by-deploying-an-application-with-cluster-configurations.adoc b/cicd/gitops/configuring-an-openshift-cluster-by-deploying-an-application-with-cluster-configurations.adoc
index 05240fc23661..85a435274af4 100644
--- a/cicd/gitops/configuring-an-openshift-cluster-by-deploying-an-application-with-cluster-configurations.adoc
+++ b/cicd/gitops/configuring-an-openshift-cluster-by-deploying-an-application-with-cluster-configurations.adoc
@@ -1,4 +1,4 @@
-:_content-type: ASSEMBLY
+:_mod-docs-content-type: ASSEMBLY
[id="configuring-an-openshift-cluster-by-deploying-an-application-with-cluster-configurations"]
= Configuring an OpenShift cluster by deploying an application with cluster configurations
include::_attributes/common-attributes.adoc[]
@@ -10,8 +10,8 @@ With {gitops-title}, you can configure Argo CD to recursively sync the content o
.Prerequisites
-* You have logged in to the `product-title` cluster as an administrator.
-* You have installed the `gitops-title` Operator in your cluster.
+* You have logged in to the {product-title} cluster as an administrator.
+* You have installed the {gitops-title} Operator in your cluster.
* You have logged into Argo CD instance.
include::modules/gitops-using-argo-cd-instance-to-manage-cluster-scoped-resources.adoc[leveloffset=+1]
diff --git a/cicd/gitops/configuring-argo-cd-rbac.adoc b/cicd/gitops/configuring-argo-cd-rbac.adoc
index 00b6c827d7b5..87df5230a0cc 100644
--- a/cicd/gitops/configuring-argo-cd-rbac.adoc
+++ b/cicd/gitops/configuring-argo-cd-rbac.adoc
@@ -1,4 +1,4 @@
-:_content-type: ASSEMBLY
+:_mod-docs-content-type: ASSEMBLY
[id="configuring-argo-cd-rbac"]
= Configuring Argo CD RBAC
include::_attributes/common-attributes.adoc[]
diff --git a/cicd/gitops/configuring-resource-quota.adoc b/cicd/gitops/configuring-resource-quota.adoc
index 937f1fb3c44a..af2b02ac2226 100644
--- a/cicd/gitops/configuring-resource-quota.adoc
+++ b/cicd/gitops/configuring-resource-quota.adoc
@@ -1,4 +1,4 @@
-:_content-type: ASSEMBLY
+:_mod-docs-content-type: ASSEMBLY
[id="configuring-resource-quota"]
= Configuring resource quota or requests
include::_attributes/common-attributes.adoc[]
@@ -7,7 +7,7 @@ include::_attributes/common-attributes.adoc[]
toc::[]
[role="_abstract"]
-With the Argo CD Custom Resource, you can create, update, and delete resource requests and limits for Argo CD workloads.
+With the Argo CD Custom Resource, you can create, update, and delete resource requests and limits for Argo CD workloads.
include::modules/configure-workloads.adoc[leveloffset=+1]
include::modules/patch-argocd-instance.adoc[leveloffset=+1]
diff --git a/cicd/gitops/configuring-secure-communication-with-redis.adoc b/cicd/gitops/configuring-secure-communication-with-redis.adoc
new file mode 100644
index 000000000000..192d71b7ece5
--- /dev/null
+++ b/cicd/gitops/configuring-secure-communication-with-redis.adoc
@@ -0,0 +1,27 @@
+:_mod-docs-content-type: ASSEMBLY
+[id="configuring-secure-communication-with-redis"]
+= Configuring secure communication with Redis
+include::_attributes/common-attributes.adoc[]
+:context: configuring-secure-communication-with-redis
+
+toc::[]
+
+Using Transport Layer Security (TLS) encryption with {gitops-title}, you can secure the communication between the Argo CD components and the Redis cache, and protect potentially sensitive data in transit.
+
+You can secure communication with Redis by using one of the following configurations:
+
+* Enable the `autotls` setting to issue an appropriate certificate for TLS encryption.
+* Manually configure the TLS encryption by creating the `argocd-operator-redis-tls` secret with a key and certificate pair.
+
+Both configurations are possible with or without High Availability (HA) enabled.
+
+.Prerequisites
+* You have access to the cluster with `cluster-admin` privileges.
+* You have access to the {product-title} web console.
+* {gitops-title} Operator is installed on your cluster.
+
+include::modules/gitops-configuring-tls-for-redis-with-autotls-enabled.adoc[leveloffset=+1]
+
+include::modules/gitops-configuring-tls-for-redis-with-autotls-disabled.adoc[leveloffset=+1]
+
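
As an illustration of the manual option above, the following sketch creates the `argocd-operator-redis-tls` secret from an existing key and certificate pair with the Kubernetes Python client. The namespace assumes the default `openshift-gitops` instance, and the full documented procedure may involve additional steps, such as restarting workloads or annotating the secret.

[source,python]
----
import base64

from kubernetes import client, config


def create_redis_tls_secret(cert_path, key_path, namespace="openshift-gitops"):
    config.load_kube_config()  # use load_incluster_config() inside a pod
    with open(cert_path, "rb") as cert, open(key_path, "rb") as key:
        data = {
            "tls.crt": base64.b64encode(cert.read()).decode(),
            "tls.key": base64.b64encode(key.read()).decode(),
        }
    secret = client.V1Secret(
        metadata=client.V1ObjectMeta(name="argocd-operator-redis-tls"),
        type="kubernetes.io/tls",
        data=data,
    )
    client.CoreV1Api().create_namespaced_secret(namespace, secret)
----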
diff --git a/cicd/gitops/configuring-sso-for-argo-cd-on-openshift.adoc b/cicd/gitops/configuring-sso-for-argo-cd-on-openshift.adoc
index f293b6f82222..3d0df978cec5 100644
--- a/cicd/gitops/configuring-sso-for-argo-cd-on-openshift.adoc
+++ b/cicd/gitops/configuring-sso-for-argo-cd-on-openshift.adoc
@@ -1,4 +1,4 @@
-:_content-type: ASSEMBLY
+:_mod-docs-content-type: ASSEMBLY
[id="configuring-sso-for-argo-cd-on-openshift"]
= Configuring SSO for Argo CD on OpenShift
include::_attributes/common-attributes.adoc[]
diff --git a/cicd/gitops/configuring-sso-for-argo-cd-using-keycloak.adoc b/cicd/gitops/configuring-sso-for-argo-cd-using-keycloak.adoc
index dbd0d8683a97..9b34a6eb5fca 100644
--- a/cicd/gitops/configuring-sso-for-argo-cd-using-keycloak.adoc
+++ b/cicd/gitops/configuring-sso-for-argo-cd-using-keycloak.adoc
@@ -1,4 +1,4 @@
-:_content-type: ASSEMBLY
+:_mod-docs-content-type: ASSEMBLY
[id="configuring-sso-for-argo-cd-using-keycloak"]
= Configuring SSO for Argo CD using Keycloak
include::_attributes/common-attributes.adoc[]
diff --git a/cicd/gitops/configuring-sso-on-argo-cd-using-dex.adoc b/cicd/gitops/configuring-sso-on-argo-cd-using-dex.adoc
index 28fbb2790c70..c1eab3e39433 100644
--- a/cicd/gitops/configuring-sso-on-argo-cd-using-dex.adoc
+++ b/cicd/gitops/configuring-sso-on-argo-cd-using-dex.adoc
@@ -1,4 +1,4 @@
-:_content-type: ASSEMBLY
+:_mod-docs-content-type: ASSEMBLY
[id="configuring-sso-for-argo-cd-using-dex"]
= Configuring SSO for Argo CD using Dex
include::_attributes/common-attributes.adoc[]
diff --git a/cicd/gitops/deploying-a-spring-boot-application-with-argo-cd.adoc b/cicd/gitops/deploying-a-spring-boot-application-with-argo-cd.adoc
index 53a9fc1dacbd..5bbdbed55137 100644
--- a/cicd/gitops/deploying-a-spring-boot-application-with-argo-cd.adoc
+++ b/cicd/gitops/deploying-a-spring-boot-application-with-argo-cd.adoc
@@ -1,4 +1,4 @@
-:_content-type: ASSEMBLY
+:_mod-docs-content-type: ASSEMBLY
[id="deploying-a-spring-boot-application-with-argo-cd"]
= Deploying a Spring Boot application with Argo CD
include::_attributes/common-attributes.adoc[]
diff --git a/cicd/gitops/gitops-release-notes.adoc b/cicd/gitops/gitops-release-notes.adoc
index e5dffe5ab484..01ba924070c3 100644
--- a/cicd/gitops/gitops-release-notes.adoc
+++ b/cicd/gitops/gitops-release-notes.adoc
@@ -1,5 +1,5 @@
//OpenShift GitOps Release Notes
-:_content-type: ASSEMBLY
+:_mod-docs-content-type: ASSEMBLY
[id="gitops-release-notes"]
= {gitops-title} release notes
:context: gitops-release-notes
@@ -23,7 +23,12 @@ include::modules/go-compatibility-and-support-matrix.adoc[leveloffset=+1]
include::modules/making-open-source-more-inclusive.adoc[leveloffset=+1]
// Modules included, most to least recent
+include::modules/gitops-release-notes-1-9-2.adoc[leveloffset=+1]
+
+include::modules/gitops-release-notes-1-9-1.adoc[leveloffset=+1]
+
include::modules/gitops-release-notes-1-9-0.adoc[leveloffset=+1]
+
// 1.25.0 additional resources, OCP docs
ifdef::openshift-enterprise[]
[role="_additional-resources"]
@@ -31,6 +36,10 @@ ifdef::openshift-enterprise[]
* xref:../../operators/admin/olm-configuring-proxy-support.adoc#olm-inject-custom-ca_olm-configuring-proxy-support[Injecting a custom CA certificate]
endif::[]
+include::modules/gitops-release-notes-1-8-5.adoc[leveloffset=+1]
+
+include::modules/gitops-release-notes-1-8-4.adoc[leveloffset=+1]
+
include::modules/gitops-release-notes-1-8-3.adoc[leveloffset=+1]
include::modules/gitops-release-notes-1-8-2.adoc[leveloffset=+1]
diff --git a/cicd/gitops/health-information-for-resources-deployment.adoc b/cicd/gitops/health-information-for-resources-deployment.adoc
index 0eb5a6be00b2..a97c16664fee 100644
--- a/cicd/gitops/health-information-for-resources-deployment.adoc
+++ b/cicd/gitops/health-information-for-resources-deployment.adoc
@@ -1,4 +1,4 @@
-:_content-type: ASSEMBLY
+:_mod-docs-content-type: ASSEMBLY
[id="health-information-for-resources-deployment"]
= Monitoring health information for application resources and deployments
:context: health-information-for-resources-deployment
@@ -12,4 +12,5 @@ The *Application environments* page in the *Developer* perspective of the {produ
The environments pages in the *Developer* perspective of the {product-title} web console are decoupled from the {gitops-title} Application Manager command-line interface (CLI), `kam`. You do not have to use `kam` to generate Application Environment manifests for the environments to show up in the *Developer* perspective of the {product-title} web console. You can use your own manifests, but the environments must still be represented by namespaces. In addition, specific labels and annotations are still needed.
+include::modules/go-settings-for-environment-labels-and-annotations.adoc[leveloffset=+1]
include::modules/go-health-monitoring.adoc[leveloffset=+1]
diff --git a/cicd/gitops/installing-openshift-gitops.adoc b/cicd/gitops/installing-openshift-gitops.adoc
index 2435189f3b21..ae9a07411e95 100644
--- a/cicd/gitops/installing-openshift-gitops.adoc
+++ b/cicd/gitops/installing-openshift-gitops.adoc
@@ -1,4 +1,4 @@
-:_content-type: ASSEMBLY
+:_mod-docs-content-type: ASSEMBLY
[id="getting-started-with-openshift-gitops"]
= Installing {gitops-title}
include::_attributes/common-attributes.adoc[]
@@ -24,6 +24,13 @@ If you have already installed the Community version of the Argo CD Operator, rem
This guide explains how to install the {gitops-title} Operator to an {product-title} cluster and log in to the Argo CD instance.
+[IMPORTANT]
+====
+The `latest` channel enables installation of the most recent stable version of the {gitops-title} Operator. Currently, it is the default channel for installing the {gitops-title} Operator.
+
+To install a specific version of the {gitops-title} Operator, cluster administrators can use the corresponding `gitops-` channel. For example, to install the {gitops-title} Operator version 1.8.x, you can use the `gitops-1.8` channel.
+====
+
include::modules/installing-gitops-operator-in-web-console.adoc[leveloffset=+1]
include::modules/installing-gitops-operator-using-cli.adoc[leveloffset=+1]
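
For the CLI installation path included above, a Subscription pinned to a specific channel, as described in the note, might look like the following sketch. The package, catalog source, and namespace values are the commonly used ones and should be verified for your cluster.

[source,python]
----
import yaml  # PyYAML

subscription = {
    "apiVersion": "operators.coreos.com/v1alpha1",
    "kind": "Subscription",
    "metadata": {
        "name": "openshift-gitops-operator",
        "namespace": "openshift-operators",
    },
    "spec": {
        "channel": "gitops-1.8",  # or "latest" for the most recent stable version
        "name": "openshift-gitops-operator",
        "source": "redhat-operators",
        "sourceNamespace": "openshift-marketplace",
    },
}
print(yaml.safe_dump(subscription, sort_keys=False))
----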
diff --git a/cicd/gitops/monitoring-argo-cd-custom-resource-workloads.adoc b/cicd/gitops/monitoring-argo-cd-custom-resource-workloads.adoc
index 800b71dc6de3..71f743215737 100644
--- a/cicd/gitops/monitoring-argo-cd-custom-resource-workloads.adoc
+++ b/cicd/gitops/monitoring-argo-cd-custom-resource-workloads.adoc
@@ -1,4 +1,4 @@
-:_content-type: ASSEMBLY
+:_mod-docs-content-type: ASSEMBLY
[id="monitoring-argo-cd-custom-resource-workloads"]
= Monitoring Argo CD custom resource workloads
include::_attributes/common-attributes.adoc[]
@@ -9,17 +9,17 @@ toc::[]
[role="_abstract"]
With {gitops-title}, you can monitor the availability of Argo CD custom resource workloads for specific Argo CD instances. By monitoring Argo CD custom resource workloads, you have the latest information about the state of your Argo CD instances by enabling alerts for them. When the component workload pods such as application-controller, repo-server, or server of the corresponding Argo CD instance are unable to come up for certain reasons and there is a drift between the number of ready replicas and the number of desired replicas for a certain period of time, the Operator then triggers the alerts.
-You can enable and disable the setting for monitoring Argo CD custom resource workloads.
+You can enable and disable the setting for monitoring Argo CD custom resource workloads.
// Prerequisites for monitoring Argo CD custom resource workloads
[discrete]
== Prerequisites
-* You have access to the cluster as a user with the `cluster-admin` role.
+* You have access to the cluster as a user with the `cluster-admin` role.
* {gitops-title} is installed in your cluster.
* The monitoring stack is configured in your cluster in the `openshift-monitoring` project. In addition, the Argo CD instance is in a namespace that you can monitor through Prometheus.
* The `kube-state-metrics` service is running in your cluster.
-* Optional: If you are enabling monitoring for an Argo CD instance already present in a user-defined project, ensure that the monitoring is xref:../../monitoring/enabling-monitoring-for-user-defined-projects.html#enabling-monitoring-for-user-defined-projects_enabling-monitoring-for-user-defined-projects[enabled for user-defined projects] in your cluster.
+* Optional: If you are enabling monitoring for an Argo CD instance already present in a user-defined project, ensure that the monitoring is xref:../../monitoring/enabling-monitoring-for-user-defined-projects.adoc#enabling-monitoring-for-user-defined-projects_enabling-monitoring-for-user-defined-projects[enabled for user-defined projects] in your cluster.
+
[NOTE]
====
diff --git a/cicd/gitops/monitoring-argo-cd-instances.adoc b/cicd/gitops/monitoring-argo-cd-instances.adoc
new file mode 100644
index 000000000000..4fb0a6b567bd
--- /dev/null
+++ b/cicd/gitops/monitoring-argo-cd-instances.adoc
@@ -0,0 +1,17 @@
+:_mod-docs-content-type: ASSEMBLY
+[id="monitoring-argo-cd-instances"]
+= Monitoring Argo CD instances
+include::_attributes/common-attributes.adoc[]
+:context: monitoring-argo-cd-instances
+
+toc::[]
+
+By default, the {gitops-title} Operator automatically detects an installed Argo CD instance in your defined namespace, for example, `openshift-gitops`, and connects it to the monitoring stack of the cluster to provide alerts for out-of-sync applications.
+
+.Prerequisites
+* You have access to the cluster with `cluster-admin` privileges.
+* You have access to the {product-title} web console.
+* You have installed the {gitops-title} Operator in your cluster.
+* You have installed an Argo CD application in your defined namespace, for example, `openshift-gitops`.
+
+include::modules/gitops-monitoring-argo-cd-health-using-prometheus-metrics.adoc[leveloffset=+1]
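+
+The Operator configures these alerts for you; the following hand-written `PrometheusRule` is only a sketch of the underlying mechanism, assuming the default `argocd_app_info` metric is scraped:
+
+[source,yaml]
+----
+apiVersion: monitoring.coreos.com/v1
+kind: PrometheusRule
+metadata:
+  name: argocd-sync-alert-example
+  namespace: openshift-gitops
+spec:
+  groups:
+  - name: argocd-sync
+    rules:
+    - alert: ArgoCDAppOutOfSyncExample
+      expr: argocd_app_info{sync_status="OutOfSync"} > 0 # fires while any application reports OutOfSync
+      for: 5m
+      labels:
+        severity: warning
+      annotations:
+        summary: Argo CD application {{ $labels.name }} is out of sync
+----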
diff --git a/cicd/gitops/run-gitops-control-plane-workload-on-infra-nodes.adoc b/cicd/gitops/run-gitops-control-plane-workload-on-infra-nodes.adoc
index 21122fe29469..cf97208bdb4c 100644
--- a/cicd/gitops/run-gitops-control-plane-workload-on-infra-nodes.adoc
+++ b/cicd/gitops/run-gitops-control-plane-workload-on-infra-nodes.adoc
@@ -1,4 +1,4 @@
-:_content-type: ASSEMBLY
+:_mod-docs-content-type: ASSEMBLY
[id="run-gitops-control-plane-workload-on-infra-nodes"]
= Running {gitops-shortname} control plane workloads on infrastructure nodes
:context: run-gitops-control-plane-workload-on-infra-nodes
@@ -6,7 +6,7 @@ include::_attributes/common-attributes.adoc[]
toc::[]
-You can use infrastructure nodes to prevent additional billing cost against subscription counts.
+You can use infrastructure nodes to prevent additional billing cost against subscription counts.
You can use the {product-title} to run certain workloads on infrastructure nodes installed by the {gitops-title} Operator. This comprises the workloads that are installed by the {gitops-title} Operator by default in the `openshift-gitops` namespace, including the default Argo CD instance in that namespace.
diff --git a/cicd/gitops/setting-up-argocd-instance.adoc b/cicd/gitops/setting-up-argocd-instance.adoc
index 55756aa831fe..7a9b6205a6d7 100644
--- a/cicd/gitops/setting-up-argocd-instance.adoc
+++ b/cicd/gitops/setting-up-argocd-instance.adoc
@@ -1,4 +1,4 @@
-:_content-type: ASSEMBLY
+:_mod-docs-content-type: ASSEMBLY
[id="setting-up-argocd-instance"]
= Setting up an Argo CD instance
include::_attributes/common-attributes.adoc[]
diff --git a/cicd/gitops/troubleshooting-issues-in-GitOps.adoc b/cicd/gitops/troubleshooting-issues-in-GitOps.adoc
index 6eb4cefc9d4a..c5557ad88905 100644
--- a/cicd/gitops/troubleshooting-issues-in-GitOps.adoc
+++ b/cicd/gitops/troubleshooting-issues-in-GitOps.adoc
@@ -1,4 +1,4 @@
-:_content-type: ASSEMBLY
+:_mod-docs-content-type: ASSEMBLY
[id="troubleshooting-issues-in-GitOps"]
= Troubleshooting issues in {gitops-title}
diff --git a/cicd/gitops/understanding-openshift-gitops.adoc b/cicd/gitops/understanding-openshift-gitops.adoc
index 7ab0f91ad813..e9d438996c3a 100644
--- a/cicd/gitops/understanding-openshift-gitops.adoc
+++ b/cicd/gitops/understanding-openshift-gitops.adoc
@@ -1,4 +1,4 @@
-:_content-type: ASSEMBLY
+:_mod-docs-content-type: ASSEMBLY
[id="understanding-openshift-gitops"]
= Understanding OpenShift GitOps
include::_attributes/common-attributes.adoc[]
diff --git a/cicd/gitops/uninstalling-openshift-gitops.adoc b/cicd/gitops/uninstalling-openshift-gitops.adoc
index 17e3ecadfa01..af1f482b8b26 100644
--- a/cicd/gitops/uninstalling-openshift-gitops.adoc
+++ b/cicd/gitops/uninstalling-openshift-gitops.adoc
@@ -1,4 +1,4 @@
-:_content-type: ASSEMBLY
+:_mod-docs-content-type: ASSEMBLY
[id="uninstalling-openshift-gitops"]
= Uninstalling OpenShift GitOps
include::_attributes/common-attributes.adoc[]
diff --git a/cicd/gitops/using-argo-rollouts-for-progressive-deployment-delivery.adoc b/cicd/gitops/using-argo-rollouts-for-progressive-deployment-delivery.adoc
new file mode 100644
index 000000000000..e1cb3a88a856
--- /dev/null
+++ b/cicd/gitops/using-argo-rollouts-for-progressive-deployment-delivery.adoc
@@ -0,0 +1,44 @@
+:_mod-docs-content-type: ASSEMBLY
+[id="using-argo-rollouts-for-progressive-deployment-delivery"]
+= Using Argo Rollouts for progressive deployment delivery
+include::_attributes/common-attributes.adoc[]
+:context: using-argo-rollouts-for-progressive-deployment-delivery
+
+toc::[]
+
+:FeatureName: Argo Rollouts
+include::snippets/technology-preview.adoc[leveloffset=+1]
+
+Progressive delivery is the process of releasing product updates in a controlled and gradual manner.
+Progressive delivery reduces the risk of a release by initially exposing the new version of a product update only to a subset of users. The process involves continuously observing and analyzing this new version to verify whether its behavior matches the set requirements and expectations. The verifications continue as the process gradually exposes the product update to a broader audience.
+
+{product-title} provides some progressive delivery capability by using routes to split traffic between different services, but this typically requires manual intervention and management.
+
+With Argo Rollouts, you can use automation and metric analysis to support progressive deployment delivery and drive the automated rollout or rollback of a new version of an application.
+Argo Rollouts provides advanced deployment capabilities and integrates with ingress controllers and service meshes.
+You can use Argo Rollouts to manage multiple replica sets that represent different versions of the deployed application. Depending on your deployment strategy, you can handle traffic to these versions during an update by using their existing traffic-shaping abilities and by gradually shifting traffic to the new version. You can combine Argo Rollouts with a metric provider, such as Prometheus, to perform metric-based and policy-driven rollouts and rollbacks based on the parameters you set.
+
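+As a minimal sketch of this model (the application name and `quay.io/example/app` image are illustrative assumptions), a canary `Rollout` that shifts 20% of traffic to a new version and then pauses for verification might look like this:
+
+[source,yaml]
+----
+apiVersion: argoproj.io/v1alpha1
+kind: Rollout
+metadata:
+  name: example-rollout
+spec:
+  replicas: 5
+  selector:
+    matchLabels:
+      app: example
+  template:
+    metadata:
+      labels:
+        app: example
+    spec:
+      containers:
+      - name: app
+        image: quay.io/example/app:v2
+  strategy:
+    canary:
+      steps:
+      - setWeight: 20 <1>
+      - pause: {} <2>
+----
+<1> Route 20% of traffic to the new version.
+<2> Wait for manual or metric-driven promotion before shifting the remaining traffic.
+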
+[id="prerequisites_using-argo-rollouts-for-progressive-deployment-delivery"]
+== Prerequisites
+* You have access to the cluster with `cluster-admin` privileges.
+* You have access to the {product-title} web console.
+* {gitops-title} 1.9.0 or a newer version is installed in your cluster.
+
+include::modules/gitops-benefits-of-argo-rollouts.adoc[leveloffset=+1]
+
+include::modules/gitops-about-argo-rollout-manager-custom-resources-and-spec.adoc[leveloffset=+1]
+
+include::modules/gitops-creating-rolloutmanager-custom-resource.adoc[leveloffset=+1]
+
+include::modules/gitops-deleting-rolloutmanager-custom-resource.adoc[leveloffset=+1]
+
+[role="_additional-resources"]
+[id="additional-resources_argo-rollouts-in-gitops"]
+== Additional resources
+* xref:../../cicd/gitops/installing-openshift-gitops.adoc#installing-gitops-operator-in-web-console_installing-openshift-gitops[Installing {gitops-title}]
+* xref:../../cicd/gitops/uninstalling-openshift-gitops.adoc#go-uninstalling-gitops-operator_uninstalling-openshift-gitops[Uninstalling {gitops-title}]
+* xref:../../applications/deployments/deployment-strategies.adoc#deployments-canary-deployments_deployment-strategies[Canary deployments]
+* xref:../../applications/deployments/route-based-deployment-strategies.adoc#deployments-blue-green_route-based-deployment-strategies[Blue-green deployments]
+* link:https://argo-rollouts-manager.readthedocs.io/en/latest/crd_reference/[`RolloutManager` Custom Resource specification]
+* link:https://www.redhat.com/architect/blue-green-canary-argo-rollouts[Blue-green and canary deployments with Argo Rollouts]
+* link:https://cloud.redhat.com/blog/trying-out-argo-rollouts-in-openshift-gitops-1.9/[Argo Rollouts tech preview limitations]
diff --git a/cicd/gitops/viewing-argo-cd-logs.adoc b/cicd/gitops/viewing-argo-cd-logs.adoc
new file mode 100644
index 000000000000..af11fee67677
--- /dev/null
+++ b/cicd/gitops/viewing-argo-cd-logs.adoc
@@ -0,0 +1,16 @@
+:_mod-docs-content-type: ASSEMBLY
+[id="viewing-argo-cd-logs"]
+= Viewing Argo CD logs
+include::_attributes/common-attributes.adoc[]
+:context: viewing-argo-cd-logs
+
+toc::[]
+
+You can view the Argo CD logs with {logging}. {logging-uc} visualizes the logs on a Kibana dashboard. The {clo} enables logging with Argo CD by default.
+
+include::modules/gitops-storing-and-retrieving-argo-cd-logs.adoc[leveloffset=+1]
+
+[role="_additional-resources"]
+[id="additional-resources_viewing-argo-cd-logs"]
+== Additional resources
+* xref:../../logging/cluster-logging-deploying.adoc#cluster-logging-deploy-console_cluster-logging-deploying[Installing {logging} using the web console]
diff --git a/cicd/hugeBook.flag b/cicd/hugeBook.flag
new file mode 100644
index 000000000000..e69de29bb2d1
diff --git a/cicd/index.adoc b/cicd/index.adoc
index e9ed1f0b7af2..550df0bca4e7 100644
--- a/cicd/index.adoc
+++ b/cicd/index.adoc
@@ -1,4 +1,4 @@
-:_content-type: ASSEMBLY
+:_mod-docs-content-type: ASSEMBLY
[id="ci-cd-overview"]
= {product-title} CI/CD overview
include::_attributes/common-attributes.adoc[]
@@ -15,27 +15,27 @@ toc::[]
[id="openshift-builds"]
== OpenShift Builds
-With OpenShift Builds, you can create cloud-native apps by using a declarative build process. You can define the build process in a YAML file that you use to create a BuildConfig object. This definition includes attributes such as build triggers, input parameters, and source code. When deployed, the BuildConfig object typically builds a runnable image and pushes it to a container image registry.
+OpenShift Builds provides the following options to configure and run a build:
-OpenShift Builds provides the following extensible support for build strategies:
+* Builds using Shipwright is an extensible build framework based on the Shipwright project. You can use it to build container images on an {product-title} cluster from source code or a Dockerfile by using image build tools, such as Source-to-Image (S2I) and Buildah.
++
+For more information, see link:https://docs.openshift.com/builds/latest/about/overview-openshift-builds.html[Overview of Builds].
-* Docker build
-* Source-to-image (S2I) build
-* Custom build
-
-For more information, see xref:../cicd/builds/understanding-image-builds.adoc#understanding-image-builds[Understanding image builds]
+* Builds using `BuildConfig` objects is a declarative build process to create cloud-native apps. You can define the build process in a YAML file that you use to create a `BuildConfig` object. This definition includes attributes such as build triggers, input parameters, and source code. When deployed, the `BuildConfig` object builds a runnable image and pushes the image to a container image registry. With the `BuildConfig` object, you can create a Docker, Source-to-image (S2I), or custom build.
++
+For more information, see xref:../cicd/builds/understanding-image-builds.adoc#understanding-image-builds[Understanding image builds].
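+
+As a brief, hedged illustration of such a definition (the repository URL, builder image, and names are assumptions), a minimal S2I `BuildConfig` might look like this:
+
+[source,yaml]
+----
+apiVersion: build.openshift.io/v1
+kind: BuildConfig
+metadata:
+  name: example-app
+spec:
+  source:
+    type: Git
+    git:
+      uri: https://github.com/example/app.git
+  strategy:
+    type: Source
+    sourceStrategy:
+      from:
+        kind: ImageStreamTag
+        name: nodejs:latest
+        namespace: openshift # builder image streams typically live here
+  output:
+    to:
+      kind: ImageStreamTag
+      name: example-app:latest
+  triggers:
+  - type: ConfigChange # rebuild when the configuration changes
+----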
[id="openshift-pipelines"]
== {pipelines-shortname}
{pipelines-shortname} provides a Kubernetes-native CI/CD framework to design and run each step of the CI/CD pipeline in its own container. It can scale independently to meet the on-demand pipelines with predictable outcomes.
-For more information, see xref:../cicd/pipelines/understanding-openshift-pipelines.adoc#understanding-openshift-pipelines[Understanding {pipelines-shortname}]
+For more information, see link:https://docs.openshift.com/pipelines/latest/about/understanding-openshift-pipelines.html[Understanding {pipelines-shortname}].
[id="openshift-gitops"]
== OpenShift GitOps
OpenShift GitOps is an Operator that uses Argo CD as the declarative GitOps engine. It enables GitOps workflows across multicluster OpenShift and Kubernetes infrastructure. Using OpenShift GitOps, administrators can consistently configure and deploy Kubernetes-based infrastructure and applications across clusters and development lifecycles.
-For more information, see xref:../cicd/gitops/understanding-openshift-gitops.adoc#understanding-openshift-gitops[Understanding OpenShift GitOps]
+For more information, see xref:../cicd/gitops/about-redhat-openshift-gitops.adoc#about-redhat-openshift-gitops[About {gitops-title}].
[id="jenkins-ci-cd"]
== Jenkins
diff --git a/cicd/jenkins/images-other-jenkins-agent.adoc b/cicd/jenkins/images-other-jenkins-agent.adoc
index 68f0628b5bf8..ac056380a912 100644
--- a/cicd/jenkins/images-other-jenkins-agent.adoc
+++ b/cicd/jenkins/images-other-jenkins-agent.adoc
@@ -1,4 +1,4 @@
-:_content-type: ASSEMBLY
+:_mod-docs-content-type: ASSEMBLY
[id="images-other-jenkins-agent"]
= Jenkins agent
include::_attributes/common-attributes.adoc[]
diff --git a/cicd/jenkins/images-other-jenkins.adoc b/cicd/jenkins/images-other-jenkins.adoc
index 87aeeabeb003..9019f4665a99 100644
--- a/cicd/jenkins/images-other-jenkins.adoc
+++ b/cicd/jenkins/images-other-jenkins.adoc
@@ -1,4 +1,4 @@
-:_content-type: ASSEMBLY
+:_mod-docs-content-type: ASSEMBLY
[id="images-other-jenkins"]
= Configuring Jenkins images
include::_attributes/common-attributes.adoc[]
diff --git a/cicd/jenkins/important-changes-to-openshift-jenkins-images.adoc b/cicd/jenkins/important-changes-to-openshift-jenkins-images.adoc
index 6590c40201fd..ef82dd6c2188 100644
--- a/cicd/jenkins/important-changes-to-openshift-jenkins-images.adoc
+++ b/cicd/jenkins/important-changes-to-openshift-jenkins-images.adoc
@@ -1,4 +1,4 @@
-:_content-type: ASSEMBLY
+:_mod-docs-content-type: ASSEMBLY
[id="important-changes-to-openshift-jenkins-images"]
= Important changes to OpenShift Jenkins images
include::_attributes/common-attributes.adoc[]
diff --git a/cicd/jenkins/migrating-from-jenkins-to-openshift-pipelines.adoc b/cicd/jenkins/migrating-from-jenkins-to-openshift-pipelines.adoc
index 7f8242f2ddbc..1bd3cd4e3118 100644
--- a/cicd/jenkins/migrating-from-jenkins-to-openshift-pipelines.adoc
+++ b/cicd/jenkins/migrating-from-jenkins-to-openshift-pipelines.adoc
@@ -1,4 +1,4 @@
-:_content-type: ASSEMBLY
+:_mod-docs-content-type: ASSEMBLY
//Jenkins-Tekton-Migration
[id="migrating-from-jenkins-to-openshift-pipelines_{context}"]
= Migrating from Jenkins to {pipelines-shortname} or Tekton
@@ -7,7 +7,7 @@ include::_attributes/common-attributes.adoc[]
toc::[]
-You can migrate your CI/CD workflows from Jenkins to xref:../../cicd/pipelines/understanding-openshift-pipelines.adoc#understanding-openshift-pipelines[{pipelines-title}], a cloud-native CI/CD experience based on the Tekton project.
+You can migrate your CI/CD workflows from Jenkins to link:https://docs.openshift.com/pipelines/latest/about/understanding-openshift-pipelines.html[{pipelines-title}], a cloud-native CI/CD experience based on the Tekton project.
include::modules/jt-comparison-of-jenkins-and-openshift-pipelines-concepts.adoc[leveloffset=+1]
@@ -23,5 +23,5 @@ include::modules/jt-examples-of-common-use-cases.adoc[leveloffset=+1]
[role="_additional-resources"]
== Additional resources
-* xref:../../cicd/pipelines/understanding-openshift-pipelines.adoc#understanding-openshift-pipelines[Understanding {pipelines-shortname}]
+* link:https://docs.openshift.com/pipelines/latest/about/understanding-openshift-pipelines.html[Understanding {pipelines-shortname}]
* xref:../../authentication/using-rbac.adoc#using-rbac[Role-based Access Control]
diff --git a/cicd/pipelines/about-pipelines.adoc b/cicd/pipelines/about-pipelines.adoc
new file mode 100644
index 000000000000..2ae03ee625a1
--- /dev/null
+++ b/cicd/pipelines/about-pipelines.adoc
@@ -0,0 +1,26 @@
+:_mod-docs-content-type: ASSEMBLY
+[id="about-pipelines"]
+= About {pipelines-title}
+:context: about-pipelines
+include::_attributes/common-attributes.adoc[]
+
+toc::[]
+
+{pipelines-title} is a cloud-native, continuous integration and continuous delivery (CI/CD) solution based on Kubernetes resources. It uses Tekton building blocks to automate deployments across multiple platforms by abstracting away the underlying implementation details. Tekton introduces a number of standard custom resource definitions (CRDs) for defining CI/CD pipelines that are portable across Kubernetes distributions.
+
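+As a minimal sketch of these building blocks (the task name, image, and the `tekton.dev/v1` API version used here are assumptions for illustration), a single-step Tekton `Task` might look like this:
+
+[source,yaml]
+----
+apiVersion: tekton.dev/v1
+kind: Task
+metadata:
+  name: echo-hello
+spec:
+  steps:
+  - name: echo
+    image: registry.access.redhat.com/ubi9/ubi-minimal
+    script: |
+      echo "Hello from a Tekton task"
+----
+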
+[NOTE]
+====
+Because {pipelines-title} releases on a different cadence from {product-title}, the {pipelines-title} documentation is now available as separate documentation sets for each minor version of the product.
+
+The {pipelines-title} documentation is available at link:https://docs.openshift.com/pipelines/[].
+
+Documentation for specific versions is available using the version selector drop-down list, or directly by adding the version to the URL, for example, link:https://docs.openshift.com/pipelines/1.11[].
+
+The {pipelines-title} documentation is also available on the Red Hat Customer Portal at https://access.redhat.com/documentation/en-us/red_hat_openshift_pipelines/[].
+
+For additional information about the {pipelines-title} life cycle and supported platforms, refer to the link:https://access.redhat.com/support/policy/updates/openshift#pipelines[Platform Life Cycle Policy].
+====
+
diff --git a/cicd/pipelines/authenticating-pipelines-using-git-secret.adoc b/cicd/pipelines/authenticating-pipelines-using-git-secret.adoc
index 4f575e125149..1f894d99e6df 100644
--- a/cicd/pipelines/authenticating-pipelines-using-git-secret.adoc
+++ b/cicd/pipelines/authenticating-pipelines-using-git-secret.adoc
@@ -1,4 +1,4 @@
-:_content-type: ASSEMBLY
+:_mod-docs-content-type: ASSEMBLY
[id="authenticating-pipelines-using-git-secret"]
= Authenticating pipelines using git secret
include::_attributes/common-attributes.adoc[]
diff --git a/cicd/pipelines/creating-applications-with-cicd-pipelines.adoc b/cicd/pipelines/creating-applications-with-cicd-pipelines.adoc
index 732c493b8662..211013bd6e70 100644
--- a/cicd/pipelines/creating-applications-with-cicd-pipelines.adoc
+++ b/cicd/pipelines/creating-applications-with-cicd-pipelines.adoc
@@ -1,4 +1,4 @@
-:_content-type: ASSEMBLY
+:_mod-docs-content-type: ASSEMBLY
[id="creating-applications-with-cicd-pipelines"]
= Creating CI/CD solutions for applications using {pipelines-shortname}
include::_attributes/common-attributes.adoc[]
@@ -81,7 +81,7 @@ include::modules/op-validating-pull-requests-using-GitHub-interceptors.adoc[leve
== Additional resources
* To include {pac} along with the application source code in the same repository, see xref:../../cicd/pipelines/using-pipelines-as-code.adoc#using-pipelines-as-code[Using {pac}].
-* For more details on pipelines in the *Developer* perspective, see the xref:../../cicd/pipelines/working-with-pipelines-using-the-developer-perspective.adoc#working-with-pipelines-using-the-developer-perspective[working with pipelines in the *Developer* perspective] section.
+* For more details on pipelines in the *Developer* perspective, see the xref:../../cicd/pipelines/working-with-pipelines-web-console.adoc#working-with-pipelines-web-console[working with pipelines in the web console] section.
* To learn more about Security Context Constraints (SCCs), see the xref:../../authentication/managing-security-context-constraints.adoc#managing-pod-security-policies[Managing Security Context Constraints] section.
* For more examples of reusable tasks, see the link:https://github.com/openshift/pipelines-catalog[OpenShift Catalog] repository. Additionally, you can also see the Tekton Catalog in the Tekton project.
* To install and deploy a custom instance of Tekton Hub for reusable tasks and pipelines, see xref:../../cicd/pipelines/using-tekton-hub-with-openshift-pipelines.adoc#using-tekton-hub-with-openshift-pipelines[Using {tekton-hub} with {pipelines-title}].
diff --git a/cicd/pipelines/customizing-configurations-in-the-tektonconfig-cr.adoc b/cicd/pipelines/customizing-configurations-in-the-tektonconfig-cr.adoc
index ed728694b569..b6ebea428388 100644
--- a/cicd/pipelines/customizing-configurations-in-the-tektonconfig-cr.adoc
+++ b/cicd/pipelines/customizing-configurations-in-the-tektonconfig-cr.adoc
@@ -1,4 +1,4 @@
-:_content-type: ASSEMBLY
+:_mod-docs-content-type: ASSEMBLY
[id="customizing-configurations-in-the-tektonconfig-cr"]
= Customizing configurations in the TektonConfig custom resource
include::_attributes/common-attributes.adoc[]
@@ -52,5 +52,5 @@ include::modules/op-annotations-for-automatic-pruning-taskruns-pipelineruns.adoc
* xref:../../cicd/pipelines/authenticating-pipelines-using-git-secret.adoc#op-configuring-ssh-authentication-for-git_authenticating-pipelines-using-git-secret[Configuring SSH authentication for Git]
* xref:../../cicd/pipelines/managing-nonversioned-and-versioned-cluster-tasks.adoc#managing-nonversioned-and-versioned-cluster-tasks[Managing non-versioned and versioned cluster tasks]
-* xref:../../cicd/pipelines/working-with-pipelines-using-the-developer-perspective.adoc#using-custom-pipeline-template-for-git-import_working-with-pipelines-using-the-developer-perspective[Using a custom pipeline template for creating and deploying an application from a Git repository]
* xref:../../applications/pruning-objects.adoc#pruning-objects[Pruning objects to reclaim resources]
+* xref:../../cicd/pipelines/working-with-pipelines-web-console.adoc#op-creating-pipeline-templates-admin-console_working-with-pipelines-web-console[Creating pipeline templates in the Administrator perspective]
diff --git a/cicd/pipelines/installing-pipelines.adoc b/cicd/pipelines/installing-pipelines.adoc
index 03ed8d7b277e..1bcf8a8c472b 100644
--- a/cicd/pipelines/installing-pipelines.adoc
+++ b/cicd/pipelines/installing-pipelines.adoc
@@ -1,4 +1,4 @@
-:_content-type: ASSEMBLY
+:_mod-docs-content-type: ASSEMBLY
[id="installing-pipelines"]
= Installing {pipelines-shortname}
include::_attributes/common-attributes.adoc[]
@@ -46,11 +46,11 @@ include::modules/op-performance-tuning-using-tektonconfig-cr.adoc[leveloffset=+1
* To install {tekton-chains} using the {pipelines-title} Operator, see xref:../../cicd/pipelines/using-tekton-chains-for-openshift-pipelines-supply-chain-security.adoc#using-tekton-chains-for-openshift-pipelines-supply-chain-security[Using {tekton-chains} for {pipelines-title} supply chain security].
-* To install and deploy in-cluster {tekton-hub}, see xref:../../cicd/pipelines/using-tekton-hub-with-openshift-pipelines.adoc#using-tekton-hub-with-openshift-pipelines[Using {tekton-hub} with {pipelines-title}].
+* To install and deploy in-cluster {tekton-hub}, see xref:../../cicd/pipelines/using-tekton-hub-with-openshift-pipelines.adoc#using-tekton-hub-with-openshift-pipelines[Using {tekton-hub} with {pipelines-title}].
* For more information on using pipelines in a restricted environment, see:
-** xref:../../cicd/pipelines/creating-applications-with-cicd-pipelines.html#op-mirroring-images-to-run-pipelines-in-restricted-environment_creating-applications-with-cicd-pipelines[Mirroring images to run pipelines in a restricted environment]
+** xref:../../cicd/pipelines/creating-applications-with-cicd-pipelines.adoc#op-mirroring-images-to-run-pipelines-in-restricted-environment_creating-applications-with-cicd-pipelines[Mirroring images to run pipelines in a restricted environment]
** xref:../../openshift_images/configuring-samples-operator.adoc#samples-operator-restricted-network-install[Configuring Samples Operator for a restricted cluster]
diff --git a/cicd/pipelines/managing-nonversioned-and-versioned-cluster-tasks.adoc b/cicd/pipelines/managing-nonversioned-and-versioned-cluster-tasks.adoc
index ed9a3f96c984..46e8b3d29c11 100644
--- a/cicd/pipelines/managing-nonversioned-and-versioned-cluster-tasks.adoc
+++ b/cicd/pipelines/managing-nonversioned-and-versioned-cluster-tasks.adoc
@@ -1,4 +1,4 @@
-:_content-type: ASSEMBLY
+:_mod-docs-content-type: ASSEMBLY
[id="managing-nonversioned-and-versioned-cluster-tasks"]
= Managing non-versioned and versioned cluster tasks
include::_attributes/common-attributes.adoc[]
diff --git a/cicd/pipelines/op-release-notes.adoc b/cicd/pipelines/op-release-notes.adoc
index 3784aff18fc5..670dd54577be 100644
--- a/cicd/pipelines/op-release-notes.adoc
+++ b/cicd/pipelines/op-release-notes.adoc
@@ -1,4 +1,4 @@
-:_content-type: ASSEMBLY
+:_mod-docs-content-type: ASSEMBLY
//OpenShift Pipelines Release Notes
include::_attributes/common-attributes.adoc[]
[id="op-release-notes"]
diff --git a/cicd/pipelines/reducing-pipelines-resource-consumption.adoc b/cicd/pipelines/reducing-pipelines-resource-consumption.adoc
index 764f7790ac1a..444c40586fc8 100644
--- a/cicd/pipelines/reducing-pipelines-resource-consumption.adoc
+++ b/cicd/pipelines/reducing-pipelines-resource-consumption.adoc
@@ -1,4 +1,4 @@
-:_content-type: ASSEMBLY
+:_mod-docs-content-type: ASSEMBLY
[id="reducing-pipelines-resource-consumption"]
= Reducing resource consumption of {pipelines-shortname}
include::_attributes/common-attributes.adoc[]
@@ -12,8 +12,8 @@ To define the final resource limits that are set on the resulting pods, {pipelin
To restrict resource consumption in your project, you can:
-* xref:../../applications/quotas/quotas-setting-per-project.html[Set and manage resource quotas] to limit the aggregate resource consumption.
-* Use xref:../../nodes/clusters/nodes-cluster-limit-ranges.html[limit ranges to restrict resource consumption] for specific objects, such as pods, images, image streams, and persistent volume claims.
+* xref:../../applications/quotas/quotas-setting-per-project.adoc[Set and manage resource quotas] to limit the aggregate resource consumption.
+* Use xref:../../nodes/clusters/nodes-cluster-limit-ranges.adoc[limit ranges to restrict resource consumption] for specific objects, such as pods, images, image streams, and persistent volume claims.
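+
+For example, a minimal `LimitRange` sketch (the values are illustrative) that caps container resources for pods in a project might look like this:
+
+[source,yaml]
+----
+apiVersion: v1
+kind: LimitRange
+metadata:
+  name: pipeline-limit-range
+spec:
+  limits:
+  - type: Container
+    default:          # limits applied when a container sets none
+      cpu: 500m
+      memory: 512Mi
+    defaultRequest:   # requests applied when a container sets none
+      cpu: 100m
+      memory: 256Mi
+----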
include::modules/op-understanding-pipelines-resource-consumption.adoc[leveloffset=+1]
diff --git a/cicd/pipelines/remote-pipelines-tasks-resolvers.adoc b/cicd/pipelines/remote-pipelines-tasks-resolvers.adoc
index 80ca525603ed..74bb0f07b9d5 100644
--- a/cicd/pipelines/remote-pipelines-tasks-resolvers.adoc
+++ b/cicd/pipelines/remote-pipelines-tasks-resolvers.adoc
@@ -1,4 +1,4 @@
-:_content-type: ASSEMBLY
+:_mod-docs-content-type: ASSEMBLY
[id="remote-pipelines-tasks-resolvers"]
= Specifying remote pipelines and tasks using resolvers
include::_attributes/common-attributes.adoc[]
@@ -21,7 +21,12 @@ Git resolver:: Retrieves a task or pipeline binding from a Git repository. You m
[id="resolver-hub_{context}"]
== Specifying a remote pipeline or task from a Tekton catalog
-You can specify a remote pipeline or task that is defined in a public Tekton catalog, either link:https://artifacthub.io/[{artifact-hub}] or link:https://hub.tekton.dev/[{tekton-hub}], by using the hub resolver.
+You can use the hub resolver to specify a remote pipeline or task that is defined either in the public link:https://artifacthub.io/[{artifact-hub}] Tekton catalog or in an instance of {tekton-hub}.
+
+[IMPORTANT]
+====
+The {artifact-hub} project is not supported with {pipelines-title}. Only the configuration of {artifact-hub} is supported.
+====
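+
+For orientation, a `TaskRun` that references a task through the hub resolver might look like the following sketch; the `git-clone` task name and version are illustrative assumptions:
+
+[source,yaml]
+----
+apiVersion: tekton.dev/v1
+kind: TaskRun
+metadata:
+  name: hub-resolver-example
+spec:
+  taskRef:
+    resolver: hub
+    params:
+    - name: kind
+      value: task
+    - name: name
+      value: git-clone
+    - name: version
+      value: "0.9"
+----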
include::modules/op-resolver-hub-config.adoc[leveloffset=+2]
include::modules/op-resolver-hub.adoc[leveloffset=+2]
@@ -29,7 +34,7 @@ include::modules/op-resolver-hub.adoc[leveloffset=+2]
[id="resolver-bundles_{context}"]
== Specifying a remote pipeline or task from a Tekton bundle
-You can specify a remote pipeline or task from a Tekton bundle by using the bundles resolver. A Tekton bundle is an OCI image available from any OCI repository, such as an OpenShift container repository.
+You can use the bundles resolver to specify a remote pipeline or task from a Tekton bundle. A Tekton bundle is an OCI image available from any OCI repository, such as an OpenShift container repository.
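+
+For orientation, a `TaskRun` that pulls a task from a Tekton bundle might look like the following sketch; the registry path and task name are illustrative assumptions:
+
+[source,yaml]
+----
+apiVersion: tekton.dev/v1
+kind: TaskRun
+metadata:
+  name: bundles-resolver-example
+spec:
+  taskRef:
+    resolver: bundles
+    params:
+    - name: bundle
+      value: registry.example.com/tasks/example-task:1.0
+    - name: name
+      value: example-task
+    - name: kind
+      value: task
+----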
include::modules/op-resolver-bundle-config.adoc[leveloffset=+2]
include::modules/op-resolver-bundle.adoc[leveloffset=+2]
@@ -37,7 +42,7 @@ include::modules/op-resolver-bundle.adoc[leveloffset=+2]
[id="resolver-cluster_{context}"]
== Specifying a remote pipeline or task from the same cluster
-You can specify a remote pipeline or task that is defined in a namespace on the {product-title} cluster where {pipelines-title} is running by using the cluster resolver.
+You can use the cluster resolver to specify a remote pipeline or task that is defined in a namespace on the {product-title} cluster where {pipelines-title} is running.
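+
+For orientation, a `PipelineRun` that references a pipeline in another namespace through the cluster resolver might look like the following sketch; the pipeline name and namespace are illustrative assumptions:
+
+[source,yaml]
+----
+apiVersion: tekton.dev/v1
+kind: PipelineRun
+metadata:
+  name: cluster-resolver-example
+spec:
+  pipelineRef:
+    resolver: cluster
+    params:
+    - name: kind
+      value: pipeline
+    - name: name
+      value: example-pipeline
+    - name: namespace
+      value: shared-pipelines
+----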
include::modules/op-resolver-cluster-config.adoc[leveloffset=+2]
include::modules/op-resolver-cluster.adoc[leveloffset=+2]
@@ -45,8 +50,14 @@ include::modules/op-resolver-cluster.adoc[leveloffset=+2]
[id="resolver-git_{context}"]
== Specifying a remote pipeline or task from a Git repository
-You can specify a remote pipeline or task from a Git repostory by using the Git resolver. The repository must contain a YAML file that defines the pipeline or task. The Git resolver can access a repository either by cloning it anonymously or else by using the authenticated SCM API.
+You can use the Git resolver to specify a remote pipeline or task from a Git repository. The repository must contain a YAML file that defines the pipeline or task. The Git resolver can access a repository either by cloning it anonymously or by using the authenticated SCM API.
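+
+For orientation, a `TaskRun` that fetches a task definition from a Git repository by anonymous cloning might look like the following sketch; the repository URL and path are illustrative assumptions:
+
+[source,yaml]
+----
+apiVersion: tekton.dev/v1
+kind: TaskRun
+metadata:
+  name: git-resolver-example
+spec:
+  taskRef:
+    resolver: git
+    params:
+    - name: url
+      value: https://github.com/example/tasks.git
+    - name: revision
+      value: main
+    - name: pathInRepo
+      value: task/example-task/0.1/example-task.yaml
+----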
include::modules/op-resolver-git-config-anon.adoc[leveloffset=+2]
include::modules/op-resolver-git-config-scm.adoc[leveloffset=+2]
include::modules/op-resolver-git.adoc[leveloffset=+2]
+
+[role="_additional-resources"]
+[id="additional-resources_{context}"]
+== Additional resources
+
+* xref:../../cicd/pipelines/using-tekton-hub-with-openshift-pipelines.adoc#using-tekton-hub-with-openshift-pipelines[Using Tekton Hub with {pipelines-shortname}]
diff --git a/cicd/pipelines/securing-webhooks-with-event-listeners.adoc b/cicd/pipelines/securing-webhooks-with-event-listeners.adoc
index 338c5d82c53f..c07c74ca48fd 100644
--- a/cicd/pipelines/securing-webhooks-with-event-listeners.adoc
+++ b/cicd/pipelines/securing-webhooks-with-event-listeners.adoc
@@ -1,4 +1,4 @@
-:_content-type: ASSEMBLY
+:_mod-docs-content-type: ASSEMBLY
[id="securing-webhooks-with-event-listeners"]
= Securing webhooks with event listeners
include::_attributes/common-attributes.adoc[]
diff --git a/cicd/pipelines/setting-compute-resource-quota-for-openshift-pipelines.adoc b/cicd/pipelines/setting-compute-resource-quota-for-openshift-pipelines.adoc
index 293db99e61cf..e21ded700449 100644
--- a/cicd/pipelines/setting-compute-resource-quota-for-openshift-pipelines.adoc
+++ b/cicd/pipelines/setting-compute-resource-quota-for-openshift-pipelines.adoc
@@ -1,4 +1,4 @@
-:_content-type: ASSEMBLY
+:_mod-docs-content-type: ASSEMBLY
[id="setting-compute-resource-quota-for-openshift-pipelines"]
= Setting compute resource quota for {pipelines-shortname}
include::_attributes/common-attributes.adoc[]
diff --git a/cicd/pipelines/understanding-openshift-pipelines.adoc b/cicd/pipelines/understanding-openshift-pipelines.adoc
index 7aa1be6a3f91..93b66781be31 100644
--- a/cicd/pipelines/understanding-openshift-pipelines.adoc
+++ b/cicd/pipelines/understanding-openshift-pipelines.adoc
@@ -1,4 +1,4 @@
-:_content-type: ASSEMBLY
+:_mod-docs-content-type: ASSEMBLY
[id="understanding-openshift-pipelines"]
= Understanding {pipelines-shortname}
include::_attributes/common-attributes.adoc[]
diff --git a/cicd/pipelines/uninstalling-pipelines.adoc b/cicd/pipelines/uninstalling-pipelines.adoc
index c3489bd15c38..286ebc310bdc 100644
--- a/cicd/pipelines/uninstalling-pipelines.adoc
+++ b/cicd/pipelines/uninstalling-pipelines.adoc
@@ -1,4 +1,4 @@
-:_content-type: ASSEMBLY
+:_mod-docs-content-type: ASSEMBLY
[id="uninstalling-pipelines"]
= Uninstalling {pipelines-shortname}
include::_attributes/common-attributes.adoc[]
@@ -9,7 +9,7 @@ toc::[]
Cluster administrators can uninstall the {pipelines-title} Operator by performing the following steps:
. Delete the Custom Resources (CRs) that were added by default when you installed the {pipelines-title} Operator.
-. Delete the CRs of the optional components, such as {tekton-chains}, that are dependent on the Operator.
+. Delete the CRs of the optional components, such as {tekton-hub}, that depend on the Operator.
+
[CAUTION]
====
diff --git a/cicd/pipelines/unprivileged-building-of-container-images-using-buildah.adoc b/cicd/pipelines/unprivileged-building-of-container-images-using-buildah.adoc
index 48734d9ea68c..90a4aa50f109 100644
--- a/cicd/pipelines/unprivileged-building-of-container-images-using-buildah.adoc
+++ b/cicd/pipelines/unprivileged-building-of-container-images-using-buildah.adoc
@@ -1,4 +1,4 @@
-:_content-type: ASSEMBLY
+:_mod-docs-content-type: ASSEMBLY
[id="unprivileged-building-of-container-images-using-buildah"]
= Building of container images using Buildah as a non-root user
include::_attributes/common-attributes.adoc[]
diff --git a/cicd/pipelines/using-pipelines-as-code.adoc b/cicd/pipelines/using-pipelines-as-code.adoc
index ddf4f6948ee9..3c37b05c3c30 100644
--- a/cicd/pipelines/using-pipelines-as-code.adoc
+++ b/cicd/pipelines/using-pipelines-as-code.adoc
@@ -1,4 +1,4 @@
-:_content-type: ASSEMBLY
+:_mod-docs-content-type: ASSEMBLY
[id="using-pipelines-as-code"]
= Using {pac}
include::_attributes/common-attributes.adoc[]
diff --git a/cicd/pipelines/using-pods-in-a-privileged-security-context.adoc b/cicd/pipelines/using-pods-in-a-privileged-security-context.adoc
index 323b4b627d52..f8dcf2ba63be 100644
--- a/cicd/pipelines/using-pods-in-a-privileged-security-context.adoc
+++ b/cicd/pipelines/using-pods-in-a-privileged-security-context.adoc
@@ -1,4 +1,4 @@
-:_content-type: ASSEMBLY
+:_mod-docs-content-type: ASSEMBLY
[id="using-pods-in-a-privileged-security-context"]
= Using pods in a privileged security context
include::_attributes/common-attributes.adoc[]
diff --git a/cicd/pipelines/using-tekton-chains-for-openshift-pipelines-supply-chain-security.adoc b/cicd/pipelines/using-tekton-chains-for-openshift-pipelines-supply-chain-security.adoc
index a1c864d12d04..d377b8225ddb 100644
--- a/cicd/pipelines/using-tekton-chains-for-openshift-pipelines-supply-chain-security.adoc
+++ b/cicd/pipelines/using-tekton-chains-for-openshift-pipelines-supply-chain-security.adoc
@@ -1,4 +1,4 @@
-:_content-type: ASSEMBLY
+:_mod-docs-content-type: ASSEMBLY
[id="using-tekton-chains-for-openshift-pipelines-supply-chain-security"]
= Using Tekton Chains for {pipelines-shortname} supply chain security
include::_attributes/common-attributes.adoc[]
diff --git a/cicd/pipelines/using-tekton-hub-with-openshift-pipelines.adoc b/cicd/pipelines/using-tekton-hub-with-openshift-pipelines.adoc
index c4b1c5f07da0..fdff8d224ca0 100644
--- a/cicd/pipelines/using-tekton-hub-with-openshift-pipelines.adoc
+++ b/cicd/pipelines/using-tekton-hub-with-openshift-pipelines.adoc
@@ -1,4 +1,4 @@
-:_content-type: ASSEMBLY
+:_mod-docs-content-type: ASSEMBLY
[id="using-tekton-hub-with-openshift-pipelines"]
= Using Tekton Hub with {pipelines-shortname}
include::_attributes/common-attributes.adoc[]
@@ -10,7 +10,7 @@ toc::[]
include::snippets/technology-preview.adoc[]
[role="_abstract"]
-{tekton-hub} helps you discover, search, and share reusable tasks and pipelines for your CI/CD workflows. A public instance of {tekton-hub} is available at link:https://hub.tekton.dev/[hub.tekton.dev]. Cluster administrators can also install and deploy a custom instance of {tekton-hub} by modifying the configurations in the `TektonHub` custom resource (CR).
+{tekton-hub} helps you discover, search, and share reusable tasks and pipelines for your CI/CD workflows. A public instance of {tekton-hub} is available at link:https://hub.tekton.dev/[hub.tekton.dev]. Cluster administrators can also install and deploy a custom instance of {tekton-hub} by modifying the configurations in the `TektonHub` custom resource (CR).
include::modules/op-installing-and-deploying-tekton-hub-on-an-openshift-cluster.adoc[leveloffset=+1]
diff --git a/cicd/pipelines/viewing-pipeline-logs-using-the-openshift-logging-operator.adoc b/cicd/pipelines/viewing-pipeline-logs-using-the-openshift-logging-operator.adoc
index 0a4b75bc2866..8fe4700773fc 100644
--- a/cicd/pipelines/viewing-pipeline-logs-using-the-openshift-logging-operator.adoc
+++ b/cicd/pipelines/viewing-pipeline-logs-using-the-openshift-logging-operator.adoc
@@ -1,4 +1,4 @@
-:_content-type: ASSEMBLY
+:_mod-docs-content-type: ASSEMBLY
[id="viewing-pipeline-logs-using-the-openshift-logging-operator"]
= Viewing pipeline logs using the OpenShift Logging Operator
include::_attributes/common-attributes.adoc[]
@@ -26,7 +26,6 @@ include::modules/op-viewing-pipeline-logs-in-kibana.adoc[leveloffset=+1]
[role="_additional-resources"]
[id="additional-resources_viewing-pipeline-logs-using-the-openshift-logging-operator"]
== Additional resources
-
* xref:../../logging/cluster-logging-deploying.adoc[Installing OpenShift Logging]
-* xref:../../logging/viewing-resource-logs.adoc[Viewing logs for a resource]
-* xref:../../logging/cluster-logging-visualizer.adoc[Viewing cluster logs by using Kibana]
+* xref:../../logging/log_visualization/log-visualization.adoc#log-visualization-resource-logs_log-visualization[Viewing logs for a resource]
+* xref:../../logging/log_visualization/logging-kibana.adoc#logging-kibana[Log visualization with Kibana]
diff --git a/cicd/pipelines/working-with-pipelines-using-the-developer-perspective.adoc b/cicd/pipelines/working-with-pipelines-using-the-developer-perspective.adoc
deleted file mode 100644
index f81e5c6f689b..000000000000
--- a/cicd/pipelines/working-with-pipelines-using-the-developer-perspective.adoc
+++ /dev/null
@@ -1,52 +0,0 @@
-:_content-type: ASSEMBLY
-[id="working-with-pipelines-using-the-developer-perspective"]
-= Working with {pipelines-title} using the Developer perspective
-include::_attributes/common-attributes.adoc[]
-:context: working-with-pipelines-using-the-developer-perspective
-
-toc::[]
-
-[role="_abstract"]
-You can use the *Developer* perspective of the {product-title} web console to create CI/CD pipelines for your software delivery process.
-
-In the *Developer* perspective:
-
-* Use the *Add* -> *Pipeline* -> *Pipeline builder* option to create customized pipelines for your application.
-* Use the *Add* -> *From Git* option to create pipelines using operator-installed pipeline templates and resources while creating an application on {product-title}.
-
-After you create the pipelines for your application, you can view and visually interact with the deployed pipelines in the *Pipelines* view. You can also use the *Topology* view to interact with the pipelines created using the *From Git* option. You must apply custom labels to pipelines created using the *Pipeline builder* to see them in the *Topology* view.
-
-[discrete]
-== Prerequisites
-
-* You have access to an {product-title} cluster and have switched to xref:../../web_console/web-console-overview.adoc#about-developer-perspective_web-console-overview[the *Developer* perspective].
-* You have the xref:../../cicd/pipelines/installing-pipelines.adoc#installing-pipelines[{pipelines-shortname} Operator installed] in your cluster.
-* You are a cluster administrator or a user with create and edit permissions.
-* You have created a project.
-
-
-include::modules/op-constructing-pipelines-using-pipeline-builder.adoc[leveloffset=+1]
-
-include::modules/op-creating-pipelines-along-with-applications.adoc[leveloffset=+1]
-
-include::modules/odc-adding-a-GitHub-repository-containing-pipelines.adoc[leveloffset=+1]
-
-include::modules/op-interacting-with-pipelines-using-the-developer-perspective.adoc[leveloffset=+1]
-
-include::modules/op-using-custom-pipeline-template-for-git-import.adoc[leveloffset=+1]
-
-include::modules/op-starting-pipelines-from-pipelines-view.adoc[leveloffset=+1]
-
-include::modules/op-starting-pipelines-from-topology-view.adoc[leveloffset=+1]
-
-include::modules/op-interacting-pipelines-from-topology-view.adoc[leveloffset=+1]
-
-include::modules/op-editing-pipelines.adoc[leveloffset=+1]
-
-include::modules/op-deleting-pipelines.adoc[leveloffset=+1]
-
-[role="_additional-resources"]
-[id="additional-resources-working-with-pipelines-using-the-developer-perspective"]
-== Additional resources
-
-* xref:../../cicd/pipelines/using-tekton-hub-with-openshift-pipelines.adoc#using-tekton-hub-with-openshift-pipelines[Using Tekton Hub with {pipelines-shortname}]
diff --git a/cicd/pipelines/working-with-pipelines-web-console.adoc b/cicd/pipelines/working-with-pipelines-web-console.adoc
new file mode 100644
index 000000000000..2a182a12f1f7
--- /dev/null
+++ b/cicd/pipelines/working-with-pipelines-web-console.adoc
@@ -0,0 +1,49 @@
+:_mod-docs-content-type: ASSEMBLY
+[id="working-with-pipelines-web-console"]
+= Working with {pipelines-title} in the web console
+include::_attributes/common-attributes.adoc[]
+:context: working-with-pipelines-web-console
+
+toc::[]
+
+You can use the *Administrator* or *Developer* perspective to create and modify `Pipeline`, `PipelineRun`, and `Repository` objects from the *Pipelines* page in the {product-title} web console.
+You can also use the *+Add* page in the *Developer* perspective of the web console to create CI/CD pipelines for your software delivery process.
+
+// Dev console
+include::modules/op-odc-pipelines-abstract.adoc[leveloffset=+1]
+
+[discrete]
+[id="prerequisites_working-with-pipelines-web-console"]
+== Prerequisites
+
+* You have access to an {product-title} cluster, and have xref:../../web_console/web-console-overview.adoc#about-developer-perspective_web-console-overview[switched to the *Developer* perspective].
+* You have the xref:../../cicd/pipelines/installing-pipelines.adoc#installing-pipelines[{pipelines-shortname} Operator installed] in your cluster.
+* You are a cluster administrator or a user with create and edit permissions.
+* You have created a project.
+
+include::modules/op-constructing-pipelines-using-pipeline-builder.adoc[leveloffset=+2]
+
+include::modules/op-creating-pipelines-along-with-applications.adoc[leveloffset=+2]
+
+include::modules/odc-adding-a-GitHub-repository-containing-pipelines.adoc[leveloffset=+2]
+
+include::modules/op-interacting-with-pipelines-using-the-developer-perspective.adoc[leveloffset=+2]
+
+include::modules/op-starting-pipelines-from-pipelines-view.adoc[leveloffset=+2]
+
+include::modules/op-starting-pipelines-from-topology-view.adoc[leveloffset=+2]
+
+include::modules/op-interacting-pipelines-from-topology-view.adoc[leveloffset=+2]
+
+include::modules/op-editing-pipelines.adoc[leveloffset=+2]
+
+include::modules/op-deleting-pipelines.adoc[leveloffset=+2]
+
+[role="_additional-resources"]
+[id="additional-resources_working-with-pipelines-web-console"]
+=== Additional resources
+
+* xref:../../cicd/pipelines/using-tekton-hub-with-openshift-pipelines.adoc#using-tekton-hub-with-openshift-pipelines[Using Tekton Hub with {pipelines-shortname}]
+
+// Admin console
+include::modules/op-creating-pipeline-templates-admin-console.adoc[leveloffset=+1]
diff --git a/cli_reference/developer_cli_odo/configuring-the-odo-cli.adoc b/cli_reference/developer_cli_odo/configuring-the-odo-cli.adoc
index db390d2e31ca..21019b3b87a5 100644
--- a/cli_reference/developer_cli_odo/configuring-the-odo-cli.adoc
+++ b/cli_reference/developer_cli_odo/configuring-the-odo-cli.adoc
@@ -1,5 +1,5 @@
////
-:_content-type: ASSEMBLY
+:_mod-docs-content-type: ASSEMBLY
[id='configuring-the-odo-cli']
= Configuring the odo CLI
include::_attributes/common-attributes.adoc[]
diff --git a/cli_reference/developer_cli_odo/creating-instances-of-services-managed-by-operators.adoc b/cli_reference/developer_cli_odo/creating-instances-of-services-managed-by-operators.adoc
index cfa8616d9b80..c36f7fa3c8e4 100644
--- a/cli_reference/developer_cli_odo/creating-instances-of-services-managed-by-operators.adoc
+++ b/cli_reference/developer_cli_odo/creating-instances-of-services-managed-by-operators.adoc
@@ -1,4 +1,4 @@
-:_content-type: ASSEMBLY
+:_mod-docs-content-type: ASSEMBLY
[id=creating-instances-of-services-managed-by-operators]
= Creating instances of services managed by Operators
include::_attributes/common-attributes.adoc[]
diff --git a/cli_reference/developer_cli_odo/creating_and_deploying_applications_with_odo/creating-a-java-application-with-a-database.adoc b/cli_reference/developer_cli_odo/creating_and_deploying_applications_with_odo/creating-a-java-application-with-a-database.adoc
index e30e041083b7..edfee771d9da 100644
--- a/cli_reference/developer_cli_odo/creating_and_deploying_applications_with_odo/creating-a-java-application-with-a-database.adoc
+++ b/cli_reference/developer_cli_odo/creating_and_deploying_applications_with_odo/creating-a-java-application-with-a-database.adoc
@@ -1,4 +1,4 @@
-:_content-type: ASSEMBLY
+:_mod-docs-content-type: ASSEMBLY
[id=creating-a-java-application-with-a-database]
= Creating a Java application with a database
include::_attributes/common-attributes.adoc[]
diff --git a/cli_reference/developer_cli_odo/creating_and_deploying_applications_with_odo/creating-a-multicomponent-application-with-odo.adoc b/cli_reference/developer_cli_odo/creating_and_deploying_applications_with_odo/creating-a-multicomponent-application-with-odo.adoc
index 6904fe98e44d..12680a720b6b 100644
--- a/cli_reference/developer_cli_odo/creating_and_deploying_applications_with_odo/creating-a-multicomponent-application-with-odo.adoc
+++ b/cli_reference/developer_cli_odo/creating_and_deploying_applications_with_odo/creating-a-multicomponent-application-with-odo.adoc
@@ -1,4 +1,4 @@
-:_content-type: ASSEMBLY
+:_mod-docs-content-type: ASSEMBLY
include::_attributes/common-attributes.adoc[]
[id='creating-a-multicomponent-application-with-odo']
= Creating a multicomponent application with `{odo-title}`
diff --git a/cli_reference/developer_cli_odo/creating_and_deploying_applications_with_odo/creating-a-single-component-application-with-odo.adoc b/cli_reference/developer_cli_odo/creating_and_deploying_applications_with_odo/creating-a-single-component-application-with-odo.adoc
index a5cbc0653c55..18d92631f06a 100644
--- a/cli_reference/developer_cli_odo/creating_and_deploying_applications_with_odo/creating-a-single-component-application-with-odo.adoc
+++ b/cli_reference/developer_cli_odo/creating_and_deploying_applications_with_odo/creating-a-single-component-application-with-odo.adoc
@@ -1,4 +1,4 @@
-:_content-type: ASSEMBLY
+:_mod-docs-content-type: ASSEMBLY
include::_attributes/common-attributes.adoc[]
[id='creating-a-single-component-application-with-odo']
= Creating a single-component application with {odo-title}
diff --git a/cli_reference/developer_cli_odo/creating_and_deploying_applications_with_odo/creating-an-application-with-a-database.adoc b/cli_reference/developer_cli_odo/creating_and_deploying_applications_with_odo/creating-an-application-with-a-database.adoc
index af8e2d948469..1449367714b6 100644
--- a/cli_reference/developer_cli_odo/creating_and_deploying_applications_with_odo/creating-an-application-with-a-database.adoc
+++ b/cli_reference/developer_cli_odo/creating_and_deploying_applications_with_odo/creating-an-application-with-a-database.adoc
@@ -1,4 +1,4 @@
-:_content-type: ASSEMBLY
+:_mod-docs-content-type: ASSEMBLY
[id=creating-an-application-with-a-database]
= Creating an application with a database
include::_attributes/common-attributes.adoc[]
diff --git a/cli_reference/developer_cli_odo/creating_and_deploying_applications_with_odo/debugging-applications-in-odo.adoc b/cli_reference/developer_cli_odo/creating_and_deploying_applications_with_odo/debugging-applications-in-odo.adoc
index 67d9f4478a59..1031a675671b 100644
--- a/cli_reference/developer_cli_odo/creating_and_deploying_applications_with_odo/debugging-applications-in-odo.adoc
+++ b/cli_reference/developer_cli_odo/creating_and_deploying_applications_with_odo/debugging-applications-in-odo.adoc
@@ -1,4 +1,4 @@
-:_content-type: ASSEMBLY
+:_mod-docs-content-type: ASSEMBLY
include::_attributes/common-attributes.adoc[]
[id='debugging-applications-in-odo']
= Debugging applications in `{odo-title}`
diff --git a/cli_reference/developer_cli_odo/creating_and_deploying_applications_with_odo/deleting-applications.adoc b/cli_reference/developer_cli_odo/creating_and_deploying_applications_with_odo/deleting-applications.adoc
index fba30e7d3f48..562c5978a065 100644
--- a/cli_reference/developer_cli_odo/creating_and_deploying_applications_with_odo/deleting-applications.adoc
+++ b/cli_reference/developer_cli_odo/creating_and_deploying_applications_with_odo/deleting-applications.adoc
@@ -1,4 +1,4 @@
-:_content-type: ASSEMBLY
+:_mod-docs-content-type: ASSEMBLY
[id='deleting-applications']
= Deleting applications
include::_attributes/common-attributes.adoc[]
diff --git a/cli_reference/developer_cli_odo/creating_and_deploying_applications_with_odo/sample-applications.adoc b/cli_reference/developer_cli_odo/creating_and_deploying_applications_with_odo/sample-applications.adoc
index 962f39a629ad..156a44efb7f4 100644
--- a/cli_reference/developer_cli_odo/creating_and_deploying_applications_with_odo/sample-applications.adoc
+++ b/cli_reference/developer_cli_odo/creating_and_deploying_applications_with_odo/sample-applications.adoc
@@ -1,4 +1,4 @@
-:_content-type: ASSEMBLY
+:_mod-docs-content-type: ASSEMBLY
[id="sample-applications"]
= Sample applications
include::_attributes/common-attributes.adoc[]
diff --git a/cli_reference/developer_cli_odo/creating_and_deploying_applications_with_odo/using-devfiles-in-odo.adoc b/cli_reference/developer_cli_odo/creating_and_deploying_applications_with_odo/using-devfiles-in-odo.adoc
index fec5f98f977a..bb1da638d524 100644
--- a/cli_reference/developer_cli_odo/creating_and_deploying_applications_with_odo/using-devfiles-in-odo.adoc
+++ b/cli_reference/developer_cli_odo/creating_and_deploying_applications_with_odo/using-devfiles-in-odo.adoc
@@ -1,4 +1,4 @@
-:_content-type: ASSEMBLY
+:_mod-docs-content-type: ASSEMBLY
[id="using-devfiles-in-odo"]
= Using devfiles in {odo-title}
include::_attributes/common-attributes.adoc[]
diff --git a/cli_reference/developer_cli_odo/creating_and_deploying_applications_with_odo/working-with-projects.adoc b/cli_reference/developer_cli_odo/creating_and_deploying_applications_with_odo/working-with-projects.adoc
index bc36206cd0d9..27d1053fa461 100644
--- a/cli_reference/developer_cli_odo/creating_and_deploying_applications_with_odo/working-with-projects.adoc
+++ b/cli_reference/developer_cli_odo/creating_and_deploying_applications_with_odo/working-with-projects.adoc
@@ -1,4 +1,4 @@
-:_content-type: ASSEMBLY
+:_mod-docs-content-type: ASSEMBLY
[id="working-with-projects"]
= Working with projects
include::_attributes/common-attributes.adoc[]
diff --git a/cli_reference/developer_cli_odo/creating_and_deploying_applications_with_odo/working-with-storage.adoc b/cli_reference/developer_cli_odo/creating_and_deploying_applications_with_odo/working-with-storage.adoc
index 87ecdf93ac03..5080b4b1d109 100644
--- a/cli_reference/developer_cli_odo/creating_and_deploying_applications_with_odo/working-with-storage.adoc
+++ b/cli_reference/developer_cli_odo/creating_and_deploying_applications_with_odo/working-with-storage.adoc
@@ -1,4 +1,4 @@
-:_content-type: ASSEMBLY
+:_mod-docs-content-type: ASSEMBLY
[id='working-with-storage']
= Working with storage
include::_attributes/common-attributes.adoc[]
diff --git a/cli_reference/developer_cli_odo/installing-odo.adoc b/cli_reference/developer_cli_odo/installing-odo.adoc
index 813a0f156199..6d23d7b9e190 100644
--- a/cli_reference/developer_cli_odo/installing-odo.adoc
+++ b/cli_reference/developer_cli_odo/installing-odo.adoc
@@ -1,5 +1,5 @@
////
-:_content-type: ASSEMBLY
+:_mod-docs-content-type: ASSEMBLY
[id='installing-odo']
= Installing odo
include::_attributes/common-attributes.adoc[]
diff --git a/cli_reference/developer_cli_odo/managing-environment-variables-in-odo.adoc b/cli_reference/developer_cli_odo/managing-environment-variables-in-odo.adoc
index 1b37feceb671..e46c1fc6f5b0 100644
--- a/cli_reference/developer_cli_odo/managing-environment-variables-in-odo.adoc
+++ b/cli_reference/developer_cli_odo/managing-environment-variables-in-odo.adoc
@@ -1,4 +1,4 @@
-:_content-type: ASSEMBLY
+:_mod-docs-content-type: ASSEMBLY
[id='managing-environment-variables']
= Managing environment variables
include::_attributes/common-attributes.adoc[]
diff --git a/cli_reference/developer_cli_odo/odo-architecture.adoc b/cli_reference/developer_cli_odo/odo-architecture.adoc
index 9a68934dc11e..6a66ea3994d7 100644
--- a/cli_reference/developer_cli_odo/odo-architecture.adoc
+++ b/cli_reference/developer_cli_odo/odo-architecture.adoc
@@ -1,4 +1,4 @@
-:_content-type: ASSEMBLY
+:_mod-docs-content-type: ASSEMBLY
[id="odo-architecture"]
= odo architecture
include::_attributes/common-attributes.adoc[]
diff --git a/cli_reference/developer_cli_odo/odo-cli-reference.adoc b/cli_reference/developer_cli_odo/odo-cli-reference.adoc
index faa2e5650493..54e107acd36e 100644
--- a/cli_reference/developer_cli_odo/odo-cli-reference.adoc
+++ b/cli_reference/developer_cli_odo/odo-cli-reference.adoc
@@ -1,5 +1,5 @@
////
-:_content-type: ASSEMBLY
+:_mod-docs-content-type: ASSEMBLY
[id='odo-cli-reference']
= odo CLI reference
include::_attributes/common-attributes.adoc[]
diff --git a/cli_reference/developer_cli_odo/odo-release-notes.adoc b/cli_reference/developer_cli_odo/odo-release-notes.adoc
index 645fca7f25c9..d751b99c46ed 100644
--- a/cli_reference/developer_cli_odo/odo-release-notes.adoc
+++ b/cli_reference/developer_cli_odo/odo-release-notes.adoc
@@ -1,5 +1,5 @@
////
-:_content-type: ASSEMBLY
+:_mod-docs-content-type: ASSEMBLY
[id='odo-release-notes']
= `{odo-title}` release notes
include::_attributes/common-attributes.adoc[]
diff --git a/cli_reference/developer_cli_odo/understanding-odo.adoc b/cli_reference/developer_cli_odo/understanding-odo.adoc
index c9901902f1df..a155e5908f2a 100644
--- a/cli_reference/developer_cli_odo/understanding-odo.adoc
+++ b/cli_reference/developer_cli_odo/understanding-odo.adoc
@@ -1,5 +1,5 @@
////
-:_content-type: ASSEMBLY
+:_mod-docs-content-type: ASSEMBLY
[id="understanding-odo"]
= Understanding odo
include::_attributes/common-attributes.adoc[]
@@ -16,5 +16,5 @@ Red Hat OpenShift Developer CLI (`odo`) is a tool for creating applications on {
include::modules/odo-key-features.adoc[leveloffset=+1]
include::modules/odo-core-concepts.adoc[leveloffset=+1]
include::modules/odo-listing-components.adoc[leveloffset=+1]
-include::modules/odo-telemetry.adoc[leveloffset=+1]
+include::modules/odo-telemetry.adoc[leveloffset=+1]
////
\ No newline at end of file
diff --git a/cli_reference/developer_cli_odo/using_odo_in_a_restricted_environment/about-odo-in-a-restricted-environment.adoc b/cli_reference/developer_cli_odo/using_odo_in_a_restricted_environment/about-odo-in-a-restricted-environment.adoc
index 594c44db70b1..9e1cd9f03ffe 100644
--- a/cli_reference/developer_cli_odo/using_odo_in_a_restricted_environment/about-odo-in-a-restricted-environment.adoc
+++ b/cli_reference/developer_cli_odo/using_odo_in_a_restricted_environment/about-odo-in-a-restricted-environment.adoc
@@ -1,4 +1,4 @@
-:_content-type: ASSEMBLY
+:_mod-docs-content-type: ASSEMBLY
include::_attributes/common-attributes.adoc[]
[id="about-odo-in-a-restricted-environment"]
= About {odo-title} in a restricted environment
diff --git a/cli_reference/developer_cli_odo/using_odo_in_a_restricted_environment/creating-and-deploying-a-component-to-the-disconnected-cluster.adoc b/cli_reference/developer_cli_odo/using_odo_in_a_restricted_environment/creating-and-deploying-a-component-to-the-disconnected-cluster.adoc
index 5904d648335f..590353c08776 100644
--- a/cli_reference/developer_cli_odo/using_odo_in_a_restricted_environment/creating-and-deploying-a-component-to-the-disconnected-cluster.adoc
+++ b/cli_reference/developer_cli_odo/using_odo_in_a_restricted_environment/creating-and-deploying-a-component-to-the-disconnected-cluster.adoc
@@ -1,4 +1,4 @@
-:_content-type: ASSEMBLY
+:_mod-docs-content-type: ASSEMBLY
[id="creating-and-deploying-a-component-to-the-disconnected-cluster"]
= Creating and deploying a component to the disconnected cluster
include::_attributes/common-attributes.adoc[]
diff --git a/cli_reference/developer_cli_odo/using_odo_in_a_restricted_environment/creating-and-deploying-devfile-components-to-the-disconnected-cluster.adoc b/cli_reference/developer_cli_odo/using_odo_in_a_restricted_environment/creating-and-deploying-devfile-components-to-the-disconnected-cluster.adoc
index c4c0e7da0e0f..b34911fdbcb9 100644
--- a/cli_reference/developer_cli_odo/using_odo_in_a_restricted_environment/creating-and-deploying-devfile-components-to-the-disconnected-cluster.adoc
+++ b/cli_reference/developer_cli_odo/using_odo_in_a_restricted_environment/creating-and-deploying-devfile-components-to-the-disconnected-cluster.adoc
@@ -1,4 +1,4 @@
-:_content-type: ASSEMBLY
+:_mod-docs-content-type: ASSEMBLY
[id="creating-and-deploying-devfile-components-to-the-disconnected-cluster"]
= Creating and deploying devfile components to the disconnected cluster
include::_attributes/common-attributes.adoc[]
diff --git a/cli_reference/developer_cli_odo/using_odo_in_a_restricted_environment/pushing-the-odo-init-image-to-the-restricted-cluster-registry.adoc b/cli_reference/developer_cli_odo/using_odo_in_a_restricted_environment/pushing-the-odo-init-image-to-the-restricted-cluster-registry.adoc
index 30cecbd5d8e5..569806f5d034 100644
--- a/cli_reference/developer_cli_odo/using_odo_in_a_restricted_environment/pushing-the-odo-init-image-to-the-restricted-cluster-registry.adoc
+++ b/cli_reference/developer_cli_odo/using_odo_in_a_restricted_environment/pushing-the-odo-init-image-to-the-restricted-cluster-registry.adoc
@@ -1,4 +1,4 @@
-:_content-type: ASSEMBLY
+:_mod-docs-content-type: ASSEMBLY
[id="pushing-the-odo-init-image-to-the-restricted-cluster-registry"]
include::_attributes/common-attributes.adoc[]
= Pushing the {odo-title} init image to the restricted cluster registry
diff --git a/cli_reference/index.adoc b/cli_reference/index.adoc
index 6b4bbc18d69a..89c6fb66bac6 100644
--- a/cli_reference/index.adoc
+++ b/cli_reference/index.adoc
@@ -1,4 +1,4 @@
-:_content-type: ASSEMBLY
+:_mod-docs-content-type: ASSEMBLY
[id="cli-tools-overview"]
= {product-title} CLI tools overview
include::_attributes/common-attributes.adoc[]
@@ -6,7 +6,11 @@ include::_attributes/common-attributes.adoc[]
toc::[]
-A user performs a range of operations while working on {product-title} such as the following:
+A user performs a range of operations while working on {product-title}
+ifdef::openshift-rosa[]
+(ROSA)
+endif::openshift-rosa[]
+such as the following:
* Managing clusters
* Building, deploying, and managing applications
@@ -14,15 +18,41 @@ A user performs a range of operations while working on {product-title} such as t
* Developing Operators
* Creating and maintaining Operator catalogs
-{product-title} offers a set of command-line interface (CLI) tools that simplify these tasks by enabling users to perform various administration and development operations from the terminal.
+ifndef::openshift-rosa[]
+{product-title}
+endif::openshift-rosa[]
+ifdef::openshift-rosa[]
+ROSA
+endif::openshift-rosa[]
+offers a set of command-line interface (CLI) tools that simplify these tasks by enabling users to perform various administration and development operations from the terminal.
These tools expose simple commands to manage the applications, as well as interact with each component of the system.
[id="cli-tools-list"]
== List of CLI tools
-The following set of CLI tools are available in {product-title}:
+The following set of CLI tools are available in
+ifndef::openshift-rosa[]
+{product-title}:
+endif::openshift-rosa[]
+ifdef::openshift-rosa[]
+ROSA:
+endif::openshift-rosa[]
-* xref:../cli_reference/openshift_cli/getting-started-cli.adoc#cli-getting-started[OpenShift CLI (oc)]: This is the most commonly used CLI tool by {product-title} users. It helps both cluster administrators and developers to perform end-to-end operations across {product-title} using the terminal. Unlike the web console, it allows the user to work directly with the project source code using command scripts.
+* xref:../cli_reference/openshift_cli/getting-started-cli.adoc#cli-getting-started[OpenShift CLI (`oc`)]:
+ifndef::openshift-rosa[]
+This is the most commonly used CLI tool by {product-title} users.
+endif::openshift-rosa[]
+ifdef::openshift-rosa[]
+This is one of the more commonly used developer CLI tools.
+endif::openshift-rosa[]
+It helps both cluster administrators and developers to perform end-to-end operations across
+ifndef::openshift-rosa[]
+{product-title}
+endif::openshift-rosa[]
+ifdef::openshift-rosa[]
+ROSA
+endif::openshift-rosa[]
+using the terminal. Unlike the web console, it allows the user to work directly with the project source code using command scripts.
* xref:../cli_reference/kn-cli-tools.adoc#kn-cli-tools[Knative CLI (kn)]: The Knative (`kn`) CLI tool provides simple and intuitive terminal commands that can be used to interact with OpenShift Serverless components, such as Knative Serving and Eventing.
@@ -31,3 +61,8 @@ The following set of CLI tools are available in {product-title}:
* xref:../cli_reference/opm/cli-opm-install.adoc#cli-opm-install[opm CLI]: The `opm` CLI tool helps the Operator developers and cluster administrators to create and maintain the catalogs of Operators from the terminal.
* xref:../cli_reference/osdk/cli-osdk-install.adoc#cli-osdk-install[Operator SDK]: The Operator SDK, a component of the Operator Framework, provides a CLI tool that Operator developers can use to build, test, and deploy an Operator from the terminal. It simplifies the process of building Kubernetes-native applications, which can require deep, application-specific operational knowledge.
+
+
+ifdef::openshift-rosa[]
+* xref:../cli_reference/rosa_cli/rosa-get-started-cli.adoc#rosa-get-started-cli[ROSA CLI (`rosa`)]: Use the `rosa` CLI to create, update, manage, and delete ROSA clusters and resources.
+endif::openshift-rosa[]
\ No newline at end of file
diff --git a/cli_reference/kn-cli-tools.adoc b/cli_reference/kn-cli-tools.adoc
index e5d4c358d7ff..cd42febc0678 100644
--- a/cli_reference/kn-cli-tools.adoc
+++ b/cli_reference/kn-cli-tools.adoc
@@ -1,4 +1,4 @@
-:_content-type: ASSEMBLY
+:_mod-docs-content-type: ASSEMBLY
include::_attributes/common-attributes.adoc[]
[id="kn-cli-tools"]
= Knative CLI for use with {ServerlessProductName}
diff --git a/cli_reference/odo-important-update.adoc b/cli_reference/odo-important-update.adoc
index bbb939cd21f1..c89230948f7f 100644
--- a/cli_reference/odo-important-update.adoc
+++ b/cli_reference/odo-important-update.adoc
@@ -1,8 +1,8 @@
// Module included in the following assemblies:
-//
+//
// * cli_reference/developer_cli_odo/odo-important-update.adoc
-:_content-type: CONCEPT
+:_mod-docs-content-type: CONCEPT
[id="odo-important_update_{context}"]
include::_attributes/attributes-openshift-dedicated.adoc[]
include::_attributes/common-attributes.adoc[]
@@ -11,10 +11,10 @@ include::_attributes/common-attributes.adoc[]
toc::[]
-Red Hat does not provide information about `{odo-title}` on the {OCP} documentation site. See the link:https://odo.dev/docs/introduction[documentation] maintained by Red Hat and the upstream community for documentation information related to `{odo-title}`.
+Red Hat does not provide information about `{odo-title}` on the {product-title} documentation site. See the link:https://odo.dev/docs/introduction[documentation] maintained by Red Hat and the upstream community for information related to `{odo-title}`.
[IMPORTANT]
====
-For the materials maintained by the upstream community, Red Hat provides support under link:https://access.redhat.com/solutions/5893251[Cooperative Community Support].
+For the materials maintained by the upstream community, Red Hat provides support under link:https://access.redhat.com/solutions/5893251[Cooperative Community Support].
====
diff --git a/cli_reference/openshift_cli/administrator-cli-commands.adoc b/cli_reference/openshift_cli/administrator-cli-commands.adoc
index 3b5c3da2e911..45dddfd10f03 100644
--- a/cli_reference/openshift_cli/administrator-cli-commands.adoc
+++ b/cli_reference/openshift_cli/administrator-cli-commands.adoc
@@ -1,4 +1,4 @@
-:_content-type: ASSEMBLY
+:_mod-docs-content-type: ASSEMBLY
[id="cli-administrator-commands"]
= OpenShift CLI administrator command reference
include::_attributes/common-attributes.adoc[]
diff --git a/cli_reference/openshift_cli/configuring-cli.adoc b/cli_reference/openshift_cli/configuring-cli.adoc
index 0eace0d03e61..7b91e26c250f 100644
--- a/cli_reference/openshift_cli/configuring-cli.adoc
+++ b/cli_reference/openshift_cli/configuring-cli.adoc
@@ -1,4 +1,4 @@
-:_content-type: ASSEMBLY
+:_mod-docs-content-type: ASSEMBLY
[id="cli-configuring-cli"]
= Configuring the OpenShift CLI
include::_attributes/common-attributes.adoc[]
diff --git a/cli_reference/openshift_cli/developer-cli-commands.adoc b/cli_reference/openshift_cli/developer-cli-commands.adoc
index 517e2cb7a777..4d9a991d2674 100644
--- a/cli_reference/openshift_cli/developer-cli-commands.adoc
+++ b/cli_reference/openshift_cli/developer-cli-commands.adoc
@@ -1,4 +1,4 @@
-:_content-type: ASSEMBLY
+:_mod-docs-content-type: ASSEMBLY
[id="cli-developer-commands"]
= OpenShift CLI developer command reference
include::_attributes/common-attributes.adoc[]
diff --git a/cli_reference/openshift_cli/extending-cli-plugins.adoc b/cli_reference/openshift_cli/extending-cli-plugins.adoc
index 549e986f667f..8206dec93d30 100644
--- a/cli_reference/openshift_cli/extending-cli-plugins.adoc
+++ b/cli_reference/openshift_cli/extending-cli-plugins.adoc
@@ -1,4 +1,4 @@
-:_content-type: ASSEMBLY
+:_mod-docs-content-type: ASSEMBLY
[id="cli-extend-plugins"]
= Extending the OpenShift CLI with plugins
include::_attributes/common-attributes.adoc[]
@@ -7,7 +7,14 @@ include::_attributes/common-attributes.adoc[]
toc::[]
You can write and install plugins to build on the default `oc` commands,
-allowing you to perform new and more complex tasks with the {product-title} CLI.
+allowing you to perform new and more complex tasks with the
+ifndef::openshift-dedicated,openshift-rosa[]
+{product-title}
+endif::openshift-dedicated,openshift-rosa[]
+ifdef::openshift-rosa,openshift-dedicated[]
+OpenShift
+endif::openshift-rosa,openshift-dedicated[]
+CLI.
// Writing CLI plugins
include::modules/cli-extending-plugins-writing.adoc[leveloffset=+1]
diff --git a/cli_reference/openshift_cli/getting-started-cli.adoc b/cli_reference/openshift_cli/getting-started-cli.adoc
index bff334b936e1..1faa3b9da961 100644
--- a/cli_reference/openshift_cli/getting-started-cli.adoc
+++ b/cli_reference/openshift_cli/getting-started-cli.adoc
@@ -1,4 +1,4 @@
-:_content-type: ASSEMBLY
+:_mod-docs-content-type: ASSEMBLY
[id="cli-getting-started"]
= Getting started with the OpenShift CLI
include::_attributes/common-attributes.adoc[]
@@ -40,6 +40,9 @@ include::modules/cli-installing-cli-brew.adoc[leveloffset=+2]
// Logging in to the CLI
include::modules/cli-logging-in.adoc[leveloffset=+1]
+// Logging in to the CLI by using the web
+include::modules/cli-logging-in-web.adoc[leveloffset=+1]
+
// Using the CLI
include::modules/cli-using-cli.adoc[leveloffset=+1]
diff --git a/cli_reference/openshift_cli/managing-cli-plugins-krew.adoc b/cli_reference/openshift_cli/managing-cli-plugins-krew.adoc
index 25d0e4420b04..e5effd419063 100644
--- a/cli_reference/openshift_cli/managing-cli-plugins-krew.adoc
+++ b/cli_reference/openshift_cli/managing-cli-plugins-krew.adoc
@@ -1,4 +1,4 @@
-:_content-type: ASSEMBLY
+:_mod-docs-content-type: ASSEMBLY
[id="managing-cli-plugin-krew"]
= Managing CLI plugins with Krew
include::_attributes/common-attributes.adoc[]
diff --git a/cli_reference/openshift_cli/managing-cli-profiles.adoc b/cli_reference/openshift_cli/managing-cli-profiles.adoc
index 8978acebfc22..f35d40ec2c1e 100644
--- a/cli_reference/openshift_cli/managing-cli-profiles.adoc
+++ b/cli_reference/openshift_cli/managing-cli-profiles.adoc
@@ -1,4 +1,4 @@
-:_content-type: ASSEMBLY
+:_mod-docs-content-type: ASSEMBLY
[id="managing-cli-profiles"]
= Managing CLI profiles
include::_attributes/common-attributes.adoc[]
@@ -7,13 +7,19 @@ include::_attributes/common-attributes.adoc[]
toc::[]
A CLI configuration file allows you to configure different profiles, or contexts, for use with the xref:../../cli_reference/index.adoc#cli-tools-overview[CLI tools overview]. A context consists of
-ifndef::microshift[]
+ifndef::microshift,openshift-dedicated,openshift-rosa[]
xref:../../authentication/understanding-authentication.adoc#understanding-authentication[user authentication]
-endif::[]
+endif::microshift,openshift-dedicated,openshift-rosa[]
ifdef::microshift[]
user authentication
endif::[]
-and {product-title} server information associated with a _nickname_.
+ifndef::openshift-rosa[]
+an {product-title}
+endif::openshift-rosa[]
+ifdef::openshift-rosa[]
+the {product-title} (ROSA)
+endif::openshift-rosa[]
+server information associated with a _nickname_.
include::modules/about-cli-profiles-switch.adoc[leveloffset=+1]
diff --git a/cli_reference/openshift_cli/usage-oc-kubectl.adoc b/cli_reference/openshift_cli/usage-oc-kubectl.adoc
index a6c2cf909381..0e841348376c 100644
--- a/cli_reference/openshift_cli/usage-oc-kubectl.adoc
+++ b/cli_reference/openshift_cli/usage-oc-kubectl.adoc
@@ -1,25 +1,57 @@
-:_content-type: ASSEMBLY
+:_mod-docs-content-type: ASSEMBLY
[id="usage-oc-kubectl"]
= Usage of oc and kubectl commands
include::_attributes/common-attributes.adoc[]
:context: usage-oc-kubectl
-The Kubernetes command-line interface (CLI), `kubectl`, can be used to run commands against a Kubernetes cluster. Because {product-title} is a certified Kubernetes distribution, you can use the supported `kubectl` binaries that ship with {product-title}, or you can gain extended functionality by using the `oc` binary.
+The Kubernetes command-line interface (CLI), `kubectl`, can be used to run commands against a Kubernetes cluster. Because {product-title}
+ifdef::openshift-rosa[]
+(ROSA)
+endif::openshift-rosa[]
+is a certified Kubernetes distribution, you can use the supported `kubectl` binaries that ship with
+ifndef::openshift-rosa[]
+{product-title}
+endif::openshift-rosa[]
+ifdef::openshift-rosa[]
+ROSA
+endif::openshift-rosa[]
+, or you can gain extended functionality by using the `oc` binary.
== The oc binary
-The `oc` binary offers the same capabilities as the `kubectl` binary, but it extends to natively support additional {product-title} features, including:
-
-* **Full support for {product-title} resources**
+The `oc` binary offers the same capabilities as the `kubectl` binary, but it extends to natively support additional
+ifndef::openshift-rosa[]
+{product-title}
+endif::openshift-rosa[]
+ifdef::openshift-rosa[]
+ROSA
+endif::openshift-rosa[]
+features, including:
+
+* **Full support for
+ifndef::openshift-rosa[]
+{product-title}
+endif::openshift-rosa[]
+ifdef::openshift-rosa[]
+ROSA
+endif::openshift-rosa[]
+resources**
+
-Resources such as `DeploymentConfig`, `BuildConfig`, `Route`, `ImageStream`, and `ImageStreamTag` objects are specific to {product-title} distributions, and build upon standard Kubernetes primitives.
+Resources such as `DeploymentConfig`, `BuildConfig`, `Route`, `ImageStream`, and `ImageStreamTag` objects are specific to
+ifndef::openshift-rosa[]
+{product-title}
+endif::openshift-rosa[]
+ifdef::openshift-rosa[]
+ROSA
+endif::openshift-rosa[]
+distributions, and build upon standard Kubernetes primitives.
+
* **Authentication**
+
-ifndef::microshift[]
-The `oc` binary offers a built-in `login` command for authentication and lets you work with {product-title} projects, which map Kubernetes namespaces to authenticated users.
+ifndef::microshift,openshift-rosa,openshift-dedicated[]
+The `oc` binary offers a built-in `login` command for authentication and lets you work with projects, which map Kubernetes namespaces to authenticated users.
Read xref:../../authentication/understanding-authentication.adoc#understanding-authentication[Understanding authentication] for more information.
-endif::[]
+endif::microshift,openshift-rosa,openshift-dedicated[]
+
ifdef::microshift[]
The `oc` binary offers a built-in `login` command for authentication to {product-title}.
@@ -31,7 +63,21 @@ The additional command `oc new-app`, for example, makes it easier to get new app
[IMPORTANT]
====
-If you installed an earlier version of the `oc` binary, you cannot use it to complete all of the commands in {product-title} {product-version}. If you want the latest features, you must download and install the latest version of the `oc` binary corresponding to your {product-title} server version.
+If you installed an earlier version of the `oc` binary, you cannot use it to complete all of the commands in
+ifndef::openshift-rosa[]
+{product-title} {product-version}
+endif::openshift-rosa[]
+ifdef::openshift-rosa[]
+ROSA
+endif::openshift-rosa[]
+. If you want the latest features, you must download and install the latest version of the `oc` binary corresponding to your
+ifndef::openshift-rosa[]
+{product-title}
+endif::openshift-rosa[]
+ifdef::openshift-rosa[]
+ROSA
+endif::openshift-rosa[]
+server version.
====
Non-security API changes will involve, at minimum, two minor releases (4.1 to 4.2 to 4.3, for example) to allow older `oc` binaries to update. Using new capabilities might require newer `oc` binaries. A 4.3 server might have additional capabilities that a 4.2 `oc` binary cannot use and a 4.3 `oc` binary might have additional capabilities that are unsupported by a 4.2 server.
@@ -62,7 +108,21 @@ image:redcircle-3.png[] `oc` client might provide options and features that migh
== The kubectl binary
-The `kubectl` binary is provided as a means to support existing workflows and scripts for new {product-title} users coming from a standard Kubernetes environment, or for those who prefer to use the `kubectl` CLI. Existing users of `kubectl` can continue to use the binary to interact with Kubernetes primitives, with no changes required to the {product-title} cluster.
+The `kubectl` binary is provided as a means to support existing workflows and scripts for new
+ifndef::openshift-rosa[]
+{product-title}
+endif::openshift-rosa[]
+ifdef::openshift-rosa[]
+ROSA
+endif::openshift-rosa[]
+users coming from a standard Kubernetes environment, or for those who prefer to use the `kubectl` CLI. Existing users of `kubectl` can continue to use the binary to interact with Kubernetes primitives, with no changes required to the
+ifndef::openshift-rosa[]
+{product-title}
+endif::openshift-rosa[]
+ifdef::openshift-rosa[]
+ROSA
+endif::openshift-rosa[]
+cluster.
You can install the supported `kubectl` binary by following the steps to xref:../../cli_reference/openshift_cli/getting-started-cli.adoc#cli-installing-cli_cli-developer-commands[Install the OpenShift CLI]. The `kubectl` binary is included in the archive if you download the binary, or is installed when you install the CLI by using an RPM.
diff --git a/cli_reference/opm/cli-opm-install.adoc b/cli_reference/opm/cli-opm-install.adoc
index 70d2c01c01bf..ba076db5f5a3 100644
--- a/cli_reference/opm/cli-opm-install.adoc
+++ b/cli_reference/opm/cli-opm-install.adoc
@@ -1,4 +1,4 @@
-:_content-type: ASSEMBLY
+:_mod-docs-content-type: ASSEMBLY
[id="cli-opm-install"]
= Installing the opm CLI
include::_attributes/common-attributes.adoc[]
@@ -8,17 +8,20 @@ toc::[]
include::modules/olm-about-opm.adoc[leveloffset=+1]
+ifndef::openshift-rosa,openshift-dedicated[]
[role="_additional-resources"]
.Additional resources
* See xref:../../operators/understanding/olm-packaging-format.adoc#olm-bundle-format_olm-packaging-format[Operator Framework packaging format] for more information about the bundle format.
* To create a bundle image using the Operator SDK, see
xref:../../operators/operator_sdk/osdk-working-bundle-images.adoc#osdk-working-bundle-images[Working with bundle images].
+endif::openshift-rosa,openshift-dedicated[]
include::modules/olm-installing-opm.adoc[leveloffset=+1]
-
+ifndef::openshift-rosa,openshift-dedicated[]
[role="_additional-resources"]
[id="opm-addtl-resources"]
== Additional resources
* See xref:../../operators/admin/olm-managing-custom-catalogs.adoc#olm-managing-custom-catalogs[Managing custom catalogs] for `opm` procedures including creating, updating, and pruning catalogs.
+endif::openshift-rosa,openshift-dedicated[]
\ No newline at end of file
diff --git a/cli_reference/opm/cli-opm-ref.adoc b/cli_reference/opm/cli-opm-ref.adoc
index ae7d744ef70f..a45b57f1fe1c 100644
--- a/cli_reference/opm/cli-opm-ref.adoc
+++ b/cli_reference/opm/cli-opm-ref.adoc
@@ -1,4 +1,4 @@
-:_content-type: ASSEMBLY
+:_mod-docs-content-type: ASSEMBLY
[id="cli-opm-ref"]
= opm CLI reference
include::_attributes/common-attributes.adoc[]
@@ -32,12 +32,15 @@ include::snippets/deprecated-feature.adoc[]
include::modules/opm-cli-ref-generate.adoc[leveloffset=+1]
include::modules/opm-cli-ref-index.adoc[leveloffset=+1]
+
+ifndef::openshift-rosa,openshift-dedicated[]
[role="_additional-resources"]
.Additional resources
* xref:../../operators/understanding/olm-packaging-format.adoc#olm-file-based-catalogs_olm-packaging-format[Operator Framework packaging format]
* xref:../../operators/admin/olm-managing-custom-catalogs.adoc#olm-managing-custom-catalogs-fb[Managing custom catalogs]
* xref:../../installing/disconnected_install/installing-mirroring-disconnected.adoc#installing-mirroring-disconnected[Mirroring images for a disconnected installation using the oc-mirror plugin]
+endif::openshift-rosa,openshift-dedicated[]
include::modules/opm-cli-ref-init.adoc[leveloffset=+1]
include::modules/opm-cli-ref-migrate.adoc[leveloffset=+1]
diff --git a/cli_reference/osdk/cli-osdk-install.adoc b/cli_reference/osdk/cli-osdk-install.adoc
index 085ad64eb12b..15afdc6bd957 100644
--- a/cli_reference/osdk/cli-osdk-install.adoc
+++ b/cli_reference/osdk/cli-osdk-install.adoc
@@ -1,4 +1,4 @@
-:_content-type: ASSEMBLY
+:_mod-docs-content-type: ASSEMBLY
[id="cli-osdk-install"]
= Installing the Operator SDK CLI
include::_attributes/common-attributes.adoc[]
@@ -9,13 +9,14 @@ toc::[]
The Operator SDK provides a command-line interface (CLI) tool that Operator developers can use to build, test, and deploy an Operator. You can install the Operator SDK CLI on your workstation so that you are prepared to start authoring your own Operators.
Operator authors with cluster administrator access to a Kubernetes-based cluster, such as {product-title}, can use the Operator SDK CLI to develop their own Operators based on Go, Ansible, Java, or Helm. link:https://kubebuilder.io/[Kubebuilder] is embedded into the Operator SDK as the scaffolding solution for Go-based Operators, which means existing Kubebuilder projects can be used as is with the Operator SDK and continue to work.
-
+ifndef::openshift-rosa,openshift-dedicated[]
See xref:../../operators/operator_sdk/osdk-about.adoc#osdk-about[Developing Operators] for full documentation on the Operator SDK.
[NOTE]
====
{product-title} {product-version} supports Operator SDK {osdk_ver}.
====
+endif::openshift-rosa,openshift-dedicated[]
include::modules/osdk-installing-cli-linux-macos.adoc[leveloffset=+1]
diff --git a/cli_reference/osdk/cli-osdk-ref.adoc b/cli_reference/osdk/cli-osdk-ref.adoc
index 10b97dfa8c90..fda321995c57 100644
--- a/cli_reference/osdk/cli-osdk-ref.adoc
+++ b/cli_reference/osdk/cli-osdk-ref.adoc
@@ -1,4 +1,4 @@
-:_content-type: ASSEMBLY
+:_mod-docs-content-type: ASSEMBLY
[id="cli-osdk-ref"]
= Operator SDK CLI reference
include::_attributes/common-attributes.adoc[]
@@ -13,8 +13,9 @@ The Operator SDK command-line interface (CLI) is a development kit designed to m
----
$ operator-sdk <command> [<subcommand>] [<argument>] [<flags>]
----
-
+ifndef::openshift-rosa,openshift-dedicated[]
See xref:../../operators/operator_sdk/osdk-about.adoc#osdk-about[Developing Operators] for full documentation on the Operator SDK.
+endif::openshift-rosa,openshift-dedicated[]
include::modules/osdk-cli-ref-bundle.adoc[leveloffset=+1]
include::modules/osdk-cli-ref-cleanup.adoc[leveloffset=+1]
@@ -23,26 +24,31 @@ include::modules/osdk-cli-ref-create.adoc[leveloffset=+1]
include::modules/osdk-cli-ref-generate.adoc[leveloffset=+1]
include::modules/osdk-cli-ref-generate-bundle.adoc[leveloffset=+2]
+ifndef::openshift-rosa,openshift-dedicated[]
[role="_additional-resources"]
.Additional resources
* See xref:../../operators/operator_sdk/osdk-working-bundle-images.adoc#osdk-bundle-deploy-olm_osdk-working-bundle-images[Bundling an Operator and deploying with Operator Lifecycle Manager] for a full procedure that includes using the `make bundle` command to call the `generate bundle` subcommand.
+endif::openshift-rosa,openshift-dedicated[]
include::modules/osdk-cli-ref-generate-kustomize.adoc[leveloffset=+2]
include::modules/osdk-cli-ref-init.adoc[leveloffset=+1]
include::modules/osdk-cli-ref-run.adoc[leveloffset=+1]
include::modules/osdk-cli-ref-run-bundle.adoc[leveloffset=+2]
-
+ifndef::openshift-rosa,openshift-dedicated[]
[role="_additional-resources"]
.Additional resources
* See xref:../../operators/understanding/olm/olm-understanding-operatorgroups.adoc#olm-operatorgroups-membership_olm-understanding-operatorgroups[Operator group membership] for details on possible install modes.
+endif::openshift-rosa,openshift-dedicated[]
include::modules/osdk-cli-ref-run-bundle-upgrade.adoc[leveloffset=+2]
include::modules/osdk-cli-ref-scorecard.adoc[leveloffset=+1]
+ifndef::openshift-rosa,openshift-dedicated[]
[role="_additional-resources"]
.Additional resources
* See xref:../../operators/operator_sdk/osdk-scorecard.adoc#osdk-scorecard[Validating Operators using the scorecard tool] for details about running the scorecard tool.
+endif::openshift-rosa,openshift-dedicated[]
\ No newline at end of file
diff --git a/rosa_cli/_attributes b/cli_reference/rosa_cli/_attributes
similarity index 100%
rename from rosa_cli/_attributes
rename to cli_reference/rosa_cli/_attributes
diff --git a/networking/network_observability/images b/cli_reference/rosa_cli/images
similarity index 100%
rename from networking/network_observability/images
rename to cli_reference/rosa_cli/images
diff --git a/networking/network_observability/modules b/cli_reference/rosa_cli/modules
similarity index 100%
rename from networking/network_observability/modules
rename to cli_reference/rosa_cli/modules
diff --git a/cli_reference/rosa_cli/rosa-checking-acct-version-cli.adoc b/cli_reference/rosa_cli/rosa-checking-acct-version-cli.adoc
new file mode 100644
index 000000000000..ee95a6d7745d
--- /dev/null
+++ b/cli_reference/rosa_cli/rosa-checking-acct-version-cli.adoc
@@ -0,0 +1,12 @@
+:_mod-docs-content-type: ASSEMBLY
+include::_attributes/attributes-openshift-dedicated.adoc[]
+[id="rosa-acct-version-cli"]
+= Checking account and version information with the ROSA CLI
+:context: rosa-checking-acct-version-cli
+
+toc::[]
+
+Use the following commands to check your account and version information.
+
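+For example, a minimal sketch of both checks (the `whoami` and `version` subcommands take no required arguments):
+
+[source,terminal]
+----
+$ rosa whoami
+$ rosa version
+----
+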
+include::modules/rosa-checking-account-version-cli-whoami.adoc[leveloffset=+1]
+include::modules/rosa-checking-account-version-cli-version.adoc[leveloffset=+1]
diff --git a/cli_reference/rosa_cli/rosa-checking-logs-cli.adoc b/cli_reference/rosa_cli/rosa-checking-logs-cli.adoc
new file mode 100644
index 000000000000..efcefdbf9062
--- /dev/null
+++ b/cli_reference/rosa_cli/rosa-checking-logs-cli.adoc
@@ -0,0 +1,12 @@
+:_mod-docs-content-type: ASSEMBLY
+include::_attributes/attributes-openshift-dedicated.adoc[]
+[id="rosa-checking-logs-cli"]
+= Checking logs with the ROSA CLI
+:context: rosa-checking-logs-cli
+
+toc::[]
+
+Use the following commands to check your install and uninstall logs.
+
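+For example, a minimal sketch, assuming a cluster named `mycluster` (the `--watch` flag streams the logs while the operation is in progress):
+
+[source,terminal]
+----
+$ rosa logs install --cluster=mycluster --watch
+$ rosa logs uninstall --cluster=mycluster --watch
+----
+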
+include::modules/rosa-logs-install.adoc[leveloffset=+1]
+include::modules/rosa-logs-uninstall.adoc[leveloffset=+1]
\ No newline at end of file
diff --git a/cli_reference/rosa_cli/rosa-get-started-cli.adoc b/cli_reference/rosa_cli/rosa-get-started-cli.adoc
new file mode 100644
index 000000000000..ff1d1a365b24
--- /dev/null
+++ b/cli_reference/rosa_cli/rosa-get-started-cli.adoc
@@ -0,0 +1,21 @@
+:_mod-docs-content-type: ASSEMBLY
+include::_attributes/attributes-openshift-dedicated.adoc[]
+[id="rosa-get-started-cli"]
+= Getting started with the ROSA CLI
+:context: rosa-getting-started-cli
+toc::[]
+
+include::modules/rosa-about.adoc[leveloffset=+1]
+include::modules/rosa-setting-up-cli.adoc[leveloffset=+1]
+include::modules/rosa-configure.adoc[leveloffset=+1]
+
+[role="_additional-resources"]
+.Additional resources
+
+* xref:../../cli_reference/rosa_cli/rosa-get-started-cli.adoc#rosa-setting-up-cli_rosa-getting-started-cli[Setting up the ROSA CLI]
+
+* xref:../../cli_reference/openshift_cli/getting-started-cli.adoc#cli-getting-started[Getting started with the OpenShift CLI]
+
+include::modules/rosa-initialize.adoc[leveloffset=+1]
+include::modules/rosa-using-bash-script.adoc[leveloffset=+1]
+include::modules/rosa-updating-rosa-cli.adoc[leveloffset=+1]
\ No newline at end of file
diff --git a/cli_reference/rosa_cli/rosa-manage-objects-cli.adoc b/cli_reference/rosa_cli/rosa-manage-objects-cli.adoc
new file mode 100644
index 000000000000..7fbc5ec0f0df
--- /dev/null
+++ b/cli_reference/rosa_cli/rosa-manage-objects-cli.adoc
@@ -0,0 +1,27 @@
+:_mod-docs-content-type: ASSEMBLY
+include::_attributes/attributes-openshift-dedicated.adoc[]
+[id="rosa-managing-objects-cli"]
+= Managing objects with the ROSA CLI
+
+:context: rosa-managing-objects-cli
+
+toc::[]
+
+You can manage objects with the {product-title} (ROSA) CLI, `rosa`. Example tasks include adding `dedicated-admin` users, managing clusters, and scheduling cluster upgrades.
+
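+For example, a minimal sketch of these tasks, assuming a cluster named `mycluster`:
+
+[source,terminal]
+----
+$ rosa create admin --cluster=mycluster
+$ rosa list users --cluster=mycluster
+$ rosa upgrade cluster --cluster=mycluster
+----
+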
+include::modules/rosa-common-commands.adoc[leveloffset=+1]
+include::modules/rosa-parent-commands.adoc[leveloffset=+1]
+include::modules/rosa-create-objects.adoc[leveloffset=+1]
+
+[role="_additional-resources"]
+== Additional resources
+* See xref:../../rosa_architecture/rosa_policy_service_definition/rosa-service-definition.adoc#rosa-sdpolicy-aws-instance-types_rosa-service-definition[AWS Instance types] for a list of supported instance types.
+* See xref:../../rosa_architecture/rosa-sts-about-iam-resources.adoc#rosa-sts-account-wide-roles-and-policies_rosa-sts-about-iam-resources[Account-wide IAM role and policy reference] for a list of IAM roles needed for cluster creation.
+* See xref:../../rosa_install_access_delete_clusters/rosa-sts-creating-a-cluster-with-customizations.adoc#rosa-sts-understanding-aws-account-association_rosa-sts-creating-a-cluster-with-customizations[Understanding AWS account association] for more information about the OCM role and user role.
+* See xref:../../rosa_install_access_delete_clusters/rosa_getting_started_iam/rosa-aws-prereqs.adoc#rosa-security-groups_prerequisites[Additional custom security groups] for information about security group requirements.
+
+include::modules/rosa-edit-objects.adoc[leveloffset=+1]
+include::modules/rosa-delete-objects.adoc[leveloffset=+1]
+include::modules/rosa-install-uninstall-addon.adoc[leveloffset=+1]
+include::modules/rosa-list-objects.adoc[leveloffset=+1]
+include::modules/rosa-upgrade-cluster-cli.adoc[leveloffset=+1]
diff --git a/networking/network_observability/snippets b/cli_reference/rosa_cli/snippets
similarity index 100%
rename from networking/network_observability/snippets
rename to cli_reference/rosa_cli/snippets
diff --git a/cli_reference/tkn_cli/installing-tkn.adoc b/cli_reference/tkn_cli/installing-tkn.adoc
index 5ea19aa0ff94..2d69b96261e7 100644
--- a/cli_reference/tkn_cli/installing-tkn.adoc
+++ b/cli_reference/tkn_cli/installing-tkn.adoc
@@ -1,4 +1,4 @@
-:_content-type: ASSEMBLY
+:_mod-docs-content-type: ASSEMBLY
[id='installing-tkn']
= Installing tkn
include::_attributes/common-attributes.adoc[]
@@ -8,8 +8,9 @@ toc::[]
Use the CLI tool to manage {pipelines-title} from a terminal. The following section describes how to install the CLI tool on different platforms.
+ifndef::openshift-rosa,openshift-dedicated[]
You can also find the URL to the latest binaries from the {product-title} web console by clicking the *?* icon in the upper-right corner and selecting *Command Line Tools*.
-
+endif::openshift-rosa,openshift-dedicated[]
:FeatureName: Running {pipelines-title} on ARM hardware
include::snippets/technology-preview.adoc[]
@@ -19,12 +20,14 @@ Both the archives and the RPMs contain the following executables:
* tkn
* tkn-pac
+ifndef::openshift-rosa,openshift-dedicated[]
* opc
+endif::openshift-rosa,openshift-dedicated[]
====
-
+ifndef::openshift-rosa,openshift-dedicated[]
:FeatureName: Running {pipelines-title} with the `opc` CLI tool
include::snippets/technology-preview.adoc[]
-
+endif::openshift-rosa,openshift-dedicated[]
// Install tkn on Linux
include::modules/op-installing-tkn-on-linux.adoc[leveloffset=+1]
diff --git a/cli_reference/tkn_cli/op-configuring-tkn.adoc b/cli_reference/tkn_cli/op-configuring-tkn.adoc
index 675db608c42c..1685dfd487e4 100644
--- a/cli_reference/tkn_cli/op-configuring-tkn.adoc
+++ b/cli_reference/tkn_cli/op-configuring-tkn.adoc
@@ -1,4 +1,4 @@
-:_content-type: ASSEMBLY
+:_mod-docs-content-type: ASSEMBLY
[id="op-configuring-tkn"]
= Configuring the OpenShift Pipelines tkn CLI
include::_attributes/common-attributes.adoc[]
diff --git a/cli_reference/tkn_cli/op-tkn-reference.adoc b/cli_reference/tkn_cli/op-tkn-reference.adoc
index 66e36f075552..dac51aac96dd 100644
--- a/cli_reference/tkn_cli/op-tkn-reference.adoc
+++ b/cli_reference/tkn_cli/op-tkn-reference.adoc
@@ -1,4 +1,4 @@
-:_content-type: ASSEMBLY
+:_mod-docs-content-type: ASSEMBLY
[id='op-tkn-reference']
= OpenShift Pipelines tkn reference
include::_attributes/common-attributes.adoc[]
diff --git a/rosa_support/_attributes b/cloud_experts_tutorials/_attributes
similarity index 100%
rename from rosa_support/_attributes
rename to cloud_experts_tutorials/_attributes
diff --git a/cloud_experts_tutorials/cloud-experts-aws-load-balancer-operator.adoc b/cloud_experts_tutorials/cloud-experts-aws-load-balancer-operator.adoc
new file mode 100644
index 000000000000..67d5930661e1
--- /dev/null
+++ b/cloud_experts_tutorials/cloud-experts-aws-load-balancer-operator.adoc
@@ -0,0 +1,418 @@
+:_mod-docs-content-type: ASSEMBLY
+[id="cloud-experts-aws-load-balancer-operator"]
+= Tutorial: AWS Load Balancer Operator on ROSA
+include::_attributes/attributes-openshift-dedicated.adoc[]
+:context: cloud-experts-aws-load-balancer-operator
+
+toc::[]
+
+//Mobb content metadata
+//Brought into ROSA product docs 2023-09-12
+//---
+//date: '2023-01-03T22:07:08.574151'
+//title: AWS Load Balancer Operator On ROSA
+//aliases: ['/docs/rosa/alb-sts']
+//tags: ["AWS", "ROSA"]
+//authors:
+// - Shaozhen Ding
+// - Paul Czarkowski
+//---
+
+include::snippets/mobb-support-statement.adoc[leveloffset=+1]
+
+[TIP]
+====
+Load Balancers created by the AWS Load Balancer Operator cannot be used for xref:../networking/routes/route-configuration.adoc#route-configuration[OpenShift Routes], and should only be used for individual services or ingress resources that do not need the full layer 7 capabilities of an OpenShift Route.
+====
+
+The link:https://kubernetes-sigs.github.io/aws-load-balancer-controller/[AWS Load Balancer Controller] manages AWS Elastic Load Balancers for a {product-title} (ROSA) cluster. The controller provisions link:https://docs.aws.amazon.com/elasticloadbalancing/latest/application/introduction.html[AWS Application Load Balancers (ALB)] when you create Kubernetes Ingress resources and link:https://docs.aws.amazon.com/elasticloadbalancing/latest/network/introduction.html[AWS Network Load Balancers (NLB)] when you create Kubernetes Service resources of type `LoadBalancer`.
+
+Compared with the default AWS in-tree load balancer provider, this controller provides advanced annotations for both ALBs and NLBs. Some advanced use cases are:
+
+* Using native Kubernetes Ingress objects with ALBs
+* Integrating ALBs with the AWS Web Application Firewall (WAF) service
+* Specifying custom NLB source IP ranges
+* Specifying custom NLB internal IP addresses
+
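+For example, the WAF integration in the second use case is driven by a single annotation on an Ingress resource. The following is a sketch only; the web ACL ARN shown is a hypothetical placeholder:
+
+[source,yaml]
+----
+metadata:
+  annotations:
+    # Hypothetical ARN; replace with the ARN of your own WAFv2 web ACL
+    alb.ingress.kubernetes.io/wafv2-acl-arn: arn:aws:wafv2:us-east-1:111122223333:regional/webacl/my-web-acl/a1b2c3d4
+----
+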
+The link:https://github.com/openshift/aws-load-balancer-operator[AWS Load Balancer Operator] is used to install, manage, and configure an instance of `aws-load-balancer-controller` in a ROSA cluster.
+
+[id="prerequisites_{context}"]
+== Prerequisites
+
+[NOTE]
+====
+AWS ALBs require a multi-AZ cluster, as well as three public subnets split across three AZs in the same VPC as the cluster. This makes ALBs unsuitable for many PrivateLink clusters. AWS NLBs do not have this restriction.
+====
+
+* xref:../rosa_install_access_delete_clusters/rosa-sts-creating-a-cluster-quickly.adoc#rosa-sts-creating-a-cluster-quickly[A multi-AZ ROSA classic cluster]
+* BYO VPC cluster
+* `aws` CLI
+* `oc` CLI
+
+[id="environment_{context}"]
+=== Environment
+
+* Prepare the environment variables:
++
+[source,terminal]
+----
+$ export AWS_PAGER=""
+$ export ROSA_CLUSTER_NAME=$(oc get infrastructure cluster -o=jsonpath="{.status.infrastructureName}" | sed 's/-[a-z0-9]\{5\}$//')
+$ export REGION=$(oc get infrastructure cluster -o=jsonpath="{.status.platformStatus.aws.region}")
+$ export OIDC_ENDPOINT=$(oc get authentication.config.openshift.io cluster -o jsonpath='{.spec.serviceAccountIssuer}' | sed 's|^https://||')
+$ export AWS_ACCOUNT_ID=$(aws sts get-caller-identity --query Account --output text)
+$ export SCRATCH="/tmp/${ROSA_CLUSTER_NAME}/alb-operator"
+$ mkdir -p ${SCRATCH}
+$ echo "Cluster: ${ROSA_CLUSTER_NAME}, Region: ${REGION}, OIDC Endpoint: ${OIDC_ENDPOINT}, AWS Account ID: ${AWS_ACCOUNT_ID}"
+----
+
+[id="aws-vpc-subnets_{context}"]
+=== AWS VPC and subnets
+
+[NOTE]
+====
+This section only applies to clusters that were deployed into existing VPCs. If you did not deploy your cluster into an existing VPC, skip this section and proceed to the installation section below.
+====
+
+. Set the following variables to the proper values for your ROSA deployment:
++
+[source,terminal]
+----
+$ export VPC_ID=
+$ export PUBLIC_SUBNET_IDS=
+$ export PRIVATE_SUBNET_IDS=
+$ export CLUSTER_NAME=$(oc get infrastructure cluster -o=jsonpath="{.status.infrastructureName}")
+----
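++
+Optionally, you can sanity-check the subnet layout before tagging. The following query is a sketch and assumes the variables above are already set:
++
+[source,terminal]
+----
+$ aws ec2 describe-subnets \
+    --filters "Name=vpc-id,Values=${VPC_ID}" \
+    --query 'Subnets[].{ID:SubnetId,AZ:AvailabilityZone}' \
+    --output table --region ${REGION}
+----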
++
+. Add a tag to your cluster's VPC with the cluster name:
++
+[source,terminal]
+----
+$ aws ec2 create-tags --resources ${VPC_ID} --tags Key=kubernetes.io/cluster/${CLUSTER_NAME},Value=owned --region ${REGION}
+----
++
+. Add a tag to your public subnets:
++
+[source,terminal]
+----
+$ aws ec2 create-tags \
+ --resources ${PUBLIC_SUBNET_IDS} \
+ --tags Key=kubernetes.io/role/elb,Value='' \
+ --region ${REGION}
+----
++
+. Add a tag to your private subnets:
++
+[source,terminal]
+----
+$ aws ec2 create-tags \
+ --resources "${PRIVATE_SUBNET_IDS}" \
+ --tags Key=kubernetes.io/role/internal-elb,Value='' \
+ --region ${REGION}
+----
+
+[id="installation_{context}"]
+== Installation
+
+. Create an AWS IAM policy for the AWS Load Balancer Controller:
++
+[NOTE]
+====
+The policy is sourced from link:https://raw.githubusercontent.com/kubernetes-sigs/aws-load-balancer-controller/v2.4.4/docs/install/iam_policy.json[the upstream AWS Load Balancer Controller policy] plus permission to create tags on subnets. This is required by the operator to function.
+====
++
+[source,terminal]
+----
+$ oc new-project aws-load-balancer-operator
+$ POLICY_ARN=$(aws iam list-policies --query \
+ "Policies[?PolicyName=='aws-load-balancer-operator-policy'].{ARN:Arn}" \
+ --output text)
+$ if [[ -z "${POLICY_ARN}" ]]; then
+ wget -O "${SCRATCH}/load-balancer-operator-policy.json" \
+ https://raw.githubusercontent.com/rh-mobb/documentation/main/content/docs/rosa/aws-load-balancer-operator/load-balancer-operator-policy.json
+ POLICY_ARN=$(aws --region "$REGION" --query Policy.Arn \
+ --output text iam create-policy \
+ --policy-name aws-load-balancer-operator-policy \
+ --policy-document "file://${SCRATCH}/load-balancer-operator-policy.json")
+fi
+$ echo $POLICY_ARN
+----
++
+. Create an AWS IAM trust policy for AWS Load Balancer Operator:
++
+[source,terminal]
+----
+$ cat << EOF > "${SCRATCH}/trust-policy.json"
+{
+ "Version": "2012-10-17",
+ "Statement": [
+ {
+ "Effect": "Allow",
+ "Condition": {
+ "StringEquals" : {
+ "${OIDC_ENDPOINT}:sub": ["system:serviceaccount:aws-load-balancer-operator:aws-load-balancer-operator-controller-manager", "system:serviceaccount:aws-load-balancer-operator:aws-load-balancer-controller-cluster"]
+ }
+ },
+ "Principal": {
+ "Federated": "arn:aws:iam::$AWS_ACCOUNT_ID:oidc-provider/${OIDC_ENDPOINT}"
+ },
+ "Action": "sts:AssumeRoleWithWebIdentity"
+ }
+ ]
+}
+EOF
+----
++
+. Create an AWS IAM role for the AWS Load Balancer Operator:
++
+[source,terminal]
+----
+$ ROLE_ARN=$(aws iam create-role --role-name "${ROSA_CLUSTER_NAME}-alb-operator" \
+ --assume-role-policy-document "file://${SCRATCH}/trust-policy.json" \
+ --query Role.Arn --output text)
+$ echo $ROLE_ARN
+
+$ aws iam attach-role-policy --role-name "${ROSA_CLUSTER_NAME}-alb-operator" \
+ --policy-arn $POLICY_ARN
+----
++
+. Create a secret for the AWS Load Balancer Operator to assume the newly created AWS IAM role:
++
+[source,terminal]
+----
+$ cat << EOF | oc apply -f -
+apiVersion: v1
+kind: Secret
+metadata:
+ name: aws-load-balancer-operator
+ namespace: aws-load-balancer-operator
+stringData:
+ credentials: |
+ [default]
+ role_arn = $ROLE_ARN
+ web_identity_token_file = /var/run/secrets/openshift/serviceaccount/token
+EOF
+----
++
+. Install the Red Hat AWS Load Balancer Operator:
++
+[source,terminal]
+----
+$ cat << EOF | oc apply -f -
+apiVersion: operators.coreos.com/v1
+kind: OperatorGroup
+metadata:
+ name: aws-load-balancer-operator
+ namespace: aws-load-balancer-operator
+spec:
+ upgradeStrategy: Default
+---
+apiVersion: operators.coreos.com/v1alpha1
+kind: Subscription
+metadata:
+ name: aws-load-balancer-operator
+ namespace: aws-load-balancer-operator
+spec:
+ channel: stable-v1.0
+ installPlanApproval: Automatic
+ name: aws-load-balancer-operator
+ source: redhat-operators
+ sourceNamespace: openshift-marketplace
+ startingCSV: aws-load-balancer-operator.v1.0.0
+EOF
+----
++
+. Deploy an instance of the AWS Load Balancer Controller using the operator:
++
+[NOTE]
+====
+If you get an error here, the Operator has not finished installing yet. Wait a minute and try again.
+====
++
+[source,terminal]
+----
+$ cat << EOF | oc apply -f -
+apiVersion: networking.olm.openshift.io/v1
+kind: AWSLoadBalancerController
+metadata:
+ name: cluster
+spec:
+ credentials:
+ name: aws-load-balancer-operator
+EOF
+----
++
+. Check that the operator and controller pods are both running:
++
+[source,terminal]
+----
+$ oc -n aws-load-balancer-operator get pods
+----
++
+You should see the following output. If you do not, wait a moment and retry:
++
+[source,terminal]
+----
+NAME READY STATUS RESTARTS AGE
+aws-load-balancer-controller-cluster-6ddf658785-pdp5d 1/1 Running 0 99s
+aws-load-balancer-operator-controller-manager-577d9ffcb9-w6zqn 2/2 Running 0 2m4s
+----
+
+[id="validating-the-deployment_{context}"]
+== Validating the deployment
+
+. Create a new project:
++
+[source,terminal]
+----
+$ oc new-project hello-world
+----
++
+. Deploy a hello world application:
++
+[source,terminal]
+----
+$ oc new-app -n hello-world --image=docker.io/openshift/hello-openshift
+----
++
+. Configure a NodePort service for the AWS ALB to connect to:
++
+[source,terminal]
+----
+$ cat << EOF | oc apply -f -
+apiVersion: v1
+kind: Service
+metadata:
+ name: hello-openshift-nodeport
+ namespace: hello-world
+spec:
+ ports:
+ - port: 80
+ targetPort: 8080
+ protocol: TCP
+ type: NodePort
+ selector:
+ deployment: hello-openshift
+EOF
+----
++
+. Deploy an AWS ALB using the AWS Load Balancer Operator:
++
+[source,terminal]
+----
+$ cat << EOF | oc apply -f -
+apiVersion: networking.k8s.io/v1
+kind: Ingress
+metadata:
+ name: hello-openshift-alb
+ namespace: hello-world
+ annotations:
+ alb.ingress.kubernetes.io/scheme: internet-facing
+spec:
+ ingressClassName: alb
+ rules:
+ - http:
+ paths:
+ - path: /
+ pathType: Exact
+ backend:
+ service:
+ name: hello-openshift-nodeport
+ port:
+ number: 80
+EOF
+----
++
+. Curl the AWS ALB Ingress endpoint to verify the hello world application is accessible:
++
+[NOTE]
+====
+AWS ALB provisioning takes a few minutes. If you receive an error that says `curl: (6) Could not resolve host`, wait a few moments and try again.
+====
++
+[source,terminal]
+----
+$ INGRESS=$(oc -n hello-world get ingress hello-openshift-alb \
+ -o jsonpath='{.status.loadBalancer.ingress[0].hostname}')
+$ curl "http://${INGRESS}"
+----
++
+.Example output
+[source,text]
+----
+Hello OpenShift!
+----
+
+. Deploy an AWS NLB for your hello world application:
++
+[source,terminal]
+----
+$ cat << EOF | oc apply -f -
+apiVersion: v1
+kind: Service
+metadata:
+ name: hello-openshift-nlb
+ namespace: hello-world
+ annotations:
+ service.beta.kubernetes.io/aws-load-balancer-type: external
+ service.beta.kubernetes.io/aws-load-balancer-nlb-target-type: instance
+ service.beta.kubernetes.io/aws-load-balancer-scheme: internet-facing
+spec:
+ ports:
+ - port: 80
+ targetPort: 8080
+ protocol: TCP
+ type: LoadBalancer
+ selector:
+ deployment: hello-openshift
+EOF
+----
++
+. Test the AWS NLB endpoint:
++
+[NOTE]
+====
+NLB provisioning takes a few minutes. If you receive an error that says `curl: (6) Could not resolve host`, wait a few moments and try again.
+====
++
+[source,terminal]
+----
+$ NLB=$(oc -n hello-world get service hello-openshift-nlb \
+ -o jsonpath='{.status.loadBalancer.ingress[0].hostname}')
+$ curl "http://${NLB}"
+----
++
+.Example output
+[source,text]
+----
+Hello OpenShift!
+----
+
+[id="cleaning-up_{context}"]
+== Cleaning up
+
+. Delete the hello world application namespace (and all the resources in the namespace):
++
+[source,terminal]
+----
+$ oc delete project hello-world
+----
++
+. Delete the AWS Load Balancer Operator and the AWS IAM roles:
++
+[source,terminal]
+----
+$ oc delete subscription aws-load-balancer-operator -n aws-load-balancer-operator
+$ aws iam detach-role-policy \
+ --role-name "${ROSA_CLUSTER_NAME}-alb-operator" \
+ --policy-arn $POLICY_ARN
+$ aws iam delete-role \
+ --role-name "${ROSA_CLUSTER_NAME}-alb-operator"
+----
++
+. Delete the AWS IAM policy:
++
+[source,terminal]
+----
+$ aws iam delete-policy --policy-arn $POLICY_ARN
+----
\ No newline at end of file
diff --git a/cloud_experts_tutorials/cloud-experts-aws-secret-manager.adoc b/cloud_experts_tutorials/cloud-experts-aws-secret-manager.adoc
new file mode 100644
index 000000000000..1f2ff9b47fc6
--- /dev/null
+++ b/cloud_experts_tutorials/cloud-experts-aws-secret-manager.adoc
@@ -0,0 +1,352 @@
+:_mod-docs-content-type: ASSEMBLY
+[id="cloud-experts-aws-secret-manager"]
+= Tutorial: Using AWS Secrets Manager CSI on ROSA with STS
+include::_attributes/attributes-openshift-dedicated.adoc[]
+:context: cloud-experts-aws-secret-manager
+
+toc::[]
+
+//Mobb content metadata
+//Brought into ROSA product docs 2023-09-18
+// ---
+// date: '2023-05-25'
+// title: Using AWS Secrets Manager CSI on Red Hat OpenShift on AWS with STS
+// tags: ["AWS", "ROSA"]
+// authors:
+// - Paul Czarkowski
+// - Chris Kang
+// ---
+
+The AWS Secrets and Configuration Provider (ASCP) provides a way to expose AWS Secrets as Kubernetes storage volumes. With the ASCP, you can store and manage your secrets in Secrets Manager and then retrieve them through your workloads running on {product-title} (ROSA).
+
+[id="cloud-experts-aws-secret-manager-prerequisites"]
+== Prerequisites
+
+Ensure that you have the following resources and tools before starting this process:
+
+* A ROSA cluster deployed with STS
+* Helm 3
+* `aws` CLI
+* `oc` CLI
+* `jq` CLI
+
+[discrete]
+[id="cloud-experts-aws-secret-manager-preparing-environment"]
+=== Additional environment requirements
+
+. Log in to your ROSA cluster by running the following command:
++
+[source,terminal]
+----
+$ oc login --token=<token> --server=<server-url>
+----
++
+You can find your login token by accessing your cluster in {cluster-manager-url-pull}.
+
+. Validate that your cluster has STS by running the following command:
++
+[source,terminal]
+----
+$ oc get authentication.config.openshift.io cluster -o json \
+ | jq .spec.serviceAccountIssuer
+----
++
+.Example output
++
+[source,terminal]
+----
+"https://xxxxx.cloudfront.net/xxxxx"
+----
++
+If your output is different, do not proceed. See xref:../rosa_install_access_delete_clusters/rosa-sts-creating-a-cluster-quickly.adoc#rosa-sts-creating-a-cluster-quickly[Red Hat documentation on creating an STS cluster] before continuing this process.
+
+. Set the `SecurityContextConstraints` permission to allow the CSI driver to run by running the following command:
++
+[source,terminal]
+----
+$ oc new-project csi-secrets-store
+$ oc adm policy add-scc-to-user privileged \
+ system:serviceaccount:csi-secrets-store:secrets-store-csi-driver
+$ oc adm policy add-scc-to-user privileged \
+ system:serviceaccount:csi-secrets-store:csi-secrets-store-provider-aws
+----
+
+. Create environment variables to use later in this process by running the following command:
++
+[source,terminal]
+----
+$ export REGION=$(oc get infrastructure cluster -o=jsonpath="{.status.platformStatus.aws.region}")
+$ export OIDC_ENDPOINT=$(oc get authentication.config.openshift.io cluster \
+ -o jsonpath='{.spec.serviceAccountIssuer}' | sed 's|^https://||')
+$ export AWS_ACCOUNT_ID=`aws sts get-caller-identity --query Account --output text`
+$ export AWS_PAGER=""
+----
+
+[id="cloud-experts-aws-secret-manager-deply-aws-secrets"]
+== Deploying the AWS Secrets and Configuration Provider
+
+. Use Helm to register the secrets store CSI driver by running the following command:
++
+[source,terminal]
+----
+$ helm repo add secrets-store-csi-driver \
+ https://kubernetes-sigs.github.io/secrets-store-csi-driver/charts
+----
+
+. Update your Helm repositories by running the following command:
++
+[source,terminal]
+----
+$ helm repo update
+----
+
+. Install the secrets store CSI driver by running the following command:
++
+[source,terminal]
+----
+$ helm upgrade --install -n csi-secrets-store \
+ csi-secrets-store-driver secrets-store-csi-driver/secrets-store-csi-driver
+----
+
+. Deploy the AWS provider by running the following command:
++
+[source,terminal]
+----
+$ oc -n csi-secrets-store apply -f \
+ https://raw.githubusercontent.com/rh-mobb/documentation/main/content/misc/secrets-store-csi/aws-provider-installer.yaml
+----
+
+. Check that both Daemonsets are running by running the following command:
++
+[source,terminal]
+----
+$ oc -n csi-secrets-store get ds \
+ csi-secrets-store-provider-aws \
+ csi-secrets-store-driver-secrets-store-csi-driver
+----
+
+. Label the Secrets Store CSI Driver to allow use with the restricted pod security profile by running the following command:
++
+[source,terminal]
+----
+$ oc label csidriver.storage.k8s.io/secrets-store.csi.k8s.io security.openshift.io/csi-ephemeral-volume-profile=restricted
+----
+
+[id="cloud-experts-aws-secret-manager-create-iam-polices"]
+== Creating a Secret and IAM Access Policies
+
+. Create a secret in Secrets Manager by running the following command:
++
+[source,terminal]
+----
+$ SECRET_ARN=$(aws --region "$REGION" secretsmanager create-secret \
+ --name MySecret --secret-string \
+ '{"username":"shadowman", "password":"hunter2"}' \
+ --query ARN --output text)
+$ echo $SECRET_ARN
+----
+
+. Create an IAM Access Policy document by running the following command:
++
+[source,terminal]
+----
+$ cat << EOF > policy.json
+{
+ "Version": "2012-10-17",
+ "Statement": [{
+ "Effect": "Allow",
+ "Action": [
+ "secretsmanager:GetSecretValue",
+ "secretsmanager:DescribeSecret"
+ ],
+ "Resource": ["$SECRET_ARN"]
+ }]
+}
+EOF
+----
+
+. Create an IAM Access Policy by running the following command:
++
+[source,terminal]
+----
+$ POLICY_ARN=$(aws --region "$REGION" --query Policy.Arn \
+--output text iam create-policy \
+--policy-name openshift-access-to-mysecret-policy \
+--policy-document file://policy.json)
+$ echo $POLICY_ARN
+----
+
+. Create an IAM Role trust policy document by running the following command:
++
+[NOTE]
+====
+The trust policy is locked down to the default service account of a namespace you create later in this process.
+====
++
+[source,terminal]
+----
+$ cat << EOF > trust-policy.json
+{
+ "Version": "2012-10-17",
+ "Statement": [
+ {
+ "Effect": "Allow",
+ "Condition": {
+ "StringEquals" : {
+ "${OIDC_ENDPOINT}:sub": ["system:serviceaccount:my-application:default"]
+ }
+ },
+ "Principal": {
+ "Federated": "arn:aws:iam::$AWS_ACCOUNT_ID:oidc-provider/${OIDC_ENDPOINT}"
+ },
+ "Action": "sts:AssumeRoleWithWebIdentity"
+ }
+ ]
+}
+EOF
+----
+
+. Create an IAM role by running the following command:
++
+[source,terminal]
+----
+$ ROLE_ARN=$(aws iam create-role --role-name openshift-access-to-mysecret \
+--assume-role-policy-document file://trust-policy.json \
+--query Role.Arn --output text)
+$ echo $ROLE_ARN
+----
+
+. Attach the role to the policy by running the following command:
++
+[source,terminal]
+----
+$ aws iam attach-role-policy --role-name openshift-access-to-mysecret \
+ --policy-arn $POLICY_ARN
+----
+
+[id="cloud-experts-aws-secret-manager-creating-application"]
+== Creating an application to use this secret
+
+. Create an OpenShift project by running the following command:
++
+[source,terminal]
+----
+$ oc new-project my-application
+----
+
+. Annotate the default service account to use the STS Role by running the following command:
++
+[source,terminal]
+----
+$ oc annotate -n my-application serviceaccount default \
+ eks.amazonaws.com/role-arn=$ROLE_ARN
+----
+
+. Create a secret provider class to access your secret by running the following command:
++
+[source,terminal]
+----
+$ cat << EOF | oc apply -f -
+apiVersion: secrets-store.csi.x-k8s.io/v1
+kind: SecretProviderClass
+metadata:
+ name: my-application-aws-secrets
+spec:
+ provider: aws
+ parameters:
+ objects: |
+ - objectName: "MySecret"
+ objectType: "secretsmanager"
+EOF
+----
+
+. Create a Pod that uses your secret by running the following command:
++
+[source,terminal]
+----
+$ cat << EOF | oc apply -f -
+apiVersion: v1
+kind: Pod
+metadata:
+ name: my-application
+ labels:
+ app: my-application
+spec:
+ volumes:
+ - name: secrets-store-inline
+ csi:
+ driver: secrets-store.csi.k8s.io
+ readOnly: true
+ volumeAttributes:
+ secretProviderClass: "my-application-aws-secrets"
+ containers:
+ - name: my-application-deployment
+ image: k8s.gcr.io/e2e-test-images/busybox:1.29
+ command:
+ - "/bin/sleep"
+ - "10000"
+ volumeMounts:
+ - name: secrets-store-inline
+ mountPath: "/mnt/secrets-store"
+ readOnly: true
+EOF
+----
+
+. Verify the Pod has the secret mounted by running the following command:
++
+[source,terminal]
+----
+$ oc exec -it my-application -- cat /mnt/secrets-store/MySecret
+----
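++
+The file contents are the secret value that you stored in Secrets Manager earlier:
++
+.Example output
++
+[source,json]
+----
+{"username":"shadowman", "password":"hunter2"}
+----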
+
+[id="cloud-experts-aws-secret-manager-cleanup"]
+== Clean up
+
+. Delete the application by running the following command:
++
+[source,terminal]
+----
+$ oc delete project my-application
+----
+
+. Delete the Secrets Store CSI driver by running the following command:
++
+[source,terminal]
+----
+$ helm delete -n csi-secrets-store csi-secrets-store-driver
+----
+
+. Delete Security Context Constraints by running the following command:
++
+[source,terminal]
+----
+$ oc adm policy remove-scc-from-user privileged \
+ system:serviceaccount:csi-secrets-store:secrets-store-csi-driver
+$ oc adm policy remove-scc-from-user privileged \
+ system:serviceaccount:csi-secrets-store:csi-secrets-store-provider-aws
+----
+
+. Delete the AWS provider by running the following command:
++
+[source,terminal]
+----
+$ oc -n csi-secrets-store delete -f \
+https://raw.githubusercontent.com/rh-mobb/documentation/main/content/misc/secrets-store-csi/aws-provider-installer.yaml
+----
+
+. Delete AWS Roles and Policies by running the following command:
++
+[source,terminal]
+----
+$ aws iam detach-role-policy --role-name openshift-access-to-mysecret \
+ --policy-arn $POLICY_ARN
+$ aws iam delete-role --role-name openshift-access-to-mysecret
+$ aws iam delete-policy --policy-arn $POLICY_ARN
+----
+
+. Delete the Secrets Manager secret by running the following command:
++
+[source,terminal]
+----
+$ aws secretsmanager --region $REGION delete-secret --secret-id $SECRET_ARN
+----
\ No newline at end of file
diff --git a/cloud_experts_tutorials/cloud-experts-configure-custom-tls-ciphers.adoc b/cloud_experts_tutorials/cloud-experts-configure-custom-tls-ciphers.adoc
new file mode 100644
index 000000000000..7d017da7d2a5
--- /dev/null
+++ b/cloud_experts_tutorials/cloud-experts-configure-custom-tls-ciphers.adoc
@@ -0,0 +1,223 @@
+:_mod-docs-content-type: ASSEMBLY
+[id="cloud-experts-configure-custom-tls-ciphers"]
+= Tutorial: Configuring ROSA/OSD to use custom TLS ciphers on the Ingress Controller
+include::_attributes/attributes-openshift-dedicated.adoc[]
+:context: cloud-experts-configure-custom-tls-ciphers
+
+toc::[]
+
+// ---
+// date: '2022-08-24'
+// title: Configure ROSA/OSD to use custom TLS ciphers on the Ingress Controller
+// aliases: ['/docs/ingress/tls-cipher-customization']
+// tags: ["ROSA", "AWS", "OSD"]
+// authors:
+// - Michael McNeill
+// - Connor Wooley
+// ---
+
+include::snippets/mobb-support-statement.adoc[leveloffset=+1]
+//Adding the support statement based on a conversation with Michael McNeill
+
+This guide demonstrates how to properly patch the cluster Ingress Controllers, as well as Ingress Controllers created by the Custom Domain Operator.
+This functionality allows customers to modify the `tlsSecurityProfile` value on cluster Ingress Controllers.
+This guide demonstrates how to apply a custom `tlsSecurityProfile`, a scoped service account with the associated role and role binding, and a CronJob that reapplies the cipher changes within 60 minutes in the event that an Ingress Controller is recreated or modified.
+
+.Prerequisites
+
+* Review the link:https://docs.openshift.com/container-platform/4.13/networking/ingress-operator.html#configuring-ingress-controller-tls[OpenShift Documentation that explains the options for the `tlsSecurityProfile`]. By default, Ingress Controllers are configured to use the `Intermediate` profile, which corresponds to the link:https://wiki.mozilla.org/Security/Server_Side_TLS#Intermediate_compatibility_.28recommended.29[Intermediate Mozilla profile].
+
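+To check which profile is currently in effect on the default Ingress Controller, you can inspect its `tlsSecurityProfile` field before making changes; an empty result means the default (`Intermediate`) profile applies. This quick check is an addition to the original procedure:
+
+[source,terminal]
+----
+$ oc get ingresscontroller/default -n openshift-ingress-operator -o jsonpath='{.spec.tlsSecurityProfile}'
+----
+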
+.Procedure
+
+. Create a service account for the CronJob to use.
++
+A service account allows our CronJob to directly access the cluster API, without using a regular user's credentials.
+To create a service account, run the following command:
++
+[source,terminal]
+----
+$ oc create sa cron-ingress-patch-sa -n openshift-ingress-operator
+----
+
+. Create a role and role binding that allows limited access to patch the Ingress Controllers.
++
+Role-based access control (RBAC) is critical to ensuring security inside your cluster.
+Creating a role allows us to provide scoped access to only the API resources needed within the cluster. To create the role, run the following command:
++
+[source,terminal]
+----
+$ oc create role cron-ingress-patch-role --verb=get,patch,update --resource=ingresscontroller.operator.openshift.io -n openshift-ingress-operator
+----
++
+Once the role has been created, you must bind the role to the service account using a role binding.
+To create the role binding, run the following command:
++
+[source,terminal]
+----
+$ oc create rolebinding cron-ingress-patch-rolebinding --role=cron-ingress-patch-role --serviceaccount=openshift-ingress-operator:cron-ingress-patch-sa -n openshift-ingress-operator
+----
+
+. Patch the Ingress Controllers.
++
+[IMPORTANT]
+====
+The examples provided below add an additional cipher to the Ingress Controller's `tlsSecurityProfile` to allow IE 11 access from Windows Server 2008 R2.
+Modify this command to meet your specific business requirements.
+====
++
+Before creating the CronJob, apply the `tlsSecurityProfile` configuration to validate the changes.
+This process depends on whether you are using the xref:../applications/deployments/osd-config-custom-domains-applications.adoc#osd-config-custom-domains-applications[Custom Domain Operator].
++
+.. Clusters not using the xref:../applications/deployments/osd-config-custom-domains-applications.adoc#osd-config-custom-domains-applications[Custom Domain Operator]:
++
+If you are only using the default Ingress Controller, and not using the xref:../applications/deployments/osd-config-custom-domains-applications.adoc#osd-config-custom-domains-applications[Custom Domain Operator], run the following command to patch the Ingress Controller:
++
+[source,terminal]
+----
+$ oc patch ingresscontroller/default -n openshift-ingress-operator --type=merge -p '{"spec":{"tlsSecurityProfile":{"type":"Custom","custom":{"ciphers":["TLS_AES_128_GCM_SHA256","TLS_AES_256_GCM_SHA384","ECDHE-ECDSA-AES128-GCM-SHA256","ECDHE-RSA-AES128-GCM-SHA256","ECDHE-ECDSA-AES256-GCM-SHA384","ECDHE-RSA-AES256-GCM-SHA384","ECDHE-ECDSA-CHACHA20-POLY1305","ECDHE-RSA-CHACHA20-POLY1305","DHE-RSA-AES128-GCM-SHA256","DHE-RSA-AES256-GCM-SHA384","TLS_CHACHA20_POLY1305_SHA256","TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA"],"minTLSVersion":"VersionTLS12"}}}}'
+----
++
+This patch adds the `TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA` cipher, which allows access from IE 11 on Windows Server 2008 R2 when using RSA certificates.
++
+Once you run the command, you will receive a response that looks like this:
++
+.Example output
+[source,terminal]
+----
+ingresscontroller.operator.openshift.io/default patched
+----
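++
+To confirm that the new cipher list took effect, you can read it back from the Ingress Controller; this verification step is an addition to the original procedure:
++
+[source,terminal]
+----
+$ oc get ingresscontroller/default -n openshift-ingress-operator -o jsonpath='{.spec.tlsSecurityProfile.custom.ciphers}'
+----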
++
+.. Clusters using the xref:../applications/deployments/osd-config-custom-domains-applications.adoc#osd-config-custom-domains-applications[Custom Domain Operator]:
++
+Customers who are using the xref:../applications/deployments/osd-config-custom-domains-applications.adoc#osd-config-custom-domains-applications[Custom Domain Operator] need to loop through each of their Ingress Controllers to patch each one.
+To patch all of your cluster's Ingress Controllers, run the following command:
++
+[source,terminal]
+----
+$ for ic in $(oc get ingresscontroller -o name -n openshift-ingress-operator); do oc patch ${ic} -n openshift-ingress-operator --type=merge -p '{"spec":{"tlsSecurityProfile":{"type":"Custom","custom":{"ciphers":["TLS_AES_128_GCM_SHA256","TLS_AES_256_GCM_SHA384","ECDHE-ECDSA-AES128-GCM-SHA256","ECDHE-RSA-AES128-GCM-SHA256","ECDHE-ECDSA-AES256-GCM-SHA384","ECDHE-RSA-AES256-GCM-SHA384","ECDHE-ECDSA-CHACHA20-POLY1305","ECDHE-RSA-CHACHA20-POLY1305","DHE-RSA-AES128-GCM-SHA256","DHE-RSA-AES256-GCM-SHA384","TLS_CHACHA20_POLY1305_SHA256","TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA"],"minTLSVersion":"VersionTLS12"}}}}'; done
+----
++
+Once you run the command, you will receive a response that looks like this:
++
+.Example output
+[source,terminal]
+----
+ingresscontroller.operator.openshift.io/default patched
+ingresscontroller.operator.openshift.io/custom1 patched
+ingresscontroller.operator.openshift.io/custom2 patched
+----
+
+. Create the CronJob to ensure that the TLS configuration is not overwritten.
++
+Occasionally, the cluster's Ingress Controllers can get recreated. In these cases, the Ingress Controller will likely not retain the `tlsSecurityProfile` changes that were applied.
+To ensure this does not happen, create a CronJob that goes through and updates the cluster's Ingress Controllers.
+This process depends on whether you are using the xref:../applications/deployments/osd-config-custom-domains-applications.adoc#osd-config-custom-domains-applications[Custom Domain Operator].
++
+.. Clusters not using the xref:../applications/deployments/osd-config-custom-domains-applications.adoc#osd-config-custom-domains-applications[Custom Domain Operator]:
++
+If you are not using the xref:../applications/deployments/osd-config-custom-domains-applications.adoc#osd-config-custom-domains-applications[Custom Domain Operator], create the CronJob by running the following command:
++
+[source,terminal]
+----
+$ cat << EOF | oc apply -f -
+apiVersion: batch/v1
+kind: CronJob
+metadata:
+ name: tls-patch
+ namespace: openshift-ingress-operator
+spec:
+ schedule: '@hourly'
+ jobTemplate:
+ spec:
+ template:
+ spec:
+ containers:
+ - name: tls-patch
+ image: registry.redhat.io/openshift4/ose-tools-rhel8:latest
+ args:
+ - /bin/sh
+ - '-c'
+ - oc patch ingresscontroller/default -n openshift-ingress-operator --type=merge -p '{"spec":{"tlsSecurityProfile":{"type":"Custom","custom":{"ciphers":["TLS_AES_128_GCM_SHA256","TLS_AES_256_GCM_SHA384","ECDHE-ECDSA-AES128-GCM-SHA256","ECDHE-RSA-AES128-GCM-SHA256","ECDHE-ECDSA-AES256-GCM-SHA384","ECDHE-RSA-AES256-GCM-SHA384","ECDHE-ECDSA-CHACHA20-POLY1305","ECDHE-RSA-CHACHA20-POLY1305","DHE-RSA-AES128-GCM-SHA256","DHE-RSA-AES256-GCM-SHA384","TLS_CHACHA20_POLY1305_SHA256","TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA"],"minTLSVersion":"VersionTLS12"}}}}'
+ restartPolicy: Never
+ serviceAccountName: cron-ingress-patch-sa
+EOF
+----
++
+[NOTE]
+====
+This CronJob runs every hour and patches the Ingress Controllers, if necessary.
+It is important that this CronJob does not run constantly, as it can trigger reconciles that could overload the OpenShift Ingress Operator.
+Most of the time, the logs of the CronJob pod look like the following example, because the job does not change anything:
+
+.Example output
+[source,terminal]
+----
+ingresscontroller.operator.openshift.io/default patched (no change)
+----
+====
++
+.. Clusters using the xref:../applications/deployments/osd-config-custom-domains-applications.adoc#osd-config-custom-domains-applications[Custom Domain Operator]:
++
+If you are using the xref:../applications/deployments/osd-config-custom-domains-applications.adoc#osd-config-custom-domains-applications[Custom Domain Operator], the CronJob needs to loop through and patch each Ingress Controller.
+To create this CronJob, run the following command:
++
+[source,terminal]
+----
+$ cat << EOF | oc apply -f -
+apiVersion: batch/v1
+kind: CronJob
+metadata:
+ name: tls-patch
+ namespace: openshift-ingress-operator
+spec:
+ schedule: '@hourly'
+ jobTemplate:
+ spec:
+ template:
+ spec:
+ containers:
+ - name: tls-patch
+ image: registry.redhat.io/openshift4/ose-tools-rhel8:latest
+ args:
+ - /bin/sh
+ - '-c'
+ - for ic in $(oc get ingresscontroller -o name -n openshift-ingress-operator); do oc patch ${ic} -n openshift-ingress-operator --type=merge -p '{"spec":{"tlsSecurityProfile":{"type":"Custom","custom":{"ciphers":["TLS_AES_128_GCM_SHA256","TLS_AES_256_GCM_SHA384","ECDHE-ECDSA-AES128-GCM-SHA256","ECDHE-RSA-AES128-GCM-SHA256","ECDHE-ECDSA-AES256-GCM-SHA384","ECDHE-RSA-AES256-GCM-SHA384","ECDHE-ECDSA-CHACHA20-POLY1305","ECDHE-RSA-CHACHA20-POLY1305","DHE-RSA-AES128-GCM-SHA256","DHE-RSA-AES256-GCM-SHA384","TLS_CHACHA20_POLY1305_SHA256","TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA"],"minTLSVersion":"VersionTLS12"}}}}'; done
+ restartPolicy: Never
+ serviceAccountName: cron-ingress-patch-sa
+EOF
+----
++
+[NOTE]
+====
+This CronJob runs every hour and patches the Ingress Controllers, if necessary. It is important that this CronJob does not run constantly, as it can trigger reconciles that could overload the OpenShift Ingress Operator. Most of the time, the logs of the CronJob pod look like the following example, because the job does not change anything:
+
+.Example output
+[source,terminal]
+----
+ingresscontroller.operator.openshift.io/default patched (no change)
+ingresscontroller.operator.openshift.io/custom1 patched (no change)
+ingresscontroller.operator.openshift.io/custom2 patched (no change)
+----
+====
+
diff --git a/cloud_experts_tutorials/cloud-experts-consistent-egress-ip.adoc b/cloud_experts_tutorials/cloud-experts-consistent-egress-ip.adoc
new file mode 100644
index 000000000000..ab764e04fbd5
--- /dev/null
+++ b/cloud_experts_tutorials/cloud-experts-consistent-egress-ip.adoc
@@ -0,0 +1,416 @@
+:_mod-docs-content-type: ASSEMBLY
+[id="cloud-experts-consistent-egress-ip"]
+= Tutorial: Assigning consistent egress IP for external traffic
+include::_attributes/attributes-openshift-dedicated.adoc[]
+:context: cloud-experts-consistent-egress-ip
+
+toc::[]
+
+// Mobb content metadata
+// Brought into ROSA product docs 2023-09-19
+// ---
+// date: '2023-02-28'
+// title: Assign Consistent Egress IP for External Traffic
+// tags: ["OSD", "ROSA", "ARO"]
+// authors:
+// - 'Dustin Scott'
+// ---
+
+It might be desirable to assign a consistent IP address for traffic that leaves the cluster when configuring items such as security groups or other security controls that require an IP-based configuration. By default, {product-title} (ROSA) (using the OVN-Kubernetes CNI) assigns random IP addresses from a pool, which makes configuring security lockdowns unpredictable or unnecessarily permissive. This guide shows you how to configure a set of predictable IP addresses for egress cluster traffic to meet common security standards and other potential use cases.
+
+See the link:https://docs.openshift.com/container-platform/latest/networking/ovn_kubernetes_network_provider/configuring-egress-ips-ovn.html[OpenShift documentation on this topic] for more information.
+
+== Prerequisites
+
+* ROSA cluster deployed with OVN-Kubernetes
+* OpenShift CLI (`oc`)
+* ROSA CLI (`rosa`)
+* `jq`
+
+=== Environment
+
+This section sets environment variables for the tutorial so that you do not need to substitute your own values in each command. Be sure to replace the `ROSA_MACHINE_POOL_NAME` variable if you want to target a different machine pool:
+
+[source,terminal]
+----
+$ export ROSA_CLUSTER_NAME=$(oc get infrastructure cluster -o=jsonpath="{.status.infrastructureName}" | sed 's/-[a-z0-9]\{5\}$//')
+$ export ROSA_MACHINE_POOL_NAME=Default
+----
+
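+To verify that the variables are set, you can echo them before continuing; this quick sanity check is an addition to the original tutorial:
+
+[source,terminal]
+----
+$ echo "Cluster: ${ROSA_CLUSTER_NAME}, Machine pool: ${ROSA_MACHINE_POOL_NAME}"
+----
+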
+== Ensure capacity
+
+For each public cloud provider, there is a limit on the number of IP addresses that can be assigned per node, which can affect the ability to assign an egress IP address. To verify sufficient capacity, run the following command to print the currently assigned IP addresses and the total capacity for each node and to identify any nodes that might be affected:
+
+[source,terminal]
+----
+$ oc get node -o json | \
+ jq '.items[] |
+ {
+ "name": .metadata.name,
+ "ips": (.status.addresses | map(select(.type == "InternalIP") | .address)),
+ "capacity": (.metadata.annotations."cloud.network.openshift.io/egress-ipconfig" | fromjson[] | .capacity.ipv4)
+ }'
+----
+
+.Example output
+[source,terminal]
+----
+{
+ "name": "ip-10-10-145-88.ec2.internal",
+ "ips": [
+ "10.10.145.88"
+ ],
+ "capacity": 14
+}
+{
+ "name": "ip-10-10-154-175.ec2.internal",
+ "ips": [
+ "10.10.154.175"
+ ],
+ "capacity": 14
+}
+----
+
+[NOTE]
+====
+The above example uses `jq` as a friendly filter. If you do not have `jq` installed, you can review the `metadata.annotations['cloud.network.openshift.io/egress-ipconfig']` field of each node manually to verify node capacity.
+====
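+
+For example, a jq-free spot check of a single node might look like the following, replacing `<node_name>` with one of your worker nodes:
+
+[source,terminal]
+----
+$ oc get node <node_name> -o yaml | grep egress-ipconfig
+----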
+
+== Create the egress IP rule(s)
+
+[NOTE]
+====
+Ideally, you would label the nodes before assigning the egress IP addresses; however, an existing bug must be fixed first. After the bug is fixed, this process and documentation will be reordered accordingly. See link:https://issues.redhat.com/browse/OCPBUGS-4969[OCPBUGS-4969].
+====
+
+=== Identify the egress IPs
+
+Before creating the rules, identify the egress IPs to use. The egress IPs that you select must be part of the subnets in which the worker nodes are provisioned; you can verify this with the helper command below.
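+
+For example, you can list the CIDR ranges of the subnets in your AWS region and match them against your worker nodes' IP addresses; this helper command is an addition to the original tutorial:
+
+[source,terminal]
+----
+$ aws ec2 describe-subnets \
+  --query 'Subnets[].{ID:SubnetId,CIDR:CidrBlock}' --output table
+----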
+
+=== Reserve the egress IPs
+
+It is recommended, but not required, to reserve the egress IPs that you have requested to avoid conflicts with the AWS VPC DHCP service. To do so, you can request explicit IP reservations by link:https://docs.aws.amazon.com/vpc/latest/userguide/subnet-cidr-reservation.html[following the AWS documentation for CIDR reservations].
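+
+A minimal sketch of such a reservation, assuming a hypothetical subnet ID and the single egress IP `10.10.100.253`:
+
+[source,terminal]
+----
+$ aws ec2 create-subnet-cidr-reservation \
+  --subnet-id subnet-0123456789abcdef0 \
+  --reservation-type explicit \
+  --cidr 10.10.100.253/32
+----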
+
+== Deploy an egress IP to a namespace
+
+Create a project to demonstrate assigning egress IP addresses based on a namespace selection:
+
+[source,terminal]
+----
+$ oc new-project demo-egress-ns
+----
+
+Create the egress rule. This rule applies the egress IP to all pods within the namespace that you just created, selected by the `spec.namespaceSelector` field, as shown in the sketch below.
+
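+A minimal sketch of such an `EgressIP` resource, using the OVN-Kubernetes `k8s.ovn.org/v1` API and assuming the example egress IP `10.10.100.253`:
+
+[source,yaml]
+----
+apiVersion: k8s.ovn.org/v1
+kind: EgressIP
+metadata:
+  name: demo-egress-ns
+spec:
+  # Hypothetical example IP; it must belong to a worker subnet
+  egressIPs:
+    - 10.10.100.253
+  # Selects all pods in the demo-egress-ns namespace
+  namespaceSelector:
+    matchLabels:
+      kubernetes.io/metadata.name: demo-egress-ns
+----
+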
+[source,terminal]
+----
+$ cat << EOF > ${SCRATCH}/policy.json
+{
+"Version": "2012-10-17",
+"Statement": [
+ {
+ "Effect": "Allow",
+ "Action": [
+ "s3:CreateBucket",
+ "s3:DeleteBucket",
+ "s3:PutBucketTagging",
+ "s3:GetBucketTagging",
+ "s3:PutEncryptionConfiguration",
+ "s3:GetEncryptionConfiguration",
+ "s3:PutLifecycleConfiguration",
+ "s3:GetLifecycleConfiguration",
+ "s3:GetBucketLocation",
+ "s3:ListBucket",
+ "s3:GetObject",
+ "s3:PutObject",
+ "s3:DeleteObject",
+ "s3:ListBucketMultipartUploads",
+ "s3:AbortMultipartUpload",
+ "s3:ListMultipartUploadParts",
+ "ec2:DescribeSnapshots",
+ "ec2:DescribeVolumes",
+ "ec2:DescribeVolumeAttribute",
+ "ec2:DescribeVolumesModifications",
+ "ec2:DescribeVolumeStatus",
+ "ec2:CreateTags",
+ "ec2:CreateVolume",
+ "ec2:CreateSnapshot",
+ "ec2:DeleteSnapshot"
+ ],
+ "Resource": "*"
+ }
+]}
+EOF
+$ POLICY_ARN=$(aws iam create-policy --policy-name "RosaOadpVer1" \
+--policy-document file://${SCRATCH}/policy.json --query Policy.Arn \
+--tags Key=rosa_openshift_version,Value=${CLUSTER_VERSION} Key=rosa_role_prefix,Value=ManagedOpenShift Key=operator_namespace,Value=openshift-oadp Key=operator_name,Value=openshift-oadp \
+--output text)
+$ echo ${POLICY_ARN}
+----
+
+. Create an IAM Role trust policy for the cluster:
++
+[source,terminal]
+----
+$ cat << EOF > ${SCRATCH}/trust-policy.json
+{
+ "Version": "2012-10-17",
+ "Statement": [{
+ "Effect": "Allow",
+ "Principal": {
+ "Federated": "arn:aws:iam::${AWS_ACCOUNT_ID}:oidc-provider/${OIDC_ENDPOINT}"
+ },
+ "Action": "sts:AssumeRoleWithWebIdentity",
+ "Condition": {
+ "StringEquals": {
+ "${OIDC_ENDPOINT}:sub": [
+ "system:serviceaccount:openshift-adp:openshift-adp-controller-manager",
+ "system:serviceaccount:openshift-adp:velero"]
+ }
+ }
+ }]
+}
+EOF
+$ ROLE_ARN=$(aws iam create-role --role-name \
+ "${ROLE_NAME}" \
+ --assume-role-policy-document file://${SCRATCH}/trust-policy.json \
+ --tags Key=rosa_cluster_id,Value=${ROSA_CLUSTER_ID} Key=rosa_openshift_version,Value=${CLUSTER_VERSION} Key=rosa_role_prefix,Value=ManagedOpenShift Key=operator_namespace,Value=openshift-adp Key=operator_name,Value=openshift-oadp \
+ --query Role.Arn --output text)
+
+$ echo ${ROLE_ARN}
+----
+
+. Attach the IAM Policy to the IAM Role:
++
+[source,terminal]
+----
+$ aws iam attach-role-policy --role-name "${ROLE_NAME}" \
+ --policy-arn ${POLICY_ARN}
+----
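++
+To confirm the attachment, you can list the role's attached policies; this verification step is an addition to the original procedure:
++
+[source,terminal]
+----
+$ aws iam list-attached-role-policies --role-name "${ROLE_NAME}"
+----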
+
+[id="deploy-oadp-on-cluster_{context}"]
+== Deploy OADP on the cluster
+
+. Create a namespace for OADP:
++
+[source,terminal]
+----
+$ oc create namespace openshift-adp
+----
+
+. Create a credentials secret:
++
+[source,terminal]
+----
+$ cat << EOF > ${SCRATCH}/credentials
+[default]
+role_arn = ${ROLE_ARN}
+web_identity_token_file = /var/run/secrets/openshift/serviceaccount/token
+EOF
+$ oc -n openshift-adp create secret generic cloud-credentials \
+ --from-file=${SCRATCH}/credentials
+----
+
+. Deploy the OADP Operator:
++
+[NOTE]
+====
+Version 1.1 of the Operator has a known issue in which backups can report a `PartiallyFailed` status. This does not appear to affect the backup and restore process itself, but be aware of the issue when reviewing backup status.
+====
++
+[source,terminal]
+----
+$ cat << EOF | oc create -f -
+apiVersion: operators.coreos.com/v1
+kind: OperatorGroup
+metadata:
+ generateName: openshift-adp-
+ namespace: openshift-adp
+ name: oadp
+spec:
+ targetNamespaces:
+ - openshift-adp
+---
+apiVersion: operators.coreos.com/v1alpha1
+kind: Subscription
+metadata:
+ name: redhat-oadp-operator
+ namespace: openshift-adp
+spec:
+ channel: stable-1.2
+ installPlanApproval: Automatic
+ name: redhat-oadp-operator
+ source: redhat-operators
+ sourceNamespace: openshift-marketplace
+EOF
+----
+
+. Wait for the Operator to be ready:
++
+[source,terminal]
+----
+$ watch oc -n openshift-adp get pods
+----
++
+.Example output
+[source,terminal]
+----
+NAME READY STATUS RESTARTS AGE
+openshift-adp-controller-manager-546684844f-qqjhn 1/1 Running 0 22s
+----
+
+. Create Cloud Storage:
++
+[source,terminal]
+----
+$ cat << EOF | oc create -f -
+apiVersion: oadp.openshift.io/v1alpha1
+kind: CloudStorage
+metadata:
+ name: ${CLUSTER_NAME}-oadp
+ namespace: openshift-adp
+spec:
+ creationSecret:
+ key: credentials
+ name: cloud-credentials
+ enableSharedConfig: true
+ name: ${CLUSTER_NAME}-oadp
+ provider: aws
+ region: $REGION
+EOF
+----
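++
+After the resource is created, you can confirm that the Operator provisioned the S3 bucket; this verification step is an addition to the original procedure:
++
+[source,terminal]
+----
+$ aws s3 ls | grep "${CLUSTER_NAME}-oadp"
+----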
+
+. Check your application's default storage class:
++
+[source,terminal]
+----
+$ oc get pvc -n <1>
+----
+<1> Enter your application's namespace.
++
+.Example output
+[source,terminal]
+----
+NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE
+applog Bound pvc-351791ae-b6ab-4e8b-88a4-30f73caf5ef8 1Gi RWO gp3-csi 4d19h
+mysql Bound pvc-16b8e009-a20a-4379-accc-bc81fedd0621 1Gi RWO gp3-csi 4d19h
+----
++
+[source,terminal]
+----
+$ oc get storageclass
+----
++
+.Example output
+[source,terminal]
+----
+NAME PROVISIONER RECLAIMPOLICY VOLUMEBINDINGMODE ALLOWVOLUMEEXPANSION AGE
+gp2 kubernetes.io/aws-ebs Delete WaitForFirstConsumer true 4d21h
+gp2-csi ebs.csi.aws.com Delete WaitForFirstConsumer true 4d21h
+gp3 ebs.csi.aws.com Delete WaitForFirstConsumer true 4d21h
+gp3-csi (default) ebs.csi.aws.com Delete WaitForFirstConsumer true 4d21h
+----
++
+Using either `gp3-csi`, `gp2-csi`, `gp3`, or `gp2` will work.
+If the applications being backed up all use persistent volumes (PVs) provisioned by CSI, include the CSI plugin in the OADP DPA configuration.
+
+. CSI only: Deploy a Data Protection Application:
++
+[source,terminal]
+----
+$ cat << EOF | oc create -f -
+apiVersion: oadp.openshift.io/v1alpha1
+kind: DataProtectionApplication
+metadata:
+ name: ${CLUSTER_NAME}-dpa
+ namespace: openshift-adp
+spec:
+ backupImages: false
+ features:
+ dataMover:
+ enable: false
+ backupLocations:
+ - bucket:
+ cloudStorageRef:
+ name: ${CLUSTER_NAME}-oadp
+ credential:
+ key: credentials
+ name: cloud-credentials
+ prefix: velero
+ default: true
+ config:
+ region: ${REGION}
+ configuration:
+ velero:
+ defaultPlugins:
+ - openshift
+ - aws
+ - csi
+ restic:
+ enable: false
+EOF
+----
++
+[NOTE]
+====
+If you run this command for CSI volumes, you can skip the next step.
+====
+
+. Non-CSI volumes: Deploy a Data Protection Application:
++
+[source,terminal]
+----
+$ cat << EOF | oc create -f -
+apiVersion: oadp.openshift.io/v1alpha1
+kind: DataProtectionApplication
+metadata:
+ name: ${CLUSTER_NAME}-dpa
+ namespace: openshift-adp
+spec:
+ backupImages: false
+ features:
+ dataMover:
+ enable: false
+ backupLocations:
+ - bucket:
+ cloudStorageRef:
+ name: ${CLUSTER_NAME}-oadp
+ credential:
+ key: credentials
+ name: cloud-credentials
+ prefix: velero
+ default: true
+ config:
+ region: ${REGION}
+ configuration:
+ velero:
+ defaultPlugins:
+ - openshift
+ - aws
+ restic:
+ enable: false
+ snapshotLocations:
+ - velero:
+ config:
+ credentialsFile: /tmp/credentials/openshift-adp/cloud-credentials-credentials
+ enableSharedConfig: 'true'
+ profile: default
+ region: ${REGION}
+ provider: aws
+EOF
+----
+
+[NOTE]
+====
+* Container image backup and restore (`spec.backupImages=false`) is disabled and not supported in OADP 1.1.x
+or OADP 1.2.0 ROSA STS environments.
+* The Restic feature (`restic.enable=false`) is disabled and not supported in ROSA STS environments.
+* The DataMover feature (`dataMover.enable=false`) is disabled and not supported in ROSA STS environments.
+====
+
+[id="perform-a-backup_{context}"]
+== Perform a backup
+
+[NOTE]
+====
+The following sample hello-world application has no attached PVs.
+Either DPA configuration works.
+====
+
+. Create a workload to back up:
++
+[source,terminal]
+----
+$ oc create namespace hello-world
+$ oc new-app -n hello-world --image=docker.io/openshift/hello-openshift
+----
+
+. Expose the route:
++
+[source,terminal]
+----
+$ oc expose service/hello-openshift -n hello-world
+----
+
+. Check that the application is working:
++
+[source,terminal]
+----
+$ curl `oc get route/hello-openshift -n hello-world -o jsonpath='{.spec.host}'`
+----
++
+.Example output
+[source,terminal]
+----
+Hello OpenShift!
+----
+
+. Back up the workload:
++
+[source,terminal]
+----
+$ cat << EOF | oc create -f -
+apiVersion: velero.io/v1
+kind: Backup
+metadata:
+ name: hello-world
+ namespace: openshift-adp
+spec:
+ includedNamespaces:
+ - hello-world
+ storageLocation: ${CLUSTER_NAME}-dpa-1
+ ttl: 720h0m0s
+EOF
+----
+
+. Wait until the backup is done:
++
+[source,terminal]
+----
+$ watch "oc -n openshift-adp get backup hello-world -o json | jq .status"
+----
++
+.Example output
+[source,JSON]
+----
+{
+ "completionTimestamp": "2022-09-07T22:20:44Z",
+ "expiration": "2022-10-07T22:20:22Z",
+ "formatVersion": "1.1.0",
+ "phase": "Completed",
+ "progress": {
+ "itemsBackedUp": 58,
+ "totalItems": 58
+ },
+ "startTimestamp": "2022-09-07T22:20:22Z",
+ "version": 1
+}
+----
+
+. Delete the demo workload:
++
+[source,terminal]
+----
+$ oc delete ns hello-world
+----
+
+. Restore from the backup:
++
+[source,terminal]
+----
+$ cat << EOF | oc create -f -
+apiVersion: velero.io/v1
+kind: Restore
+metadata:
+ name: hello-world
+ namespace: openshift-adp
+spec:
+ backupName: hello-world
+EOF
+----
+
+. Wait for the Restore to finish:
++
+[source,terminal]
+----
+$ watch "oc -n openshift-adp get restore hello-world -o json | jq .status"
+----
++
+.Example output
+[source,JSON]
+----
+{
+ "completionTimestamp": "2022-09-07T22:25:47Z",
+ "phase": "Completed",
+ "progress": {
+ "itemsRestored": 38,
+ "totalItems": 38
+ },
+ "startTimestamp": "2022-09-07T22:25:28Z",
+ "warnings": 9
+}
+----
+
+. Check that the workload is restored:
++
+[source,terminal]
+----
+$ oc -n hello-world get pods
+----
++
+.Example output
+[source,terminal]
+----
+NAME READY STATUS RESTARTS AGE
+hello-openshift-9f885f7c6-kdjpj 1/1 Running 0 90s
+----
++
+[source,terminal]
+----
+$ curl `oc get route/hello-openshift -n hello-world -o jsonpath='{.spec.host}'`
+----
++
+.Example output
+[source,terminal]
+----
+Hello OpenShift!
+----
+
+. For troubleshooting tips, see the OADP team's link:https://github.com/openshift/oadp-operator/blob/master/docs/TROUBLESHOOTING.md[troubleshooting documentation].
+
+. Additional sample applications are available in the OADP team's link:https://github.com/openshift/oadp-operator/tree/master/tests/e2e/sample-applications[sample applications directory].
+
+[id="cleanup_{context}"]
+== Cleanup
+
+. Delete the workload:
++
+[source,terminal]
+----
+$ oc delete ns hello-world
+----
+
+. Remove the backup and restore resources from the cluster if they are no longer required:
++
+[source,terminal]
+----
+$ oc -n openshift-adp delete backup hello-world
+$ oc -n openshift-adp delete restore hello-world
+----
+
+. To delete the backup and restore objects, including their remote objects in S3, run the following commands:
++
+[source,terminal]
+----
+$ velero backup delete hello-world
+$ velero restore delete hello-world
+----
+
+. Delete the Data Protection Application:
++
+[source,terminal]
+----
+$ oc -n openshift-adp delete dpa ${CLUSTER_NAME}-dpa
+----
+
+. Delete the Cloud Storage:
++
+[source,terminal]
+----
+$ oc -n openshift-adp delete cloudstorage ${CLUSTER_NAME}-oadp
+----
++
+[WARNING]
+====
+If this command hangs, you might need to delete the finalizer:
+[source,terminal]
+----
+$ oc -n openshift-adp patch cloudstorage ${CLUSTER_NAME}-oadp -p '{"metadata":{"finalizers":null}}' --type=merge
+----
+====
+
+. Remove the Operator if it is no longer required:
++
+[source,terminal]
+----
+$ oc -n openshift-adp delete subscription redhat-oadp-operator
+----
+
+. Remove the namespace for the Operator:
++
+[source,terminal]
+----
+$ oc delete ns openshift-adp
+----
+
+. Remove the Custom Resource Definitions from the cluster if you no longer wish to have them:
++
+[source,terminal]
+----
+$ for CRD in `oc get crds | grep velero | awk '{print $1}'`; do oc delete crd $CRD; done
+$ for CRD in `oc get crds | grep -i oadp | awk '{print $1}'`; do oc delete crd $CRD; done
+----
+
+. Delete the AWS S3 Bucket:
++
+[source,terminal]
+----
+$ aws s3 rm s3://${CLUSTER_NAME}-oadp --recursive
+$ aws s3api delete-bucket --bucket ${CLUSTER_NAME}-oadp
+----
+
+. Detach the Policy from the role:
++
+[source,terminal]
+----
+$ aws iam detach-role-policy --role-name "${ROLE_NAME}" \
+ --policy-arn "${POLICY_ARN}"
+----
+
+. Delete the role:
++
+[source,terminal]
+----
+$ aws iam delete-role --role-name "${ROLE_NAME}"
+----
\ No newline at end of file
diff --git a/sd_support/_attributes b/cloud_experts_tutorials/cloud-experts-deploying-application/_attributes
similarity index 100%
rename from sd_support/_attributes
rename to cloud_experts_tutorials/cloud-experts-deploying-application/_attributes
diff --git a/cloud_experts_tutorials/cloud-experts-deploying-application/cloud-experts-deploying-application-intro.adoc b/cloud_experts_tutorials/cloud-experts-deploying-application/cloud-experts-deploying-application-intro.adoc
new file mode 100644
index 000000000000..8a8f7628259e
--- /dev/null
+++ b/cloud_experts_tutorials/cloud-experts-deploying-application/cloud-experts-deploying-application-intro.adoc
@@ -0,0 +1,30 @@
+:_mod-docs-content-type: ASSEMBLY
+[id="cloud-experts-deploying-application-intro"]
+= Tutorial: Deploying an application
+include::_attributes/attributes-openshift-dedicated.adoc[]
+:context: cloud-experts-deploying-application-intro
+
+toc::[]
+
+//rosaworkshop.io content metadata
+//Brought into ROSA product docs 2023-12-14
+
+== Introduction
+
+After successfully provisioning your cluster, you can deploy an application on it. This application allows you to become more familiar with some of the features of {product-title} (ROSA) and Kubernetes.
+
+=== Lab overview
+In this lab, you will complete the following set of tasks designed to help you understand the concepts of deploying and operating container-based applications:
+
+* Deploy a Node.js-based app by using S2I and Kubernetes Deployment objects.
+* Set up a continuous delivery (CD) pipeline to automatically push source code changes.
+* Explore logging.
+* Experience self-healing of applications.
+* Explore configuration management through configmaps, secrets, and environment variables.
+* Use persistent storage to share data across pod restarts.
+* Explore networking within Kubernetes and applications.
+* Familiarize yourself with ROSA and Kubernetes functionality.
+* Automatically scale pods based on load by using the Horizontal Pod Autoscaler.
+* Use AWS Controllers for Kubernetes (ACK) to deploy and use an S3 bucket.
+
+This lab uses either the ROSA CLI or ROSA web user interface (UI).
diff --git a/cloud_experts_tutorials/cloud-experts-deploying-application/cloud-experts-deploying-application-lab-overview.adoc b/cloud_experts_tutorials/cloud-experts-deploying-application/cloud-experts-deploying-application-lab-overview.adoc
new file mode 100644
index 000000000000..c85afbf7ad6d
--- /dev/null
+++ b/cloud_experts_tutorials/cloud-experts-deploying-application/cloud-experts-deploying-application-lab-overview.adoc
@@ -0,0 +1,279 @@
+:_mod-docs-content-type: ASSEMBLY
+[id="cloud-experts-deploying-application-lab-overview"]
+= Tutorial: Deploying an application
+include::_attributes/attributes-openshift-dedicated.adoc[]
+:context: cloud-experts-deploying-application-intro
+
+toc::[]
+
+//rosaworkshop.io content metadata
+//Brought into ROSA product docs 22-JAN-2024
+
+== Lab overview
+
+=== Lab resources
+
+* link:https://github.com/openshift-cs/ostoy[Source code for the OSToy application]
+* link:https://quay.io/ostoylab/ostoy-frontend[OSToy front-end container image]
+* link:https://quay.io/ostoylab/ostoy-microservice[OSToy microservice container image]
+* Deployment Definition YAML files:
++
+.`ostoy-frontend-deployment.yaml`
++
+[source,yaml]
+----
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+ name: ostoy-pvc
+spec:
+ accessModes:
+ - ReadWriteOnce
+ resources:
+ requests:
+ storage: 1Gi
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: ostoy-frontend
+ labels:
+ app: ostoy
+spec:
+ selector:
+ matchLabels:
+ app: ostoy-frontend
+ strategy:
+ type: Recreate
+ replicas: 1
+ template:
+ metadata:
+ labels:
+ app: ostoy-frontend
+ spec:
+ # Uncomment to use with ACK portion of the workshop
+ # If you chose a different service account name please replace it.
+ # serviceAccount: ostoy-sa
+ containers:
+ - name: ostoy-frontend
+ securityContext:
+ allowPrivilegeEscalation: false
+ runAsNonRoot: true
+ seccompProfile:
+ type: RuntimeDefault
+ capabilities:
+ drop:
+ - ALL
+ image: quay.io/ostoylab/ostoy-frontend:1.6.0
+ imagePullPolicy: IfNotPresent
+ ports:
+ - name: ostoy-port
+ containerPort: 8080
+ resources:
+ requests:
+ memory: "256Mi"
+ cpu: "100m"
+ limits:
+ memory: "512Mi"
+ cpu: "200m"
+ volumeMounts:
+ - name: configvol
+ mountPath: /var/config
+ - name: secretvol
+ mountPath: /var/secret
+ - name: datavol
+ mountPath: /var/demo_files
+ livenessProbe:
+ httpGet:
+ path: /health
+ port: 8080
+ initialDelaySeconds: 10
+ periodSeconds: 5
+ env:
+ - name: ENV_TOY_SECRET
+ valueFrom:
+ secretKeyRef:
+ name: ostoy-secret-env
+ key: ENV_TOY_SECRET
+ - name: MICROSERVICE_NAME
+ value: OSTOY_MICROSERVICE_SVC
+ - name: NAMESPACE
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+ volumes:
+ - name: configvol
+ configMap:
+ name: ostoy-configmap-files
+ - name: secretvol
+ secret:
+ defaultMode: 420
+ secretName: ostoy-secret
+ - name: datavol
+ persistentVolumeClaim:
+ claimName: ostoy-pvc
+---
+apiVersion: v1
+kind: Service
+metadata:
+ name: ostoy-frontend-svc
+ labels:
+ app: ostoy-frontend
+spec:
+ type: ClusterIP
+ ports:
+ - port: 8080
+ targetPort: ostoy-port
+ protocol: TCP
+ name: ostoy
+ selector:
+ app: ostoy-frontend
+---
+apiVersion: route.openshift.io/v1
+kind: Route
+metadata:
+ name: ostoy-route
+spec:
+ to:
+ kind: Service
+ name: ostoy-frontend-svc
+---
+apiVersion: v1
+kind: Secret
+metadata:
+ name: ostoy-secret-env
+type: Opaque
+data:
+ ENV_TOY_SECRET: VGhpcyBpcyBhIHRlc3Q=
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: ostoy-configmap-files
+data:
+ config.json: '{ "default": "123" }'
+---
+apiVersion: v1
+kind: Secret
+metadata:
+ name: ostoy-secret
+data:
+ secret.txt: VVNFUk5BTUU9bXlfdXNlcgpQQVNTV09SRD1AT3RCbCVYQXAhIzYzMlk1RndDQE1UUWsKU01UUD1sb2NhbGhvc3QKU01UUF9QT1JUPTI1
+type: Opaque
+----
++
+.`ostoy-microservice-deployment.yaml`
++
+[source,yaml]
+----
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: ostoy-microservice
+ labels:
+ app: ostoy
+spec:
+ selector:
+ matchLabels:
+ app: ostoy-microservice
+ replicas: 1
+ template:
+ metadata:
+ labels:
+ app: ostoy-microservice
+ spec:
+ containers:
+ - name: ostoy-microservice
+ securityContext:
+ allowPrivilegeEscalation: false
+ runAsNonRoot: true
+ seccompProfile:
+ type: RuntimeDefault
+ capabilities:
+ drop:
+ - ALL
+ image: quay.io/ostoylab/ostoy-microservice:1.5.0
+ imagePullPolicy: IfNotPresent
+ ports:
+ - containerPort: 8080
+ protocol: TCP
+ resources:
+ requests:
+ memory: "128Mi"
+ cpu: "50m"
+ limits:
+ memory: "256Mi"
+ cpu: "100m"
+---
+apiVersion: v1
+kind: Service
+metadata:
+ name: ostoy-microservice-svc
+ labels:
+ app: ostoy-microservice
+spec:
+ type: ClusterIP
+ ports:
+ - port: 8080
+ targetPort: 8080
+ protocol: TCP
+ selector:
+ app: ostoy-microservice
+----
+* S3 bucket manifest for ACK S3
++
+.`s3-bucket.yaml`
++
+[source,yaml]
+----
+apiVersion: s3.services.k8s.aws/v1alpha1
+kind: Bucket
+metadata:
+ name: ostoy-bucket
+ namespace: ostoy
+spec:
+ name: ostoy-bucket
+----
+
+[NOTE]
+====
+To simplify deployment of the OSToy application, all of the objects required in the above deployment manifests are grouped together. For a typical enterprise deployment, a separate manifest file for each Kubernetes object is recommended.
+====
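+
+If you want to apply the manifests directly, a minimal sequence might look like the following, assuming you saved the files with the names shown above; the workshop walks through deployment step by step later:
+
+[source,terminal]
+----
+$ oc new-project ostoy
+$ oc apply -f ostoy-frontend-deployment.yaml
+$ oc apply -f ostoy-microservice-deployment.yaml
+----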
+
+=== About the OSToy application
+
+OSToy is a simple Node.js application that you will deploy to a ROSA cluster to help explore the functionality of Kubernetes. This application has a user interface where you can:
+
+* Write messages to the log (stdout / stderr).
+* Intentionally crash the application to view self-healing.
+* Toggle a liveness probe and monitor OpenShift behavior.
+* Read config maps, secrets, and env variables.
+* If connected to shared storage, read and write files.
+* Check network connectivity, intra-cluster DNS, and intra-cluster communication with the included microservice.
+* Increase the load to watch the pods automatically scale by using the Horizontal Pod Autoscaler.
+* Optional: Connect to an AWS S3 bucket to read and write objects.
+
+=== OSToy Application Diagram
+
+image::ostoy-arch.png[OSToy architecture diagram]
+
+=== Understanding the OSToy UI
+
+image::ostoy-homepage.png[Preview of the OSToy homepage]
+
+1. Shows the pod name that served your browser the page.
+2. *Home:* The main page of the application, where you can perform the functions that this lab explores.
+3. *Persistent Storage:* Allows you to write data to the persistent volume bound to this application.
+4. *Config Maps:* Shows the contents of configmaps available to the application and the key:value pairs.
+5. *Secrets:* Shows the contents of secrets available to the application and the key:value pairs.
+6. *ENV Variables:* Shows the environment variables available to the application.
+7. *Networking:* Tools to illustrate networking within the application.
+8. *Pod Auto Scaling:* Tool to increase the load of the pods and test the HPA.
+9. *ACK S3:* Optional: Integrate with AWS S3 to read and write objects to a bucket.
++
+[NOTE]
+====
+To see the "ACK S3" section of OSToy, you must complete the ACK section of this workshop. If you decide not to complete that section, the OSToy application still functions.
+====
++
+10. *About:* Displays more information about the application.
diff --git a/cloud_experts_tutorials/cloud-experts-deploying-application/cloud-experts-deploying-application-prerequisites.adoc b/cloud_experts_tutorials/cloud-experts-deploying-application/cloud-experts-deploying-application-prerequisites.adoc
new file mode 100644
index 000000000000..a8b663d98d5e
--- /dev/null
+++ b/cloud_experts_tutorials/cloud-experts-deploying-application/cloud-experts-deploying-application-prerequisites.adoc
@@ -0,0 +1,24 @@
+:_mod-docs-content-type: ASSEMBLY
+[id="cloud-experts-deploying-application-prerequisites"]
+= Tutorial: Deploying an application
+include::_attributes/attributes-openshift-dedicated.adoc[]
+:context: cloud-experts-deploying-application-prerequisites
+
+toc::[]
+
+//rosaworkshop.io content metadata
+//Brought into ROSA product docs 22-JAN-2024
+
+== Prerequisites
+
+. A provisioned ROSA cluster
++
+This lab assumes that you have access to a successfully provisioned ROSA cluster. If you have not yet created a ROSA cluster, see xref:../../rosa_getting_started/rosa-quickstart-guide-ui.adoc#rosa-getting-started-prerequisites_rosa-quickstart-guide-ui[Red Hat OpenShift Service on AWS quickstart guide] for more information.
+
+. The OpenShift Command Line Interface (CLI)
++
+For more information, see xref:../../cli_reference/openshift_cli/getting-started-cli.adoc#cli-getting-started[Getting started with the OpenShift CLI].
+
+. A GitHub Account
++
+Use your existing GitHub account or register at link:https://github.com/signup[https://github.com/signup].
diff --git a/distr_tracing/distr_tracing_config/images b/cloud_experts_tutorials/cloud-experts-deploying-application/images
similarity index 100%
rename from distr_tracing/distr_tracing_config/images
rename to cloud_experts_tutorials/cloud-experts-deploying-application/images
diff --git a/rosa_cli/modules b/cloud_experts_tutorials/cloud-experts-deploying-application/modules
similarity index 100%
rename from rosa_cli/modules
rename to cloud_experts_tutorials/cloud-experts-deploying-application/modules
diff --git a/cloud_experts_tutorials/cloud-experts-deploying-application/snippets b/cloud_experts_tutorials/cloud-experts-deploying-application/snippets
new file mode 120000
index 000000000000..9d58b92e5058
--- /dev/null
+++ b/cloud_experts_tutorials/cloud-experts-deploying-application/snippets
@@ -0,0 +1 @@
+../snippets/
\ No newline at end of file
diff --git a/cloud_experts_tutorials/cloud-experts-dynamic-certificate-custom-domain.adoc b/cloud_experts_tutorials/cloud-experts-dynamic-certificate-custom-domain.adoc
new file mode 100644
index 000000000000..42ac22737fb6
--- /dev/null
+++ b/cloud_experts_tutorials/cloud-experts-dynamic-certificate-custom-domain.adoc
@@ -0,0 +1,544 @@
+:_mod-docs-content-type: ASSEMBLY
+[id="cloud-experts-dynamic-certificate-custom-domain"]
+= Tutorial: Dynamically issuing certificates using the cert-manager Operator on ROSA
+include::_attributes/attributes-openshift-dedicated.adoc[]
+:context: cloud-experts-dynamic-certificate-custom-domain
+
+toc::[]
+
+//Mobb content metadata
+//Brought into ROSA product docs 2023-09-20
+//---
+//date: '2022-10-11'
+//title: Dynamic Certificates for ROSA Custom Domain
+//weight: 1
+//tags: ["AWS", "ROSA"]
+//authors:
+// Kevin Collins
+//---
+
+While wildcard certificates provide simplicity by securing all first-level subdomains of a given domain with a single certificate, other use cases can require the use of individual certificates per domain.
+
+Learn how to use the link:https://docs.openshift.com/container-platform/latest/security/cert_manager_operator/index.html[cert-manager Operator for Red Hat OpenShift] and link:https://letsencrypt.org/[Let's Encrypt] to dynamically issue certificates for routes created using a custom domain.
+
+[id="cloud-experts-dynamic-certificate-custom-domain-prerequisites"]
+== Prerequisites
+
+* A ROSA cluster
+* A user account with `cluster-admin` privileges
+* The OpenShift CLI (`oc`)
+* The Amazon Web Services (AWS) CLI (`aws`)
+* A unique domain, such as `*.apps..io`
+* An Amazon Route 53 public hosted zone for the above domain
+
+[id="cloud-experts-dynamic-certificate-custom-domain-environment-setup"]
+== Setting up your environment
+
+. Configure the following environment variables:
++
+[source,terminal]
+----
+$ export DOMAIN=apps..io <1>
+$ export EMAIL= <2>
+$ export AWS_PAGER=""
+$ export CLUSTER_NAME=$(oc get infrastructure cluster -o=jsonpath="{.status.infrastructureName}" | sed 's/-[a-z0-9]\{5\}$//')
+$ export OIDC_ENDPOINT=$(oc get authentication.config.openshift.io cluster -o json | jq -r .spec.serviceAccountIssuer | sed 's|^https://||')
+$ export REGION=$(oc get infrastructure cluster -o=jsonpath="{.status.platformStatus.aws.region}")
+$ export AWS_ACCOUNT_ID=$(aws sts get-caller-identity --query Account --output text)
+$ export SCRATCH="/tmp/${CLUSTER_NAME}/dynamic-certs"
+$ mkdir -p ${SCRATCH}
+----
+<1> The custom domain.
+<2> The email address that Let's Encrypt will use to send notifications about your certificates.
+. Ensure all fields output correctly before moving to the next section:
++
+[source,terminal]
+----
+$ echo "Cluster: ${CLUSTER_NAME}, Region: ${REGION}, OIDC Endpoint: ${OIDC_ENDPOINT}, AWS Account ID: ${AWS_ACCOUNT_ID}"
+----
+
+[id="cloud-experts-dynamic-certificate-prep-aws"]
+== Preparing your AWS account
+
+When cert-manager requests a certificate from Let's Encrypt (or another ACME certificate issuer), Let's Encrypt servers validate that you control the domain name in that certificate using _challenges_. For this tutorial, you are using a link:https://letsencrypt.org/docs/challenge-types/#dns-01-challenge[DNS-01 challenge] that proves that you control the DNS for your domain name by putting a specific value in a TXT record under that domain name. This is all done automatically by cert-manager. To allow cert-manager permission to modify the Amazon Route 53 public hosted zone for your domain, you need to create an Identity and Access Management (IAM) role with specific policy permissions and a trust relationship to allow access to the pod.
+
+The public hosted zone that is used in this tutorial is in the same AWS account as the ROSA cluster. If your public hosted zone is in a different account, a few additional steps for link:https://cert-manager.io/docs/configuration/acme/dns01/route53/#cross-account-access[Cross Account Access] are required.
+
+. Retrieve the Amazon Route 53 public hosted zone ID:
++
+[NOTE]
+====
+This command looks for a public hosted zone that matches the custom domain you specified earlier as the `DOMAIN` environment variable. You can manually specify the Amazon Route 53 public hosted zone by running `export ZONE_ID=`, replacing `` with your specific Amazon Route 53 public hosted zone ID.
+====
++
+[source,terminal]
+----
+$ export ZONE_ID=$(aws route53 list-hosted-zones-by-name --output json \
+ --dns-name "${DOMAIN}." --query 'HostedZones[0]'.Id --out text | sed 's/\/hostedzone\///')
+----
++
+. Create an AWS IAM policy document for the `cert-manager` Operator that provides the ability to update _only_ the specified public hosted zone:
++
+[source,terminal]
+----
+$ cat << EOF > "${SCRATCH}/cert-manager-policy.json"
+{
+ "Version": "2012-10-17",
+ "Statement": [
+ {
+ "Effect": "Allow",
+ "Action": "route53:GetChange",
+ "Resource": "arn:aws:route53:::change/*"
+ },
+ {
+ "Effect": "Allow",
+ "Action": [
+ "route53:ChangeResourceRecordSets",
+ "route53:ListResourceRecordSets"
+ ],
+ "Resource": "arn:aws:route53:::hostedzone/${ZONE_ID}"
+ },
+ {
+ "Effect": "Allow",
+ "Action": "route53:ListHostedZonesByName",
+ "Resource": "*"
+ }
+ ]
+}
+EOF
+----
++
+. Create the IAM policy using the file you created in the previous step:
++
+[source,terminal]
+----
+$ POLICY_ARN=$(aws iam create-policy --policy-name "${CLUSTER_NAME}-cert-manager-policy" \
+ --policy-document file://${SCRATCH}/cert-manager-policy.json \
+ --query 'Policy.Arn' --output text)
+----
+. Create an AWS IAM trust policy for the `cert-manager` Operator:
++
+[source,terminal]
+----
+$ cat << EOF > "${SCRATCH}/trust-policy.json"
+{
+ "Version": "2012-10-17",
+ "Statement": [
+ {
+ "Effect": "Allow",
+ "Condition": {
+ "StringEquals" : {
+ "${OIDC_ENDPOINT}:sub": "system:serviceaccount:cert-manager:cert-manager"
+ }
+ },
+ "Principal": {
+ "Federated": "arn:aws:iam::$AWS_ACCOUNT_ID:oidc-provider/${OIDC_ENDPOINT}"
+ },
+ "Action": "sts:AssumeRoleWithWebIdentity"
+ }
+ ]
+}
+EOF
+----
++
+. Create an IAM role for the `cert-manager` Operator using the trust policy you created in the previous step:
++
+[source,terminal]
+----
+$ ROLE_ARN=$(aws iam create-role --role-name "${CLUSTER_NAME}-cert-manager-operator" \
+ --assume-role-policy-document "file://${SCRATCH}/trust-policy.json" \
+ --query Role.Arn --output text)
+----
++
+. Attach the permissions policy to the role:
++
+[source,terminal]
+----
+$ aws iam attach-role-policy --role-name "${CLUSTER_NAME}-cert-manager-operator" \
+ --policy-arn ${POLICY_ARN}
+----
+
+[id="cloud-experts-dynamic-certificate-custom-domain-install-cert-man-op"]
+== Installing the cert-manager Operator
+
+. Create a project to install the `cert-manager` Operator into:
++
+[source,terminal]
+----
+$ oc new-project cert-manager-operator
+----
++
+[IMPORTANT]
+====
+Do not attempt to use more than one `cert-manager` Operator in your cluster. If you have a community `cert-manager` Operator installed in your cluster, you must uninstall it before installing the `cert-manager` Operator for Red Hat OpenShift.
+====
++
+. Install the `cert-manager` Operator for Red Hat OpenShift:
++
+[source,terminal]
+----
+$ cat << EOF | oc apply -f -
+apiVersion: operators.coreos.com/v1
+kind: OperatorGroup
+metadata:
+ name: openshift-cert-manager-operator-group
+ namespace: cert-manager-operator
+spec:
+ targetNamespaces:
+ - cert-manager-operator
+---
+apiVersion: operators.coreos.com/v1alpha1
+kind: Subscription
+metadata:
+ name: openshift-cert-manager-operator
+ namespace: cert-manager-operator
+spec:
+ channel: stable-v1
+ installPlanApproval: Automatic
+ name: openshift-cert-manager-operator
+ source: redhat-operators
+ sourceNamespace: openshift-marketplace
+EOF
+----
++
+[NOTE]
+====
+It takes a few minutes for this Operator to install and complete its setup.
+====
++
+. Verify that the `cert-manager` Operator is running:
++
+[source,terminal]
+----
+$ oc -n cert-manager-operator get pods
+----
++
+.Example output
+[source,text]
+----
+NAME READY STATUS RESTARTS AGE
+cert-manager-operator-controller-manager-84b8799db5-gv8mx 2/2 Running 0 12s
+----
++
+. Annotate the service account used by the `cert-manager` pods with the AWS IAM role you created earlier:
++
+[source,terminal]
+----
+$ oc -n cert-manager annotate serviceaccount cert-manager eks.amazonaws.com/role-arn=${ROLE_ARN}
+----
++
+. Restart the existing `cert-manager` controller pod by running the following command:
++
+[source,terminal]
+----
+$ oc -n cert-manager delete pods -l app.kubernetes.io/name=cert-manager
+----
++
+. Patch the Operator's configuration to use external nameservers to prevent DNS-01 challenge resolution issues:
++
+[source,terminal]
+----
+$ oc patch certmanager.operator.openshift.io/cluster --type merge \
+ -p '{"spec":{"controllerConfig":{"overrideArgs":["--dns01-recursive-nameservers-only","--dns01-recursive-nameservers=1.1.1.1:53"]}}}'
+----
++
+. Create a `ClusterIssuer` resource to use Let's Encrypt by running the following command:
++
+[source,terminal]
+----
+$ cat << EOF | oc apply -f -
+apiVersion: cert-manager.io/v1
+kind: ClusterIssuer
+metadata:
+ name: letsencrypt-production
+spec:
+ acme:
+ server: https://acme-v02.api.letsencrypt.org/directory
+ email: ${EMAIL}
+ # This key doesn't exist, cert-manager creates it
+ privateKeySecretRef:
+ name: prod-letsencrypt-issuer-account-key
+ solvers:
+ - dns01:
+ route53:
+ hostedZoneID: ${ZONE_ID}
+ region: ${REGION}
+ secretAccessKeySecretRef:
+ name: ''
+EOF
+----
++
+. Verify the `ClusterIssuer` resource is ready:
++
+[source,terminal]
+----
+$ oc get clusterissuer.cert-manager.io/letsencrypt-production
+----
++
+.Example output
+[source,text]
+----
+NAME READY AGE
+letsencrypt-production True 47s
+----
+
+[id="cloud-experts-dynamic-certificate-custom-domain-create-cd-ingress-con"]
+== Creating a custom domain Ingress Controller
+
+. Create a new project:
++
+[source,terminal]
+----
+$ oc new-project custom-domain-ingress
+----
++
+. Create and configure a certificate resource to provision a certificate for the custom domain Ingress Controller:
++
+[NOTE]
+====
+The following example uses a single domain certificate. SAN and wildcard certificates are also supported.
+====
++
+[source,terminal]
+----
+$ cat << EOF | oc apply -f -
+apiVersion: cert-manager.io/v1
+kind: Certificate
+metadata:
+ name: custom-domain-ingress-cert
+ namespace: custom-domain-ingress
+spec:
+ secretName: custom-domain-ingress-cert-tls
+ issuerRef:
+ name: letsencrypt-production
+ kind: ClusterIssuer
+ commonName: "${DOMAIN}"
+ dnsNames:
+ - "${DOMAIN}"
+EOF
+----
++
+. Verify the certificate has been issued:
++
+[NOTE]
+====
+It takes a few minutes for this certificate to be issued by Let's Encrypt. If it takes longer than 5 minutes, run `oc -n custom-domain-ingress describe certificate.cert-manager.io/custom-domain-ingress-cert` to see any issues reported by cert-manager.
+====
++
+[source,terminal]
+----
+$ oc -n custom-domain-ingress get certificate.cert-manager.io/custom-domain-ingress-cert
+----
++
+.Example output
+[source,text]
+----
+NAME READY SECRET AGE
+custom-domain-ingress-cert True custom-domain-ingress-cert-tls 9m53s
+----
++
+. Create a new `CustomDomain` custom resource (CR):
++
+[source,terminal]
+----
+$ cat << EOF | oc apply -f -
+apiVersion: managed.openshift.io/v1alpha1
+kind: CustomDomain
+metadata:
+ name: custom-domain-ingress
+spec:
+ domain: ${DOMAIN}
+ scope: External
+ loadBalancerType: NLB
+ certificate:
+ name: custom-domain-ingress-cert-tls
+ namespace: custom-domain-ingress
+EOF
+----
+. Verify that your custom domain Ingress Controller has been deployed and has a `Ready` status:
++
+[source,terminal]
+----
+$ oc get customdomains
+----
++
+.Example output
+[source,terminal]
+----
+NAME ENDPOINT DOMAIN STATUS
+custom-domain-ingress tfoxdx.custom-domain-ingress.cluster.1234.p1.openshiftapps.com example.com Ready
+----
++
+. Prepare a document with the necessary DNS changes to enable DNS resolution for your custom domain Ingress Controller:
++
+[source,terminal]
+----
+$ INGRESS=$(oc get customdomain.managed.openshift.io/custom-domain-ingress --template={{.status.endpoint}})
+$ cat << EOF > "${SCRATCH}/create-cname.json"
+{
+ "Comment":"Add CNAME to custom domain endpoint",
+ "Changes":[{
+ "Action":"CREATE",
+ "ResourceRecordSet":{
+ "Name": "*.${DOMAIN}",
+ "Type":"CNAME",
+ "TTL":30,
+ "ResourceRecords":[{
+ "Value": "${INGRESS}"
+ }]
+ }
+ }]
+}
+EOF
+----
++
+. Submit your changes to Amazon Route 53 for propagation:
++
+[source,terminal]
+----
+$ aws route53 change-resource-record-sets \
+ --hosted-zone-id ${ZONE_ID} \
+ --change-batch file://${SCRATCH}/create-cname.json
+----
++
+[NOTE]
+====
+While the wildcard CNAME record avoids the need to create a new record for every new application you deploy using the custom domain Ingress Controller, the certificate that each of these applications use *is not* a wildcard certificate.
+====
+
+[id="cloud-experts-dynamic-certificate-custom-domain-config-dynamic-cert"]
+== Configuring dynamic certificates for custom domain routes
+
+Now you can expose cluster applications on any first-level subdomains of the specified domain, but the connection will not be secured with a TLS certificate that matches the domain of the application. To ensure these cluster applications have valid certificates for each domain name, configure cert-manager to dynamically issue a certificate to every new route created under this domain.
+
+. Create the necessary OpenShift resources cert-manager requires to manage certificates for OpenShift routes.
++
+This step creates a new deployment (and therefore a pod) that monitors annotated routes in the cluster. If the `issuer-kind` and `issuer-name` annotations are found on a new route, it requests a new certificate from the Issuer (in this case, the ClusterIssuer) that is unique to this route and honors the hostname that was specified when the route was created.
++
+[NOTE]
+====
+If the cluster does not have access to GitHub, you can save the raw contents locally and run `oc apply -f localfilename.yaml -n cert-manager`.
+====
++
+[source,terminal]
+----
+$ oc -n cert-manager apply -f https://github.com/cert-manager/openshift-routes/releases/latest/download/cert-manager-openshift-routes.yaml
+----
++
+The following additional OpenShift resources are also created in this step:
++
+* `ClusterRole` - grants permissions to watch and update the routes across the cluster
+* `ServiceAccount` - uses permissions to run the newly created pod
+* `ClusterRoleBinding` - binds these two resources
++
+. Ensure that the new `cert-manager-openshift-routes` pod is running successfully:
++
+[source,terminal]
+----
+$ oc -n cert-manager get pods
+----
++
+.Example output
+[source,terminal]
+----
+NAME READY STATUS RESTARTS AGE
+cert-manager-866d8f788c-9kspc 1/1 Running 0 4h21m
+cert-manager-cainjector-6885c585bd-znws8 1/1 Running 0 4h41m
+cert-manager-openshift-routes-75b6bb44cd-f8kd5 1/1 Running 0 6s
+cert-manager-webhook-8498785dd9-bvfdf 1/1 Running 0 4h41m
+----
+
+[id="cloud-experts-dynamic-certificate-custom-domain-config-deploy-sample-app"]
+== Deploying a sample application
+
+Now that dynamic certificates are configured, you can deploy a sample application to confirm that certificates are provisioned and trusted when you expose a new route.
+
+. Create a new project for your sample application:
++
+[source,terminal]
+----
+$ oc new-project hello-world
+----
++
+. Deploy a hello world application:
++
+[source,terminal]
+----
+$ oc -n hello-world new-app --image=docker.io/openshift/hello-openshift
+----
++
+. Create a route to expose the application from outside the cluster:
++
+[source,terminal]
+----
+$ oc -n hello-world create route edge --service=hello-openshift hello-openshift-tls --hostname hello.${DOMAIN}
+----
++
+. Verify the certificate for the route is untrusted:
++
+[source,terminal]
+----
+$ curl -I https://hello.${DOMAIN}
+----
++
+.Example output
+[source,text]
+----
+curl: (60) SSL: no alternative certificate subject name matches target host name 'hello.example.com'
+More details here: https://curl.se/docs/sslcerts.html
+
+curl failed to verify the legitimacy of the server and therefore could not
+establish a secure connection to it. To learn more about this situation and
+how to fix it, please visit the web page mentioned above.
+----
++
+. Annotate the route to trigger cert-manager to provision a certificate for the custom domain:
++
+[source,terminal]
+----
+$ oc -n hello-world annotate route hello-openshift-tls cert-manager.io/issuer-kind=ClusterIssuer cert-manager.io/issuer-name=letsencrypt-production
+----
++
+[NOTE]
+====
+It takes 2-3 minutes for the certificate to be created. The renewal of the certificate will automatically be managed by the `cert-manager` Operator as it approaches expiration.
+====
+. Verify the certificate for the route is now trusted:
++
+[source,terminal]
+----
+$ curl -I https://hello.${DOMAIN}
+----
++
+.Example output
+[source,terminal]
+----
+HTTP/2 200
+date: Thu, 05 Oct 2023 23:45:33 GMT
+content-length: 17
+content-type: text/plain; charset=utf-8
+set-cookie: 52e4465485b6fb4f8a1b1bed128d0f3b=68676068bb32d24f0f558f094ed8e4d7; path=/; HttpOnly; Secure; SameSite=None
+cache-control: private
+----
+
+[id="cloud-experts-dynamic-certificate-custom-domain-troubleshoot"]
+== Troubleshooting dynamic certificate provisioning
+[NOTE]
+====
+Certificate validation usually takes 2-3 minutes to complete.
+====
+
+If annotating your route does not trigger certificate creation, run `oc describe` against the `certificate`, `certificaterequest`, `order`, and `challenge` resources to view the events or reasons that can help you identify the cause of the issue.
+
+[source,terminal]
+----
+$ oc get certificate,certificaterequest,order,challenge
+----
+
+For troubleshooting, see this link:https://cert-manager.io/docs/faq/acme/[helpful guide on debugging certificates].
+
+You can also use the link:https://cert-manager.io/docs/reference/cmctl/[cmctl] CLI tool for various certificate management activities, such as checking the status of certificates and testing renewals.
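+
+For example, a quick status check with `cmctl` against the certificate created earlier in this tutorial might look like this:
+
+[source,terminal]
+----
+$ cmctl status certificate custom-domain-ingress-cert -n custom-domain-ingress
+----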
diff --git a/cloud_experts_tutorials/cloud-experts-entra-id-idp.adoc b/cloud_experts_tutorials/cloud-experts-entra-id-idp.adoc
new file mode 100644
index 000000000000..ed250313f800
--- /dev/null
+++ b/cloud_experts_tutorials/cloud-experts-entra-id-idp.adoc
@@ -0,0 +1,259 @@
+:_mod-docs-content-type: ASSEMBLY
+[id="cloud-experts-entra-id-idp"]
+= Tutorial: Configuring Microsoft Entra ID (formerly Azure Active Directory) as an identity provider
+include::_attributes/attributes-openshift-dedicated.adoc[]
+:context: cloud-experts-entra-id-idp
+
+toc::[]
+
+//Mobb content metadata
+//Brought into ROSA product docs 2023-09-18
+// ---
+// date: '2022-09-23'
+// title: Configure Azure AD as an OIDC identity provider for ROSA/OSD
+// tags: ["Azure", "ROSA", "OSD"]
+// authors:
+// - Michael McNeill
+// - Andrea Bozzoni
+// - Steve Mirman
+// - Thatcher Hubbard
+// ---
+
+You can configure Microsoft Entra ID (formerly Azure Active Directory) as the cluster identity provider in {product-title} (ROSA).
+
+This tutorial guides you to complete the following tasks:
+
+. Register a new application in Entra ID for authentication.
+. Configure the application registration in Entra ID to include optional and group claims in tokens.
+. Configure the {product-title} cluster to use Entra ID as the identity provider.
+. Grant additional permissions to individual groups.
+
+[id="cloud-experts-entra-id-idp-prerequisites"]
+== Prerequisites
+
+* You created a set of security groups and assigned users by following link:https://learn.microsoft.com/en-us/azure/active-directory/fundamentals/how-to-manage-groups[the Microsoft documentation].
+
+[id="cloud-experts-entra-id-idp-register-application"]
+== Registering a new application in Entra ID for authentication
+
+To register your application in Entra ID, first create the OAuth callback URL, then register your application.
+
+.Procedure
+
+. Create the cluster's OAuth callback URL by changing the specified variables and running the following command:
++
+[NOTE]
+====
+Remember to save this callback URL; it will be required later in the process.
+====
++
+[source,terminal]
+----
+$ domain=$(rosa describe cluster -c <cluster_name> | grep "DNS" | grep -oE '\S+.openshiftapps.com')
+$ echo "OAuth callback URL: https://oauth-openshift.apps.$domain/oauth2callback/AAD"
+----
++
+The "AAD" directory at the end of the OAuth callback URL must match the OAuth identity provider name that you will set up later in this process.
+
+. To create the Entra ID application, log in to the Azure portal and select the link:https://portal.azure.com/#blade/Microsoft_AAD_RegisteredApps/ApplicationsListBlade[App registrations blade]. Then, select *New registration* to create a new application.
++
+image:azure-portal_app-registrations-blade.png[Azure Portal - App registrations blade]
+
+. Name the application, for example `openshift-auth`.
+. Select *Web* from the _Redirect URI_ dropdown and enter the value of the OAuth callback URL you retrieved in the previous step.
+. After providing the required information, click *Register* to create the application.
++
+image:azure-portal_register-an-application-page.png[Azure Portal - Register an application page]
++
+. Select the *Certificates & secrets* sub-blade and select *New client secret*.
++
+image:azure-portal_certificates-secrets-page.png[Azure Portal - Certificates and secrets page]
+
+. Complete the requested details and store the generated client secret value. This secret is required later in this process.
++
+[IMPORTANT]
+====
+After initial setup, you cannot view the client secret again. If you did not record the client secret, you must generate a new one.
+====
++
+image:azure-portal_add-a-client-secret-page.png[Azure Portal - Add a Client Secret page]
+
+. Select the *Overview* sub-blade and note the `Application (client) ID` and `Directory (tenant) ID`. You will need these values in a future step.
++
+image:azure-portal_copy-client-secret-page.png[Azure Portal - Copy Client Secret page]
+
+[id="rosa-mobb-entra-id-configure-claims"]
+== Configuring the application registration in Entra ID to include optional and group claims
+
+So that {product-title} has enough information to create the user's account, you must configure Entra ID to give two optional claims: `email` and `preferred_username`. For more information about optional claims in Entra ID, see link:https://learn.microsoft.com/en-us/azure/active-directory/develop/optional-claims[the Microsoft documentation].
+
+In addition to individual user authentication, {product-title} provides group claim functionality. This functionality allows an OpenID Connect (OIDC) identity provider, such as Entra ID, to offer a user's group membership for use within {product-title}.
+
+[discrete]
+[id="rosa-mobb-entra-id-configure-optional-claims"]
+=== Configuring optional claims
+
+You can configure the optional claims in Entra ID.
+
+.Procedure
+. Click the *Token configuration* sub-blade and select the *Add optional claim* button.
++
+image:azure-portal_optional-claims-page.png[Azure Portal - Add Optional Claims Page]
+
+. Select the *ID* radio button.
++
+image:azure-portal_add-optional-claims-page.png[Azure Portal - Add Optional Claims - Token Type]
+
+. Select the *email* claim checkbox.
++
+image:azure-portal_add-optional-email-claims-page.png[Azure Portal - Add Optional Claims - email]
+
+. Select the `preferred_username` claim checkbox. Then, click *Add* to configure the *email* and *preferred_username* claims for your Entra ID application.
++
+image:azure-portal_add-optional-preferred_username-claims-page.png[Azure Portal - Add Optional Claims - preferred_username]
+
+. A dialog box appears at the top of the page. Follow the prompt to enable the necessary Microsoft Graph permissions.
++
+image:azure-portal_add-optional-claims-graph-permissions-prompt.png[Azure Portal - Add Optional Claims - Graph Permissions Prompt]
+
+[discrete]
+[id="rosa-mobb-entra-id-configure-group-claims"]
+=== Configuring group claims (optional)
+
+Configure Entra ID to offer a groups claim.
+
+.Procedure
+. From the *Token configuration* sub-blade, click *Add groups claim*.
++
+image:azure-portal_optional-group-claims-page.png[Azure Portal - Add Groups Claim Page]
+
+. To configure group claims for your Entra ID application, select *Security groups* and then click *Add*.
++
+[NOTE]
+====
+In this example, the group claim includes all of the security groups that a user is a member of. In a real production environment, ensure that the group claim only includes groups that apply to {product-title}.
+====
++
+image:azure-portal_edit-group-claims-page.png[Azure Portal - Edit Groups Claim Page]
+
+[id="cloud-experts-entra-id-idp-configure-cluster"]
+== Configuring the {product-title} cluster to use Entra ID as the identity provider
+
+You must configure {product-title} to use Entra ID as its identity provider.
+
+Although ROSA offers the ability to configure identity providers by using {cluster-manager}, this tutorial uses the ROSA CLI to configure the cluster's OAuth provider to use Entra ID as its identity provider. Before configuring the identity provider, set the necessary variables for the identity provider configuration.
+
+.Procedure
+
+. Create the variables by running the following command:
++
+[source,terminal]
+----
+$ CLUSTER_NAME=example-cluster <1>
+$ IDP_NAME=AAD <2>
+$ APP_ID=yyyyyyyy-yyyy-yyyy-yyyy-yyyyyyyyyyyy <3>
+$ CLIENT_SECRET=xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx <4>
+$ TENANT_ID=zzzzzzzz-zzzz-zzzz-zzzz-zzzzzzzzzzzz <5>
+----
++
+--
+<1> Replace this with the name of your ROSA cluster.
+<2> Replace this value with the name you used in the OAuth callback URL that you generated earlier in this process.
+<3> Replace this with the Application (client) ID.
+<4> Replace this with the Client Secret.
+<5> Replace this with the Directory (tenant) ID.
+--
+
+. Configure the cluster's OAuth provider by running one of the following commands. If you enabled group claims, ensure that you use the `--groups-claims groups` argument.
+** If you enabled group claims, run the following command:
++
+[source,terminal]
+----
+$ rosa create idp \
+--cluster ${CLUSTER_NAME} \
+--type openid \
+--name ${IDP_NAME} \
+--client-id ${APP_ID} \
+--client-secret ${CLIENT_SECRET} \
+--issuer-url https://login.microsoftonline.com/${TENANT_ID}/v2.0 \
+--email-claims email \
+--name-claims name \
+--username-claims preferred_username \
+--extra-scopes email,profile \
+--groups-claims groups
+----
+
+** If you did not enable group claims, run the following command:
++
+[source,terminal]
+----
+$ rosa create idp \
+--cluster ${CLUSTER_NAME} \
+--type openid \
+--name ${IDP_NAME} \
+--client-id ${APP_ID} \
+--client-secret ${CLIENT_SECRET} \
+--issuer-url https://login.microsoftonline.com/${TENANT_ID}/v2.0 \
+--email-claims email \
+--name-claims name \
+--username-claims preferred_username \
+--extra-scopes email,profile
+----
+
+After a few minutes, the cluster authentication Operator reconciles your changes, and you can log in to the cluster by using Entra ID.
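+
+One way to confirm that the reconciliation has finished is to check the cluster authentication Operator and the OAuth pods. This is an optional sanity check; exact pod names vary:
+
+[source,terminal]
+----
+$ oc get clusteroperator authentication
+$ oc get pods -n openshift-authentication
+----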
+
+[id="rosa-mobb-azure-oidc-grant-permissions"]
+== Granting additional permissions to individual users and groups
+
+When you first log in, you might notice that you have very limited permissions. By default, {product-title} only grants you the ability to create new projects, or namespaces, in the cluster. Other projects are restricted from view.
+
+You must manually grant these additional abilities to individual users and groups.
+
+[discrete]
+[id="rosa-mobb-azure-oidc-grant-permissions-users"]
+=== Granting additional permissions to individual users
+
+{product-title} includes a significant number of preconfigured roles, including the `cluster-admin` role that grants full access and control over the cluster.
+
+.Procedure
+* Grant a user access to the `cluster-admin` role by running the following command:
++
+[source,terminal]
+----
+$ rosa grant user cluster-admin \
+ --user=<idp_user_name> \ <1>
+ --cluster=${CLUSTER_NAME}
+----
++
+--
+<1> Provide the Entra ID username that you want to have cluster admin permissions.
+--
+
+[discrete]
+[id="cloud-experts-entra-id-idp-additional-permissions-groups"]
+=== Granting additional permissions to individual groups
+
+If you opted to enable group claims, the cluster OAuth provider automatically creates or updates the user's group memberships by using the group ID. The cluster OAuth provider does not automatically create `RoleBindings` and `ClusterRoleBindings` for the groups that are created; you are responsible for creating those bindings by using your own processes.
+
+To grant an automatically generated group access to the `cluster-admin` role, you must create a `ClusterRoleBinding` to the group ID.
+
+.Procedure
+* Create the `ClusterRoleBinding` by running the following command:
++
+[source,terminal]
+----
+$ oc create clusterrolebinding cluster-admin-group \
+--clusterrole=cluster-admin \
+--group=<group_id> <1>
+----
++
+--
+<1> Provide the Entra ID group ID that you want to have cluster admin permissions.
+--
++
+Now, any user in the specified group automatically receives `cluster-admin` access.
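++
+After a user from that group logs in, you can verify the synced membership. This is an optional check, and the group object is named by its Entra ID group ID:
++
+[source,terminal]
+----
+$ oc get group <group_id> -o yaml
+----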
+
+[id="cloud-experts-entra-id-idp-additional-resources"]
+[role="_additional-resources"]
+== Additional resources
+
+For more information about how to use RBAC to define and apply permissions in {product-title}, see link:https://docs.openshift.com/container-platform/latest/authentication/using-rbac.html[the {product-title} documentation].
diff --git a/cloud_experts_tutorials/cloud-experts-external-dns.adoc b/cloud_experts_tutorials/cloud-experts-external-dns.adoc
new file mode 100644
index 000000000000..39820fb86c97
--- /dev/null
+++ b/cloud_experts_tutorials/cloud-experts-external-dns.adoc
@@ -0,0 +1,349 @@
+:_mod-docs-content-type: ASSEMBLY
+[id="cloud-experts-external-dns"]
+= Tutorial: Deploying the External DNS Operator on ROSA
+include::_attributes/attributes-openshift-dedicated.adoc[]
+:context: cloud-experts-external-dns
+
+toc::[]
+
+//Mobb content metadata
+//Brought into ROSA product docs 2023-09-20
+//---
+//date: '2021-06-10'
+//title: External DNS for ROSA custom domain
+//weight: 1
+//tags: ["AWS", "ROSA"]
+//authors:
+// - Chris Kang
+// - Dustin Scott
+//---
+
+[NOTE]
+====
+Starting with {product-title} 4.14, the Custom Domain Operator is deprecated. To manage Ingress in {product-title} 4.14, use the Ingress Operator. The functionality is unchanged for {product-title} 4.13 and earlier versions.
+====
+
+Configuring the xref:../applications/deployments/osd-config-custom-domains-applications.adoc[Custom Domain Operator] requires a wildcard CNAME DNS record in your Amazon Route 53 hosted zone. If you do not want to use a wildcard record, you can use the `External DNS` Operator to create individual entries for routes.
+
+Use this tutorial to deploy and configure the `External DNS` Operator with a custom domain in {product-title} (ROSA).
+
+[IMPORTANT]
+====
+The `External DNS` Operator does not support STS using IAM Roles for Service Accounts (IRSA) and uses long-lived Identity and Access Management (IAM) credentials instead. This tutorial will be updated when the Operator supports STS.
+====
+
+[id="cloud-experts-external-dns-prerequisites"]
+== Prerequisites
+
+* A ROSA cluster
+* A user account with `dedicated-admin` privileges
+* The OpenShift CLI (`oc`)
+* The Amazon Web Services (AWS) CLI (`aws`)
+* A unique domain, such as `*.apps.<company_name>.io`
+* An Amazon Route 53 public hosted zone for the above domain
+
+[id="cloud-experts-external-dns-environment-setup"]
+== Setting up your environment
+
+. Configure the following environment variables, replacing the `DOMAIN` value in the first line with your custom domain:
++
+[source,terminal]
+----
+$ export DOMAIN=apps.<company_name>.io <1>
+$ export AWS_PAGER=""
+$ export CLUSTER_NAME=$(oc get infrastructure cluster -o=jsonpath="{.status.infrastructureName}" | sed 's/-[a-z0-9]\{5\}$//')
+$ export REGION=$(oc get infrastructure cluster -o=jsonpath="{.status.platformStatus.aws.region}")
+$ export AWS_ACCOUNT_ID=$(aws sts get-caller-identity --query Account --output text)
+$ export SCRATCH="/tmp/${CLUSTER_NAME}/external-dns"
+$ mkdir -p ${SCRATCH}
+----
+<1> The custom domain.
+. Ensure all fields output correctly before moving to the next section:
++
+[source,terminal]
+----
+$ echo "Cluster: ${CLUSTER_NAME}, Region: ${REGION}, AWS Account ID: ${AWS_ACCOUNT_ID}"
+----
+
+[id="cloud-experts-external-dns-custom-domain-setup"]
+== Setting up your custom domain
+
+ROSA manages secondary Ingress Controllers using the `Custom Domain` Operator. Use the following procedure to deploy a secondary Ingress Controller using a custom domain.
+
+.Prerequisites
+
+* A unique domain, such as `*.apps.<company_name>.io`
+* A custom SAN or wildcard certificate, such as `CN=*.apps.<company_name>.io`
+
+.Procedure
+
+. Create a new project:
++
+[source,terminal]
+----
+$ oc new-project external-dns-operator
+----
+
+. Create a new TLS secret from a private key and a public certificate, where `fullchain.pem` is your full wildcard certificate chain (including any intermediaries) and `privkey.pem` is your wildcard certificate's private key:
++
+[source,terminal]
+----
+$ oc -n external-dns-operator create secret tls external-dns-tls --cert=fullchain.pem --key=privkey.pem
+----
+
+. Create a new `CustomDomain` custom resource (CR):
++
+.Example `external-dns-custom-domain.yaml`
+[source,yaml]
+----
+apiVersion: managed.openshift.io/v1alpha1
+kind: CustomDomain
+metadata:
+ name: external-dns
+spec:
+ domain: apps.<company_name>.io <1>
+ scope: External
+ loadBalancerType: NLB
+ certificate:
+ name: external-dns-tls
+ namespace: external-dns-operator
+----
+<1> The custom domain.
+
+. Apply the CR:
++
+[source,terminal]
+----
+$ oc apply -f external-dns-custom-domain.yaml
+----
+
+. Verify that your custom domain Ingress Controller has been deployed and has a `Ready` status:
++
+[source,terminal]
+----
+$ oc get customdomains
+----
++
+.Example output
+[source,terminal]
+----
+NAME ENDPOINT DOMAIN STATUS
+external-dns xxrywp..cluster-01.opln.s1.openshiftapps.com *.apps.<company_name>.io Ready
+----
+
+[id="cloud-experts-external-dns-prepare-aws-account"]
+== Preparing your AWS account
+
+. Retrieve the Amazon Route 53 public hosted zone ID:
++
+[source,terminal]
+----
+$ export ZONE_ID=$(aws route53 list-hosted-zones-by-name --output json \
+ --dns-name "${DOMAIN}." --query 'HostedZones[0]'.Id --out text | sed 's/\/hostedzone\///')
+----
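++
+Before continuing, you can confirm that the zone ID resolved:
++
+[source,terminal]
+----
+$ echo ${ZONE_ID}
+----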
++
+. Create an AWS IAM Policy document that allows the `External DNS` Operator to update _only_ the custom domain public hosted zone:
++
+[source,terminal]
+----
+$ cat << EOF > "${SCRATCH}/external-dns-policy.json"
+{
+ "Version": "2012-10-17",
+ "Statement": [
+ {
+ "Effect": "Allow",
+ "Action": [
+ "route53:ChangeResourceRecordSets"
+ ],
+ "Resource": [
+ "arn:aws:route53:::hostedzone/${ZONE_ID}"
+ ]
+ },
+ {
+ "Effect": "Allow",
+ "Action": [
+ "route53:ListHostedZones",
+ "route53:ListResourceRecordSets"
+ ],
+ "Resource": [
+ "*"
+ ]
+ }
+ ]
+}
+EOF
+----
++
+. Create an AWS IAM policy:
++
+[source,terminal]
+----
+$ export POLICY_ARN=$(aws iam create-policy --policy-name "${CLUSTER_NAME}-AllowExternalDNSUpdates" \
+ --policy-document file://${SCRATCH}/external-dns-policy.json \
+ --query 'Policy.Arn' --output text)
+----
++
+. Create an AWS IAM user:
++
+[source,terminal]
+----
+$ aws iam create-user --user-name "${CLUSTER_NAME}-external-dns-operator"
+----
+. Attach the policy:
++
+[source,terminal]
+----
+$ aws iam attach-user-policy --user-name "${CLUSTER_NAME}-external-dns-operator" --policy-arn $POLICY_ARN
+----
++
+[NOTE]
+====
+This will be changed to STS using IRSA in the future.
+====
+. Create AWS keys for the IAM user:
++
+[source,terminal]
+----
+$ SECRET_ACCESS_KEY=$(aws iam create-access-key --user-name "${CLUSTER_NAME}-external-dns-operator")
+----
+. Create static credentials:
++
+[source,terminal]
+----
+$ cat << EOF > "${SCRATCH}/credentials"
+[default]
+aws_access_key_id = $(echo $SECRET_ACCESS_KEY | jq -r '.AccessKey.AccessKeyId')
+aws_secret_access_key = $(echo $SECRET_ACCESS_KEY | jq -r '.AccessKey.SecretAccessKey')
+EOF
+----
+
+[id="cloud-experts-external-dns-install-external-dns-operator"]
+== Installing the External DNS Operator
+
+. Install the `External DNS` Operator from OperatorHub:
++
+[source,terminal]
+----
+$ cat << EOF | oc apply -f -
+apiVersion: operators.coreos.com/v1
+kind: OperatorGroup
+metadata:
+ name: external-dns-group
+ namespace: external-dns-operator
+spec:
+ targetNamespaces:
+ - external-dns-operator
+---
+apiVersion: operators.coreos.com/v1alpha1
+kind: Subscription
+metadata:
+ name: external-dns-operator
+ namespace: external-dns-operator
+spec:
+ channel: stable-v1.1
+ installPlanApproval: Automatic
+ name: external-dns-operator
+ source: redhat-operators
+ sourceNamespace: openshift-marketplace
+EOF
+----
++
+. Wait until the `External DNS` Operator is running:
++
+[source,terminal]
+----
+$ oc rollout status deploy external-dns-operator --timeout=300s
+----
++
+. Create a secret from the AWS IAM user credentials:
++
+[source,terminal]
+----
+$ oc -n external-dns-operator create secret generic external-dns \
+ --from-file "${SCRATCH}/credentials"
+----
+. Deploy the `ExternalDNS` controller:
++
+[source,terminal]
+----
+$ cat << EOF | oc apply -f -
+apiVersion: externaldns.olm.openshift.io/v1beta1
+kind: ExternalDNS
+metadata:
+ name: ${DOMAIN}
+spec:
+ domains:
+ - filterType: Include
+ matchType: Exact
+ name: ${DOMAIN}
+ provider:
+ aws:
+ credentials:
+ name: external-dns
+ type: AWS
+ source:
+ openshiftRouteOptions:
+ routerName: external-dns
+ type: OpenShiftRoute
+ zones:
+ - ${ZONE_ID}
+EOF
+----
+. Wait until the controller is running:
++
+[source,terminal]
+----
+$ oc rollout status deploy external-dns-${DOMAIN} --timeout=300s
+----
+
+[id="cloud-experts-external-dns-deploy-a-sample-application"]
+== Deploying a sample application
+
+Now that the `ExternalDNS` controller is running, you can deploy a sample application to confirm that the custom domain is configured and trusted when you expose a new route.
+
+. Create a new project for your sample application:
++
+[source,terminal]
+----
+$ oc new-project hello-world
+----
++
+. Deploy a hello world application:
++
+[source,terminal]
+----
+$ oc new-app -n hello-world --image=docker.io/openshift/hello-openshift
+----
++
+. Create a route for the application specifying your custom domain name:
++
+[source,terminal]
+----
+$ oc -n hello-world create route edge --service=hello-openshift hello-openshift-tls \
+--hostname hello-openshift.${DOMAIN}
+----
+. Check if the DNS record was created automatically by ExternalDNS:
++
+[NOTE]
+====
+It can take a few minutes for the record to appear in Amazon Route 53.
+====
++
+[source,terminal]
+----
+$ aws route53 list-resource-record-sets --hosted-zone-id ${ZONE_ID} \
+ --query "ResourceRecordSets[?Type == 'CNAME']" | grep hello-openshift
+----
+. Optional: You can also view the TXT records that indicate they were created by ExternalDNS:
++
+[source,terminal]
+----
+$ aws route53 list-resource-record-sets --hosted-zone-id ${ZONE_ID} \
+ --query "ResourceRecordSets[?Type == 'TXT']" | grep ${DOMAIN}
+----
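+. Optional: After the CNAME record propagates, confirm that the application responds on its custom hostname. This check assumes that your wildcard certificate is publicly trusted; the `hello-openshift` image replies with a short greeting:
++
+[source,terminal]
+----
+$ curl -s https://hello-openshift.${DOMAIN}
+----
++
+.Example output
++
+[source,terminal]
+----
+Hello OpenShift!
+----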
+. In your browser, navigate to your custom console domain, where you should see the OpenShift login page:
++
+[source,terminal]
+----
+$ echo console.${DOMAIN}
+----
diff --git a/cloud_experts_tutorials/cloud-experts-getting-started/_attributes b/cloud_experts_tutorials/cloud-experts-getting-started/_attributes
new file mode 120000
index 000000000000..f27fd275ea6b
--- /dev/null
+++ b/cloud_experts_tutorials/cloud-experts-getting-started/_attributes
@@ -0,0 +1 @@
+../_attributes/
\ No newline at end of file
diff --git a/cloud_experts_tutorials/cloud-experts-getting-started/cloud-experts-getting-started-accessing.adoc b/cloud_experts_tutorials/cloud-experts-getting-started/cloud-experts-getting-started-accessing.adoc
new file mode 100644
index 000000000000..8bc000512c85
--- /dev/null
+++ b/cloud_experts_tutorials/cloud-experts-getting-started/cloud-experts-getting-started-accessing.adoc
@@ -0,0 +1,80 @@
+:_mod-docs-content-type: ASSEMBLY
+[id="cloud-experts-getting-started-accessing"]
+= Tutorial: Accessing your cluster
+include::_attributes/attributes-openshift-dedicated.adoc[]
+:context: cloud-experts-getting-started-accessing
+
+toc::[]
+
+//rosaworkshop.io content metadata
+//Brought into ROSA product docs 2023-11-30
+
+You can connect to your cluster using the command line interface (CLI) or the console user interface (UI).
+
+== Accessing your cluster using the CLI
+
+To access the cluster using the CLI, you must have the `oc` CLI installed. If you are following the tutorials, you already installed the `oc` CLI.
+
+. Log in to the link:https://console.redhat.com/openshift[Red Hat console].
+. Click your username in the top right corner.
+. Click *Copy Login Command*.
++
+image::cloud-experts-getting-started-accessing-copy-login.png[]
+
+. This opens a new tab with a choice of identity providers (IDPs). Click the IDP you want to use. For example, "rosa-github".
++
+image::cloud-experts-getting-started-accessing-copy-token.png[]
+
+. A new tab opens. Click *Display token*.
+
+. Run the following command in your terminal:
++
+[source,terminal]
+----
+$ oc login --token=sha256~GBAfS4JQ0t1UTKYHbWAK6OUWGUkdMGz000000000000 --server=https://api.my-rosa-cluster.abcd.p1.openshiftapps.com:6443
+----
++
+.Example output
++
+[source,terminal]
+----
+Logged into "https://api.my-rosa-cluster.abcd.p1.openshiftapps.com:6443" as "rosa-user" using the token provided.
+
+You have access to 79 projects, the list has been suppressed. You can list all projects with 'oc projects'
+
+Using project "default".
+----
+
+. Confirm that you are logged in by running the following command:
++
+[source,terminal]
+----
+$ oc whoami
+----
++
+.Example output
++
+[source,terminal]
+----
+rosa-user
+----
+
+. You can now access your cluster.
+
+== Accessing your cluster using the web console
+. Log in to the link:https://console.redhat.com/openshift/[Red Hat console].
+.. To retrieve the console URL, run:
++
+[source,terminal]
+----
+$ rosa describe cluster -c <cluster_name> | grep Console
+----
+
+. Click your IDP. For example, "rosa-github".
++
+image::cloud-experts-getting-started-accessing-copy-token.png[]
+
+. Enter your user credentials.
+. You should be logged in. If you are following the tutorials, you will be a cluster-admin and should see a web console with the *Administrator* panel visible.
++
+image::cloud-experts-getting-started-accessing-logged.png[]
\ No newline at end of file
diff --git a/cloud_experts_tutorials/cloud-experts-getting-started/cloud-experts-getting-started-admin-rights.adoc b/cloud_experts_tutorials/cloud-experts-getting-started/cloud-experts-getting-started-admin-rights.adoc
new file mode 100644
index 000000000000..89d68e5125be
--- /dev/null
+++ b/cloud_experts_tutorials/cloud-experts-getting-started/cloud-experts-getting-started-admin-rights.adoc
@@ -0,0 +1,80 @@
+:_mod-docs-content-type: ASSEMBLY
+[id="cloud-experts-getting-started-admin-rights"]
+= Tutorial: Granting admin privileges
+include::_attributes/attributes-openshift-dedicated.adoc[]
+:context: cloud-experts-getting-started-admin-rights
+
+toc::[]
+
+//rosaworkshop.io content metadata
+//Brought into ROSA product docs 2023-11-30
+
+Administration (admin) privileges are not automatically granted to users that you add to your cluster. If you want to grant admin-level privileges to certain users, you will need to manually grant them to each user. You can grant admin privileges from either the ROSA command line interface (CLI) or the Red Hat OpenShift Cluster Manager web user interface (UI).
+
+Red Hat offers two types of admin privileges:
+
+* `cluster-admin`: `cluster-admin` privileges give the admin user full privileges within the cluster.
+
+* `dedicated-admin`: `dedicated-admin` privileges allow the admin user to complete most administrative tasks with certain limitations to prevent cluster damage. It is best practice to use `dedicated-admin` when elevated privileges are needed.
+
+For more information on admin privileges, see the xref:../../rosa_install_access_delete_clusters/rosa-sts-accessing-cluster.adoc#rosa-create-cluster-admins_rosa-sts-accessing-cluster[administering a cluster] documentation.
+
+== Using the ROSA CLI
+
+. Assuming you are the user who created the cluster, run one of the following commands to grant admin privileges:
++
+* For `cluster-admin`:
++
+[source,terminal]
+----
+$ rosa grant user cluster-admin --user <idp_user_name> --cluster=<cluster_name>
+----
++
+* For `dedicated-admin`:
++
+[source,terminal]
+----
+$ rosa grant user dedicated-admin --user <idp_user_name> --cluster=<cluster_name>
+----
+
+. Verify that the admin privileges were added by running the following command:
++
+[source,terminal]
+----
+$ rosa list users --cluster=<cluster_name>
+----
++
+.Example output
++
+[source,terminal]
+----
+$ rosa list users --cluster=my-rosa-cluster
+ID                GROUPS
+<idp_user_name>   cluster-admins
+----
+
+. If you are currently logged in to the Red Hat console, log out and log back in to the cluster to see the new "Administrator Panel" perspective. You might need to use an incognito or private window.
++
+image:cloud-experts-getting-started-admin-rights-admin-panel.png[]
+
+. You can also test that admin privileges were added to your account by running the following command. Only a `cluster-admin` user can run this command without errors.
++
+[source,terminal]
+----
+$ oc get all -n openshift-apiserver
+----
+
+== Using the Red Hat OpenShift Cluster Manager UI
+
+. Log in to the link:https://console.redhat.com/openshift[Red Hat OpenShift Cluster Manager console].
+. Select your cluster.
+. Click the *Access Control* tab.
+. Click the *Cluster roles and Access* tab in the sidebar.
+. Click *Add user*.
++
+image::cloud-experts-getting-started-admin-rights-access-control.png[]
+
+. On the pop-up screen, enter the user ID.
+. Select whether you want to grant the user `cluster-admins` or `dedicated-admins` privileges.
++
+image::cloud-experts-getting-started-admin-rights-add-user2.png[]
\ No newline at end of file
diff --git a/cloud_experts_tutorials/cloud-experts-getting-started/cloud-experts-getting-started-admin.adoc b/cloud_experts_tutorials/cloud-experts-getting-started/cloud-experts-getting-started-admin.adoc
new file mode 100644
index 000000000000..4291e29b29d6
--- /dev/null
+++ b/cloud_experts_tutorials/cloud-experts-getting-started/cloud-experts-getting-started-admin.adoc
@@ -0,0 +1,83 @@
+:_mod-docs-content-type: ASSEMBLY
+[id="cloud-experts-getting-started-admin"]
+= Tutorial: Creating an admin user
+include::_attributes/attributes-openshift-dedicated.adoc[]
+:context: cloud-experts-getting-started-admin
+
+toc::[]
+
+//rosaworkshop.io content metadata
+//Brought into ROSA product docs 2023-11-27
+
+Creating an administration (admin) user allows you to access your cluster quickly. Follow these steps to create an admin user.
+
+[NOTE]
+====
+An admin user works well in this tutorial setting. For actual deployment, use a xref:../../authentication/sd-configuring-identity-providers.adoc#sd-configuring-identity-providers[formal identity provider] to access the cluster and grant the user admin privileges.
+====
+
+. Run the following command to create the admin user:
++
+[source,terminal]
+----
+$ rosa create admin --cluster=<cluster_name>
+----
++
+.Example output
++
+[source,terminal]
+----
+W: It is recommended to add an identity provider to login to this cluster. See 'rosa create idp --help' for more information.
+I: Admin account has been added to cluster 'my-rosa-cluster'. It may take up to a minute for the account to become active.
+I: To login, run the following command:
+oc login https://api.my-rosa-cluster.abcd.p1.openshiftapps.com:6443 \
+--username cluster-admin \
+--password FWGYL-2mkJI-00000-00000
+----
+
+. Copy the login command returned to you in the previous step and paste it into your terminal. This logs you in to the cluster by using the CLI so that you can start using the cluster.
++
+[source,terminal]
+----
+$ oc login https://api.my-rosa-cluster.abcd.p1.openshiftapps.com:6443 \
+> --username cluster-admin \
+> --password FWGYL-2mkJI-00000-00000
+----
++
+.Example output
++
+[source,terminal]
+----
+Login successful.
+
+You have access to 79 projects, the list has been suppressed. You can list all projects with 'oc projects'
+
+Using project "default".
+----
+
+. To check that you are logged in as the admin user, run one of the following commands:
++
+* Option 1:
++
+[source,terminal]
+----
+$ oc whoami
+----
++
+.Example output
++
+[source,terminal]
+----
+cluster-admin
+----
++
+* Option 2:
++
+[source,terminal]
+----
+$ oc get all -n openshift-apiserver
+----
++
+Only an admin user can run this command without errors.
+
+. You can now use the cluster as an admin user, which will suffice for this tutorial. For actual deployment, it is highly recommended to set up an identity provider, which is explained in the xref:../../cloud_experts_tutorials/cloud-experts-getting-started/cloud-experts-getting-started-idp.adoc#cloud-experts-getting-started-idp[next tutorial].
\ No newline at end of file
diff --git a/cloud_experts_tutorials/cloud-experts-getting-started/cloud-experts-getting-started-autoscaling.adoc b/cloud_experts_tutorials/cloud-experts-getting-started/cloud-experts-getting-started-autoscaling.adoc
new file mode 100644
index 000000000000..850d1a546234
--- /dev/null
+++ b/cloud_experts_tutorials/cloud-experts-getting-started/cloud-experts-getting-started-autoscaling.adoc
@@ -0,0 +1,85 @@
+:_mod-docs-content-type: ASSEMBLY
+[id="cloud-experts-getting-started-autoscaling"]
+= Tutorial: Autoscaling
+include::_attributes/attributes-openshift-dedicated.adoc[]
+:context: cloud-experts-getting-started-autoscaling
+
+toc::[]
+
+//rosaworkshop.io content metadata
+//Brought into ROSA product docs 2024-01-04
+
+The xref:../../rosa_cluster_admin/rosa_nodes/rosa-nodes-about-autoscaling-nodes.adoc#rosa-nodes-about-autoscaling-nodes[cluster autoscaler] adds or removes worker nodes from a cluster based on pod resources.
+
+The cluster autoscaler increases the size of the cluster when:
+
+* Pods fail to schedule on the current nodes due to insufficient resources.
+* Another node is necessary to meet deployment needs.
+
+The cluster autoscaler does not increase the cluster resources beyond the limits that you specify.
+
+The cluster autoscaler decreases the size of the cluster when:
+
+* Some nodes are consistently not needed for a significant period. For example, when a node has low resource use and all of its important pods can fit on other nodes.
+
+== Enabling autoscaling for an existing machine pool using the CLI
+
+[NOTE]
+====
+Cluster autoscaling can be enabled at cluster creation and when creating a new machine pool by using the `--enable-autoscaling` option.
+====
+
+. Autoscaling is set on a per-machine pool basis. To find out which machine pools are available for autoscaling, run the following command:
++
+[source,terminal]
+----
+$ rosa list machinepools -c <cluster_name>
+----
++
+.Example output
++
+[source,terminal]
+----
+ID AUTOSCALING REPLICAS INSTANCE TYPE LABELS TAINTS AVAILABILITY ZONES
+Default No 2 m5.xlarge us-east-1a
+----
+
+. Run the following command to add autoscaling to an available machine pool:
++
+[source,terminal]
+----
+$ rosa edit machinepool -c <cluster_name> <machinepool_id> --enable-autoscaling --min-replicas=<minimum_replicas> --max-replicas=<maximum_replicas>
+----
++
+.Example input
++
+[source,terminal]
+----
+$ rosa edit machinepool -c my-rosa-cluster --enable-autoscaling Default --min-replicas=2 --max-replicas=4
+----
++
+The above command creates an autoscaler for the worker nodes that scales between 2 and 4 nodes depending on the resources that your workloads request.
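++
+If you want to see the autoscaler react, one way is to create a deployment with large CPU requests and watch the node count grow. The project name, image, and request sizes below are illustrative and are not part of the official procedure:
++
+[source,terminal]
+----
+$ oc new-project autoscale-test
+$ oc create deployment scale-test --image=registry.access.redhat.com/ubi9/ubi -- sleep infinity
+$ oc set resources deployment scale-test --requests=cpu=2
+$ oc scale deployment scale-test --replicas=10
+$ oc get nodes -w
+----
++
+Delete the test project afterward, for example with `oc delete project autoscale-test`, so that the autoscaler can scale the nodes back down.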
+
+== Enabling autoscaling for an existing machine pool using the UI
+
+[NOTE]
+====
+Cluster autoscaling can be enabled at cluster creation by checking the *Enable autoscaling* checkbox when creating machine pools.
+====
+
+. Go to the *Machine pools* tab and click the three dots on the right.
+. Click *Scale*, then *Enable autoscaling*.
+. Run the following command to confirm that autoscaling was added:
++
+[source,terminal]
+----
+$ rosa list machinepools -c <cluster_name>
+----
++
+.Example output
++
+[source,terminal]
+----
+ID AUTOSCALING REPLICAS INSTANCE TYPE LABELS TAINTS AVAILABILITY ZONES
+Default Yes 2-4 m5.xlarge us-east-1a
+----
\ No newline at end of file
diff --git a/cloud_experts_tutorials/cloud-experts-getting-started/cloud-experts-getting-started-deleting.adoc b/cloud_experts_tutorials/cloud-experts-getting-started/cloud-experts-getting-started-deleting.adoc
new file mode 100644
index 000000000000..5d224f59dce5
--- /dev/null
+++ b/cloud_experts_tutorials/cloud-experts-getting-started/cloud-experts-getting-started-deleting.adoc
@@ -0,0 +1,91 @@
+:_mod-docs-content-type: ASSEMBLY
+[id="cloud-experts-getting-started-deleting"]
+= Tutorial: Deleting your cluster
+include::_attributes/attributes-openshift-dedicated.adoc[]
+:context: cloud-experts-getting-started-deleting
+
+toc::[]
+
+//rosaworkshop.io content metadata
+//Brought into ROSA product docs 2024-01-11
+
+You can delete your {product-title} (ROSA) cluster using either the command line interface (CLI) or the user interface (UI).
+
+== Deleting a ROSA cluster using the CLI
+
+. *Optional:* List your clusters to make sure you are deleting the correct one by running the following command:
++
+[source,terminal]
+----
+$ rosa list clusters
+----
+
+. Delete a cluster by running the following command:
++
+[source,terminal]
+----
+$ rosa delete cluster --cluster <cluster_name>
+----
++
+[WARNING]
+====
+This command cannot be undone.
+====
+
+. The CLI prompts you to confirm that you want to delete the cluster. Press *y* and then *Enter*. The cluster and all its associated infrastructure will be deleted.
++
+[NOTE]
+====
+All AWS STS and IAM roles and policies remain after the cluster is deleted. You must delete them manually by following the steps below once the cluster deletion is complete.
+====
+
+. The CLI outputs the commands to delete the OpenID Connect (OIDC) provider and the Operator IAM roles that were created. Wait until the cluster finishes deleting before deleting these resources. Perform a quick status check by running the following command:
++
+[source,terminal]
+----
+$ rosa list clusters
+----
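++
+If you prefer to follow the deletion in real time, you can also stream the uninstall logs while the cluster is still deleting:
++
+[source,terminal]
+----
+$ rosa logs uninstall -c <cluster_name> --watch
+----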
+
+. Once the cluster is deleted, delete the OIDC provider by running the following command:
++
+[source,terminal]
+----
+$ rosa delete oidc-provider -c <cluster_id> --mode auto --yes
+----
+
+. Delete the Operator IAM roles by running the following command:
++
+[source,terminal]
+----
+$ rosa delete operator-roles -c <cluster_id> --mode auto --yes
+----
++
+[NOTE]
+====
+This command requires the cluster ID and not the cluster name.
+====
+
+. Only remove the remaining account roles if they are no longer needed by other clusters in the same account. If you want to create other ROSA clusters in this account, do not perform this step.
++
+To delete the account roles, you need to know the prefix used when creating them. The default is "ManagedOpenShift" unless you specified otherwise.
++
+Delete the account roles by running the following command:
++
+[source,terminal]
+----
+$ rosa delete account-roles --prefix <prefix> --mode auto --yes
+----
+
+== Deleting a ROSA cluster using the UI
+
+. Log in to the Red Hat OpenShift Cluster Manager, and locate the cluster you want to delete.
+
+. Click the three dots to the right of the cluster.
++
+image::cloud-experts-getting-started-deleting1.png[]
+
+. In the dropdown menu, click *Delete cluster*.
++
+image::cloud-experts-getting-started-deleting2.png[]
+
+. Enter the name of the cluster to confirm deletion, and click *Delete*.
\ No newline at end of file
diff --git a/cloud_experts_tutorials/cloud-experts-getting-started/cloud-experts-getting-started-deploying/_attributes b/cloud_experts_tutorials/cloud-experts-getting-started/cloud-experts-getting-started-deploying/_attributes
new file mode 120000
index 000000000000..f27fd275ea6b
--- /dev/null
+++ b/cloud_experts_tutorials/cloud-experts-getting-started/cloud-experts-getting-started-deploying/_attributes
@@ -0,0 +1 @@
+../_attributes/
\ No newline at end of file
diff --git a/cloud_experts_tutorials/cloud-experts-getting-started/cloud-experts-getting-started-deploying/cloud-experts-getting-started-choose-deployment-method.adoc b/cloud_experts_tutorials/cloud-experts-getting-started/cloud-experts-getting-started-deploying/cloud-experts-getting-started-choose-deployment-method.adoc
new file mode 100644
index 000000000000..07f038e7d22f
--- /dev/null
+++ b/cloud_experts_tutorials/cloud-experts-getting-started/cloud-experts-getting-started-deploying/cloud-experts-getting-started-choose-deployment-method.adoc
@@ -0,0 +1,24 @@
+:_mod-docs-content-type: ASSEMBLY
+[id="cloud-experts-getting-started-choose-deployment-method"]
+= Tutorial: Choosing a deployment method
+include::_attributes/attributes-openshift-dedicated.adoc[]
+:context: cloud-experts-getting-started-choose-deployment-method
+
+toc::[]
+
+//rosaworkshop.io content metadata
+//Brought into ROSA product docs 2023-11-16
+
+There are a few different ways to deploy a cluster, and this tutorial outlines each method. You only need to choose one based on your preferences and needs. Use the options below to find the deployment method that best fits your situation.
+
+== Deployment options
+
+For those who are thinking:
+
+* "Just tell me the commands I need to run!" - xref:../../cloud-experts-getting-started/cloud-experts-getting-started-deploying/cloud-experts-getting-started-simple-cli-guide.adoc#cloud-experts-getting-started-simple-cli-guide[Simple CLI guide]
+* "I do not like CLI tools. Give me a user interface!" - xref:../../cloud-experts-getting-started/cloud-experts-getting-started-deploying/cloud-experts-getting-started-simple-ui-guide.adoc#cloud-experts-getting-started-simple-ui-guide[Simple UI guide]
+* "I need details and want to use a CLI!" - xref:../../cloud-experts-getting-started/cloud-experts-getting-started-deploying/cloud-experts-getting-started-detailed-cli-guide.adoc#cloud-experts-getting-started-detailed-cli-guide[Detailed CLI guide]
+* "I need details and want to use a user interface!" xref:../../cloud-experts-getting-started/cloud-experts-getting-started-deploying/cloud-experts-getting-started-detailed-ui.adoc#cloud-experts-getting-started-detailed-ui[Detailed UI guide]
+* "I want to experiment with the newest technologies." - xref:../../cloud-experts-getting-started/cloud-experts-getting-started-deploying/cloud-experts-getting-started-hcp.adoc#cloud-experts-getting-started-detailed-ui-guide[ROSA with HCP]
+
+All of the above deployment options work well for this workshop. If you are doing this workshop for the first time, the xref:../../cloud-experts-getting-started/cloud-experts-getting-started-deploying/cloud-experts-getting-started-simple-cli-guide.adoc#cloud-experts-getting-started-simple-cli-guide[Simple CLI guide] is the simplest and recommended method.
\ No newline at end of file
diff --git a/cloud_experts_tutorials/cloud-experts-getting-started/cloud-experts-getting-started-deploying/cloud-experts-getting-started-detailed-cli-guide.adoc b/cloud_experts_tutorials/cloud-experts-getting-started/cloud-experts-getting-started-deploying/cloud-experts-getting-started-detailed-cli-guide.adoc
new file mode 100644
index 000000000000..eb903c3868ec
--- /dev/null
+++ b/cloud_experts_tutorials/cloud-experts-getting-started/cloud-experts-getting-started-deploying/cloud-experts-getting-started-detailed-cli-guide.adoc
@@ -0,0 +1,428 @@
+:_mod-docs-content-type: ASSEMBLY
+[id="cloud-experts-getting-started-detailed-cli-guide"]
+= Tutorial: Detailed CLI guide
+include::_attributes/attributes-openshift-dedicated.adoc[]
+:context: cloud-experts-getting-started-detailed-cli-guide
+
+toc::[]
+
+//rosaworkshop.io content metadata
+//Brought into ROSA product docs 2023-11-17
+
+This tutorial outlines the detailed steps to deploy a ROSA cluster using the ROSA CLI.
+
+//For a guide to deploying with a user interface, see the *xref needed*[Detailed user interface] page.
+
+== CLI deployment modes
+There are two modes with which to deploy a ROSA cluster. One is automatic, which is quicker and performs the manual work for you. The other is manual, which requires you to run extra commands but allows you to inspect the roles and policies being created. This tutorial documents both options.
+
+If you want to create a cluster quickly, use the automatic option. If you prefer exploring the roles and policies being created, use the manual option.
+
+Choose the deployment mode by using the `--mode` flag in the relevant commands.
+
+Valid options for `--mode` are:
+
+* *`manual`:* Roles and policies are created and saved in the current directory. You must manually run the provided commands as the next step. This option allows you to review the policies and roles before creating them.
+* *`auto`:* Roles and policies are created and applied automatically using the current AWS account.
+
+[TIP]
+====
+You can use either deployment method for this tutorial. The `auto` mode is faster and has fewer steps.
+====
+
+== Deployment workflow
+The overall deployment workflow follows these steps:
+
+. `rosa create account-roles` - This is executed only _once_ for each account. Once created, the account roles do *not* need to be created again for more clusters of the same y-stream version.
+. `rosa create cluster`
+. `rosa create operator-roles` - For manual mode only.
+. `rosa create oidc-provider` - For manual mode only.
+
+For each additional cluster in the same account for the same y-stream version, only step 2 is needed for automatic mode. Steps 2 through 4 are needed for manual mode.
+
+== Automatic mode
+Use this method if you want the ROSA CLI to automate the creation of the roles and policies to create your cluster quickly.
+
+=== Creating account roles
+If this is the _first time_ you are deploying ROSA in this account and you have _not_ yet created the account roles, then create the account-wide roles and policies, including Operator policies.
+
+Run the following command to create the account-wide roles:
+[source,terminal]
+----
+$ rosa create account-roles --mode auto --yes
+----
+
+.Example output
+[source,terminal]
+----
+I: Creating roles using 'arn:aws:iam::000000000000:user/rosa-user'
+I: Created role 'ManagedOpenShift-ControlPlane-Role' with ARN 'arn:aws:iam::000000000000:role/ManagedOpenShift-ControlPlane-Role'
+I: Created role 'ManagedOpenShift-Worker-Role' with ARN 'arn:aws:iam::000000000000:role/ManagedOpenShift-Worker-Role'
+I: Created role 'ManagedOpenShift-Support-Role' with ARN 'arn:aws:iam::000000000000:role/ManagedOpenShift-Support-Role'
+I: Created role 'ManagedOpenShift-Installer-Role' with ARN 'arn:aws:iam::000000000000:role/ManagedOpenShift-Installer-Role'
+I: Created policy with ARN 'arn:aws:iam::000000000000:policy/ManagedOpenShift-openshift-machine-api-aws-cloud-credentials'
+I: Created policy with ARN 'arn:aws:iam::000000000000:policy/ManagedOpenShift-openshift-cloud-credential-operator-cloud-crede'
+I: Created policy with ARN 'arn:aws:iam::000000000000:policy/ManagedOpenShift-openshift-image-registry-installer-cloud-creden'
+I: Created policy with ARN 'arn:aws:iam::000000000000:policy/ManagedOpenShift-openshift-ingress-operator-cloud-credentials'
+I: Created policy with ARN 'arn:aws:iam::000000000000:policy/ManagedOpenShift-openshift-cluster-csi-drivers-ebs-cloud-credent'
+I: To create a cluster with these roles, run the following command:
+ rosa create cluster --sts
+----
+
+=== Creating a cluster
+Run the following command to create a cluster with all the default options:
+[source,terminal]
+----
+$ rosa create cluster --cluster-name <cluster_name> --sts --mode auto --yes
+----
+
+[NOTE]
+====
+This will also create the required Operator roles and OIDC provider. If you want to see all available options for your cluster, use the `--help` flag or `--interactive` for interactive mode.
+====
+
+.Example input
+[source,terminal]
+----
+$ rosa create cluster --cluster-name my-rosa-cluster --sts --mode auto --yes
+----
+
+.Example output
+[source,terminal]
+----
+I: Creating cluster 'my-rosa-cluster'
+I: To view a list of clusters and their status, run 'rosa list clusters'
+I: Cluster 'my-rosa-cluster' has been created.
+I: Once the cluster is installed you will need to add an Identity Provider before you can login into the cluster. See 'rosa create idp --help' for more information.
+I: To determine when your cluster is Ready, run 'rosa describe cluster -c my-rosa-cluster'.
+I: To watch your cluster installation logs, run 'rosa logs install -c my-rosa-cluster --watch'.
+Name: my-rosa-cluster
+ID: 1mlhulb3bo0l54ojd0ji000000000000
+External ID:
+OpenShift Version:
+Channel Group: stable
+DNS: my-rosa-cluster.ibhp.p1.openshiftapps.com
+AWS Account: 000000000000
+API URL:
+Console URL:
+Region: us-west-2
+Multi-AZ: false
+Nodes:
+- Master: 3
+- Infra: 2
+- Compute: 2
+Network:
+- Service CIDR: 172.30.0.0/16
+- Machine CIDR: 10.0.0.0/16
+- Pod CIDR: 10.128.0.0/14
+- Host Prefix: /23
+STS Role ARN: arn:aws:iam::000000000000:role/ManagedOpenShift-Installer-Role
+Support Role ARN: arn:aws:iam::000000000000:role/ManagedOpenShift-Support-Role
+Instance IAM Roles:
+- Master: arn:aws:iam::000000000000:role/ManagedOpenShift-ControlPlane-Role
+- Worker: arn:aws:iam::000000000000:role/ManagedOpenShift-Worker-Role
+Operator IAM Roles:
+- arn:aws:iam::000000000000:role/my-rosa-cluster-openshift-image-registry-installer-cloud-credentials
+- arn:aws:iam::000000000000:role/my-rosa-cluster-openshift-ingress-operator-cloud-credentials
+- arn:aws:iam::000000000000:role/my-rosa-cluster-openshift-cluster-csi-drivers-ebs-cloud-credentials
+- arn:aws:iam::000000000000:role/my-rosa-cluster-openshift-machine-api-aws-cloud-credentials
+- arn:aws:iam::000000000000:role/my-rosa-cluster-openshift-cloud-credential-operator-cloud-credential-oper
+State: waiting (Waiting for OIDC configuration)
+Private: No
+Created: Oct 28 2021 20:28:09 UTC
+Details Page: https://console.redhat.com/openshift/details/s/1wupmiQy45xr1nN000000000000
+OIDC Endpoint URL: https://rh-oidc.s3.us-east-1.amazonaws.com/1mlhulb3bo0l54ojd0ji000000000000
+----
+
+==== Default configuration
+The default settings are as follows:
+
+* Nodes:
+** 3 control plane nodes
+** 2 infrastructure nodes
+** 2 worker nodes
+** No autoscaling
+** See the documentation on xref:../../../rosa_planning/rosa-sts-aws-prereqs.adoc#rosa-ec2-instances_rosa-sts-aws-prereqs[EC2 instances] for more details.
+* Region: As configured for the `aws` CLI
+* Networking IP ranges:
+** Machine CIDR: 10.0.0.0/16
+** Service CIDR: 172.30.0.0/16
+** Pod CIDR: 10.128.0.0/14
+* New VPC
+* Default AWS KMS key for encryption
+* The most recent version of OpenShift available to `rosa`
+* A single availability zone
+* Public cluster
+
+=== Checking the installation status
+. Run one of the following commands to check the status of your cluster:
++
+* For a detailed view of the status, run:
++
+[source,terminal]
+----
+$ rosa describe cluster --cluster <cluster_name>
+----
+* For an abridged view of the status, run:
++
+[source,terminal]
+----
+$ rosa list clusters
+----
+
+. The cluster state will change from "waiting" to "installing" to "ready". This will take about 40 minutes.
+
+. Once the state changes to "ready", your cluster is installed.
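++
+If you want to follow the installation as it happens, you can stream the install logs, as the CLI output also suggests. Replace the cluster name with your own:
++
+[source,terminal]
+----
+$ rosa logs install -c <cluster_name> --watch
+----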
+
+== Manual mode
+If you want to review the roles and policies before applying them to a cluster, use the manual method. This method requires running a few extra commands to create the roles and policies.
+
+This section uses the `--interactive` mode. See the documentation on xref:../../../rosa_install_access_delete_clusters/rosa-sts-interactive-mode-reference.adoc#rosa-sts-interactive-mode-reference[interactive mode] for a description of the fields in this section.
+
+=== Creating account roles
+. If this is the _first time_ you are deploying ROSA in this account and you have _not_ yet created the account roles, create the account-wide roles and policies, including the Operator policies. The command creates the needed JSON files for the required roles and policies for your account in the current directory. It also outputs the `aws` CLI commands that you need to run to create these objects.
++
+Run the following command to create the needed files and output the additional commands:
++
+[source,terminal]
+----
+$ rosa create account-roles --mode manual
+----
++
+.Example output
+[source,terminal]
+----
+I: All policy files saved to the current directory
+I: Run the following commands to create the account roles and policies:
+aws iam create-role \
+--role-name ManagedOpenShift-Worker-Role \
+--assume-role-policy-document file://sts_instance_worker_trust_policy.json \
+--tags Key=rosa_openshift_version,Value=4.8 Key=rosa_role_prefix,Value=ManagedOpenShift Key=rosa_role_type,Value=instance_worker
+aws iam put-role-policy \
+--role-name ManagedOpenShift-Worker-Role \
+--policy-name ManagedOpenShift-Worker-Role-Policy \
+--policy-document file://sts_instance_worker_permission_policy.json
+----
+
+. Check the contents of your current directory to see the new files. Use the `aws` CLI to create each of these objects.
++
+.Example output
+[source,terminal]
+----
+$ ls
+openshift_cloud_credential_operator_cloud_credential_operator_iam_ro_creds_policy.json
+sts_instance_controlplane_permission_policy.json
+openshift_cluster_csi_drivers_ebs_cloud_credentials_policy.json sts_instance_controlplane_trust_policy.json
+openshift_image_registry_installer_cloud_credentials_policy.json sts_instance_worker_permission_policy.json
+openshift_ingress_operator_cloud_credentials_policy.json sts_instance_worker_trust_policy.json
+openshift_machine_api_aws_cloud_credentials_policy.json sts_support_permission_policy.json
+sts_installer_permission_policy.json sts_support_trust_policy.json
+sts_installer_trust_policy.json
+----
+
+. *Optional:* Open the files to review what you will create. For example, opening the `sts_installer_permission_policy.json` file shows:
++
+.Example output
++
+[source,terminal]
+----
+$ cat sts_installer_permission_policy.json
+ {
+ "Version": "2012-10-17",
+ "Statement": [
+ {
+ "Effect": "Allow",
+ "Action": [
+ "autoscaling:DescribeAutoScalingGroups",
+ "ec2:AllocateAddress",
+ "ec2:AssociateAddress",
+ "ec2:AssociateDhcpOptions",
+ "ec2:AssociateRouteTable",
+ "ec2:AttachInternetGateway",
+ "ec2:AttachNetworkInterface",
+ "ec2:AuthorizeSecurityGroupEgress",
+ "ec2:AuthorizeSecurityGroupIngress",
+ [...]
+----
++
+You can also see the contents in the xref:../../../rosa_architecture/rosa-sts-about-iam-resources.adoc#rosa-sts-account-wide-roles-and-policies-creation-methods_rosa-sts-about-iam-resources[About IAM resources for ROSA clusters] documentation.
+
+. Run the `aws` commands listed in step 1. You can copy and paste if you are in the same directory as the JSON files you created.
+
+=== Creating a cluster
+. After the `aws` commands are executed successfully, run the following command to begin ROSA cluster creation in interactive mode:
++
+[source,terminal]
+----
+$ rosa create cluster --interactive --sts
+----
++
+See the xref:../../../rosa_install_access_delete_clusters/rosa-sts-interactive-mode-reference.adoc#rosa-sts-interactive-mode-reference[ROSA documentation] for a description of the fields.
+
+. For the purpose of this tutorial, copy and then input the following values:
++
+[source,terminal]
+----
+Cluster name: **my-rosa-cluster**
+OpenShift version: **<choose version>**
+External ID (optional): **<leave blank>**
+Operator roles prefix: **<accept default>**
+Multiple availability zones: **No**
+AWS region: **<choose region>**
+PrivateLink cluster: **No**
+Install into an existing VPC: **No**
+Enable Customer Managed key: **No**
+Compute nodes instance type: **m5.xlarge**
+Enable autoscaling: **No**
+Compute nodes: **2**
+Machine CIDR: **<accept default>**
+Service CIDR: **<accept default>**
+Pod CIDR: **<accept default>**
+Host prefix: **<accept default>**
+Encrypt etcd data (optional): **No**
+Disable Workload monitoring: **No**
+----
++
+.Example output
++
+[source,terminal]
+----
+I: Creating cluster 'my-rosa-cluster'
+I: To create this cluster again in the future, you can run:
+rosa create cluster --cluster-name my-rosa-cluster --role-arn arn:aws:iam::000000000000:role/ManagedOpenShift-Installer-Role --support-role-arn arn:aws:iam::000000000000:role/ManagedOpenShift-Support-Role --master-iam-role arn:aws:iam::000000000000:role/ManagedOpenShift-ControlPlane-Role --worker-iam-role arn:aws:iam::000000000000:role/ManagedOpenShift-Worker-Role --operator-roles-prefix my-rosa-cluster --region us-west-2 --version 4.8.13 --compute-nodes 2 --machine-cidr 10.0.0.0/16 --service-cidr 172.30.0.0/16 --pod-cidr 10.128.0.0/14 --host-prefix 23
+I: To view a list of clusters and their status, run 'rosa list clusters'
+I: Cluster 'my-rosa-cluster' has been created.
+I: Once the cluster is installed you will need to add an Identity Provider before you can login into the cluster. See 'rosa create idp --help' for more information.
+Name: my-rosa-cluster
+ID: 1t6i760dbum4mqltqh6o000000000000
+External ID:
+OpenShift Version:
+Channel Group: stable
+DNS: my-rosa-cluster.abcd.p1.openshiftapps.com
+AWS Account: 000000000000
+API URL:
+Console URL:
+Region: us-west-2
+Multi-AZ: false
+Nodes:
+- Control plane: 3
+- Infra: 2
+- Compute: 2
+Network:
+- Service CIDR: 172.30.0.0/16
+- Machine CIDR: 10.0.0.0/16
+- Pod CIDR: 10.128.0.0/14
+- Host Prefix: /23
+STS Role ARN: arn:aws:iam::000000000000:role/ManagedOpenShift-Installer-Role
+Support Role ARN: arn:aws:iam::000000000000:role/ManagedOpenShift-Support-Role
+Instance IAM Roles:
+- Control plane: arn:aws:iam::000000000000:role/ManagedOpenShift-ControlPlane-Role
+- Worker: arn:aws:iam::000000000000:role/ManagedOpenShift-Worker-Role
+Operator IAM Roles:
+- arn:aws:iam::000000000000:role/my-rosa-cluster-w7i6-openshift-ingress-operator-cloud-credentials
+- arn:aws:iam::000000000000:role/my-rosa-cluster-w7i6-openshift-cluster-csi-drivers-ebs-cloud-credentials
+- arn:aws:iam::000000000000:role/my-rosa-cluster-w7i6-openshift-cloud-network-config-controller-cloud-cre
+- arn:aws:iam::000000000000:role/my-rosa-cluster-openshift-machine-api-aws-cloud-credentials
+- arn:aws:iam::000000000000:role/my-rosa-cluster-openshift-cloud-credential-operator-cloud-credentia
+- arn:aws:iam::000000000000:role/my-rosa-cluster-openshift-image-registry-installer-cloud-credential
+State: waiting (Waiting for OIDC configuration)
+Private: No
+Created: Jul 1 2022 22:13:50 UTC
+Details Page: https://console.redhat.com/openshift/details/s/2BMQm8xz8Hq5yEN000000000000
+OIDC Endpoint URL: https://rh-oidc.s3.us-east-1.amazonaws.com/1t6i760dbum4mqltqh6o000000000000
+I: Run the following commands to continue the cluster creation:
+rosa create operator-roles --cluster my-rosa-cluster
+rosa create oidc-provider --cluster my-rosa-cluster
+I: To determine when your cluster is Ready, run 'rosa describe cluster -c my-rosa-cluster'.
+I: To watch your cluster installation logs, run 'rosa logs install -c my-rosa-cluster --watch'.
+----
++
+[NOTE]
+====
+The cluster state will remain as “waiting” until the next two steps are completed.
+====
+
+=== Creating Operator roles
+
+. The previous step outputs the next commands to run. These roles must be created _once_ for _each_ cluster. To create the roles, run the following command:
++
+[source,terminal]
+----
+rosa create operator-roles --mode manual --cluster <cluster-name>
+----
++
+.Example output
++
+[source,terminal]
+----
+I: Run the following commands to create the operator roles:
+ aws iam create-role \
+ --role-name my-rosa-cluster-openshift-image-registry-installer-cloud-credentials \
+ --assume-role-policy-document file://operator_image_registry_installer_cloud_credentials_policy.json \
+ --tags Key=rosa_cluster_id,Value=1mkesci269png3tck000000000000000 Key=rosa_openshift_version,Value=4.8 Key=rosa_role_prefix,Value= Key=operator_namespace,Value=openshift-image-registry Key=operator_name,Value=installer-cloud-credentials
+
+ aws iam attach-role-policy \
+ --role-name my-rosa-cluster-openshift-image-registry-installer-cloud-credentials \
+ --policy-arn arn:aws:iam::000000000000:policy/ManagedOpenShift-openshift-image-registry-installer-cloud-creden
+ [...]
+----
+
+. Run each of the `aws` commands.
+
+=== Creating the OIDC provider
+. Run the following command to create the OIDC provider:
++
+[source,terminal]
+----
+rosa create oidc-provider --mode manual --cluster <cluster-name>
+----
+
+. This displays the `aws` commands that you need to run.
++
+.Example output
++
+[source,terminal]
+----
+I: Run the following commands to create the OIDC provider:
+$ aws iam create-open-id-connect-provider \
+--url https://rh-oidc.s3.us-east-1.amazonaws.com/1mkesci269png3tckknhh0rfs2da5fj9 \
+--client-id-list openshift sts.amazonaws.com \
+--thumbprint-list a9d53002e97e00e043244f3d170d000000000000
+----
+
+. Your cluster will now continue the installation process.
+
+=== Checking the installation status
+. Run one of the following commands to check the status of your cluster:
++
+* For a detailed view of the status, run:
++
+[source,terminal]
+----
+rosa describe cluster --cluster <cluster-name>
+----
+* For an abridged view of the status, run:
++
+[source,terminal]
+----
+rosa list clusters
+----
+
+. The cluster state changes from "waiting" to "installing" to "ready". This takes about 40 minutes. If you prefer not to check manually, you can poll the state automatically, as shown after these steps.
+
+. Once the state changes to "ready", your cluster is installed.
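+
+If you prefer not to check by hand, a small shell loop can poll for the "ready" state. This is a minimal sketch, not a ROSA CLI feature; it assumes a cluster named `my-rosa-cluster` and relies on the `State:` line in the `rosa describe cluster` output shown above:
+
+[source,terminal]
+----
+# Poll every 60 seconds until the cluster state reports "ready"
+while ! rosa describe cluster --cluster my-rosa-cluster | grep -qE '^State:\s+ready'; do
+  echo "Cluster not ready yet; checking again in 60 seconds..."
+  sleep 60
+done
+echo "Cluster is ready."
+----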
+
+== Obtaining the console URL
+* To obtain the console URL, run the following command:
++
+[source,terminal]
+----
+rosa describe cluster -c <cluster-name> | grep Console
+----
+
+The cluster is now successfully deployed. The next tutorial shows how to create an admin user so that you can use the cluster immediately.
\ No newline at end of file
diff --git a/cloud_experts_tutorials/cloud-experts-getting-started/cloud-experts-getting-started-deploying/cloud-experts-getting-started-detailed-ui.adoc b/cloud_experts_tutorials/cloud-experts-getting-started/cloud-experts-getting-started-deploying/cloud-experts-getting-started-detailed-ui.adoc
new file mode 100644
index 000000000000..fbaaa08b6aef
--- /dev/null
+++ b/cloud_experts_tutorials/cloud-experts-getting-started/cloud-experts-getting-started-deploying/cloud-experts-getting-started-detailed-ui.adoc
@@ -0,0 +1,318 @@
+:_mod-docs-content-type: ASSEMBLY
+[id="cloud-experts-getting-started-detailed-ui"]
+= Tutorial: Detailed UI guide
+include::_attributes/attributes-openshift-dedicated.adoc[]
+:context: cloud-experts-getting-started-detailed-ui
+
+toc::[]
+
+//rosaworkshop.io content metadata
+//Brought into ROSA product docs 2023-11-20
+
+This tutorial outlines the detailed steps to deploy a {product-title} (ROSA) cluster using the Red Hat OpenShift Cluster Manager user interface (UI).
+
+== Deployment workflow
+The overall deployment workflow follows these steps:
+
+. Create the account-wide roles and policies.
+. Associate your AWS account with your Red Hat account.
+.. Create and link the Red Hat OpenShift Cluster Manager role.
+.. Create and link the user role.
+. Create the cluster.
+
+Step 1 only needs to be performed the *first time* you are deploying into an AWS account. Step 2 only needs to be performed the *first time* you are using the UI. For successive clusters of the same y-stream version, you only need to create the cluster.
+
+== Creating account-wide roles
+
+[NOTE]
+====
+If you already have account roles from an earlier deployment, skip this step. The UI will detect your existing roles after you select an associated AWS account.
+====
+
+If this is the _first time_ you are deploying ROSA in this account and you have _not_ yet created the account roles, create the account-wide roles and policies, including the Operator policies.
+
+* In your terminal, run the following command to create the account-wide roles:
++
+[source,terminal]
+----
+$ rosa create account-roles --mode auto --yes
+----
++
+.Example output
++
+[source,terminal]
+----
+I: Creating roles using 'arn:aws:iam::000000000000:user/rosa-user'
+I: Created role 'ManagedOpenShift-ControlPlane-Role' with ARN 'arn:aws:iam::000000000000:role/ManagedOpenShift-ControlPlane-Role'
+I: Created role 'ManagedOpenShift-Worker-Role' with ARN 'arn:aws:iam::000000000000:role/ManagedOpenShift-Worker-Role'
+I: Created role 'ManagedOpenShift-Support-Role' with ARN 'arn:aws:iam::000000000000:role/ManagedOpenShift-Support-Role'
+I: Created role 'ManagedOpenShift-Installer-Role' with ARN 'arn:aws:iam::000000000000:role/ManagedOpenShift-Installer-Role'
+I: Created policy with ARN 'arn:aws:iam::000000000000:policy/ManagedOpenShift-openshift-machine-api-aws-cloud-credentials'
+I: Created policy with ARN 'arn:aws:iam::000000000000:policy/ManagedOpenShift-openshift-cloud-credential-operator-cloud-crede'
+I: Created policy with ARN 'arn:aws:iam::000000000000:policy/ManagedOpenShift-openshift-image-registry-installer-cloud-creden'
+I: Created policy with ARN 'arn:aws:iam::000000000000:policy/ManagedOpenShift-openshift-ingress-operator-cloud-credentials'
+I: Created policy with ARN 'arn:aws:iam::000000000000:policy/ManagedOpenShift-openshift-cluster-csi-drivers-ebs-cloud-credent'
+I: To create a cluster with these roles, run the following command:
+rosa create cluster --sts
+----
+
+== Associating your AWS account with your Red Hat account
+This step tells the OpenShift Cluster Manager what AWS account you want to use when deploying ROSA.
+
+[NOTE]
+====
+If you have already associated your AWS accounts, skip this step.
+====
+
+. Open the OpenShift Cluster Manager by visiting the Red Hat link:https://console.redhat.com/openshift[console] and logging in to your Red Hat account.
+
+. Click *Create Cluster*.
+
+. Scroll down to the {product-title} (ROSA) row and click *Create Cluster*.
++
+image::cloud-experts-getting-started-rosa-deployment-detailed-ui-create.png[]
+
+. A dropdown menu appears. Click *With web interface*.
++
+image::cloud-experts-getting-started-rosa-deployment-detailed-ui-web-interface.png[]
+
+. Under "Select an AWS control plane type," choose *Classic*. Then click *Next*.
++
+image::cloud-experts-getting-started-rosa-deployment-detailed-ui-classic.png[]
+
+. Click the dropdown menu under *Associated AWS infrastructure account*. If you have not yet associated any AWS accounts, the dropdown menu may be empty.
+
+. Click *How to associate a new AWS account*.
++
+image::cloud-experts-getting-started-rosa-deployment-detailed-ui-associate.png[]
+
+. A sidebar appears with instructions for associating a new AWS account.
++
+image::cloud-experts-getting-started-rosa-deployment-detailed-ui-associate2.png[]
+
+== Creating and associating an OpenShift Cluster Manager role
+
+. Run the following command to see if an OpenShift Cluster Manager role exists:
++
+[source,terminal]
+----
+$ rosa list ocm-role
+----
+
+. The UI displays the commands to create an OpenShift Cluster Manager role with two different levels of permissions:
++
+* *Basic OpenShift Cluster Manager role:* Allows the OpenShift Cluster Manager to have read-only access to the account to check if the roles and policies that are required by ROSA are present before creating a cluster. You will need to manually create the required roles, policies, and OIDC provider using the CLI.
+* *Admin OpenShift Cluster Manager role:* Grants the OpenShift Cluster Manager additional permissions to create the required roles, policies, and OIDC provider for ROSA. Using this makes the deployment of a ROSA cluster quicker since the OpenShift Cluster Manager will be able to create the required resources for you.
++
+To read more about these roles, see the xref:../../../rosa_architecture/rosa-sts-about-iam-resources.adoc#rosa-sts-ocm-roles-and-permissions_rosa-sts-about-iam-resources[OpenShift Cluster Manager roles and permissions] section of the documentation.
++
+For the purposes of this tutorial, use the *Admin OpenShift Cluster Manager role* for the simplest and quickest approach.
+
+. Copy the command to create the Admin OpenShift Cluster Manager role from the sidebar or switch to your terminal and enter the following command:
++
+[source,terminal]
+----
+$ rosa create ocm-role --mode auto --admin --yes
+----
++
+This command creates the OpenShift Cluster Manager role and associates it with your Red Hat account.
++
+.Example output
++
+[source,terminal]
+----
+I: Creating ocm role
+I: Creating role using 'arn:aws:iam::000000000000:user/rosa-user'
+I: Created role 'ManagedOpenShift-OCM-Role-12561000' with ARN 'arn:aws:iam::000000000000:role/ManagedOpenShift-OCM-Role-12561000'
+I: Linking OCM role
+I: Successfully linked role-arn 'arn:aws:iam::000000000000:role/ManagedOpenShift-OCM-Role-12561000' with organization account '1MpZfntsZeUdjWHg7XRgP000000'
+----
+
+. Click *Step 2: User role*.
+
+=== Other OpenShift Cluster Manager role creation options
+* *Manual mode:* If you prefer to run the AWS CLI commands yourself, you can define the mode as `manual` rather than `auto`. The CLI outputs the AWS commands, and the relevant JSON files are created in the current directory.
++
+Use the following command to create the OpenShift Cluster Manager role in manual mode:
++
+[source,terminal]
+----
+$ rosa create ocm-role --mode manual --admin --yes
+----
+* *Basic OpenShift Cluster Manager role:* If you prefer that the OpenShift Cluster Manager has read-only access to the account, create a basic OpenShift Cluster Manager role. You will then need to manually create the required roles, policies, and OIDC provider using the CLI.
++
+Use the following command to create a Basic OpenShift Cluster Manager role:
++
+[source,terminal]
+----
+$ rosa create ocm-role --mode auto --yes
+----
+
+== Creating an OpenShift Cluster Manager user role
+
+As defined in the xref:../../../rosa_architecture/rosa-sts-about-iam-resources.adoc#rosa-sts-understanding-user-role_rosa-sts-about-iam-resources[user role documentation], the user role needs to be created so that ROSA can verify your AWS identity. This role has no permissions, and it is only used to create a trust relationship between the installation program account and your OpenShift Cluster Manager role resources.
+
+. Check if a user role already exists by running the following command:
++
+[source,terminal]
+----
+$ rosa list user-role
+----
+
+. Run the following command to create the user role and to link it to your Red Hat account:
++
+[source,terminal]
+----
+$ rosa create user-role --mode auto --yes
+----
++
+.Example output
++
+[source,terminal]
+----
+I: Creating User role
+I: Creating ocm user role using 'arn:aws:iam::000000000000:user/rosa-user'
+I: Created role 'ManagedOpenShift-User-rosa-user-Role' with ARN 'arn:aws:iam::000000000000:role/ManagedOpenShift-User-rosa-user-Role'
+I: Linking User role
+I: Successfully linked role ARN 'arn:aws:iam::000000000000:role/ManagedOpenShift-User-rosa-user-Role' with account '1rbOQez0z5j1YolInhcXY000000'
+----
++
+[NOTE]
+====
+As before, you can define `--mode manual` if you prefer to run the AWS CLI commands yourself. The CLI outputs the AWS commands, and the relevant JSON files are created in the current directory. Make sure to link the role.
+====
+
+. Click *Step 3: Account roles*.
+
+== Creating account roles
+. Create your account roles by running the following command:
++
+[source,terminal]
+----
+$ rosa create account-roles --mode auto
+----
+
+. Click *OK* to close the sidebar.
+
+== Confirming successful account association
+
+. You should now see your AWS account in the *Associated AWS infrastructure account* dropdown menu. If you see your account, account association was successful.
+
+. Select the account.
+
+. You will see the account role ARNs populated below.
++
+image::cloud-experts-getting-started-rosa-deployment-detailed-ui-account-roles.png[]
+
+. Click *Next*.
+
+== Creating the cluster
+
+. For the purposes of this tutorial, make the following selections:
++
+.Cluster settings
++
+* Cluster name: **<cluster name>**
+* Version: **<latest version>**
+* Region: **<choose region>**
+* Availability: **Single zone**
+* Enable user workload monitoring: **leave checked**
+* Enable additional etcd encryption: **leave unchecked**
+* Encrypt persistent volumes with customer keys: **leave unchecked**
+
+. Click *Next*.
+
+. Leave the default settings for the machine pool:
++
+.Default machine pool settings
++
+* Compute node instance type: **m5.xlarge - 4 vCPU 16 GiB RAM**
+* Enable autoscaling: **unchecked**
+* Compute node count: **2**
+* Leave node labels blank
+
+. Click *Next*.
+
+=== Networking
+
+. Leave all the default values for configuration.
+
+. Click *Next*.
+
+. Leave all the default values for CIDR ranges.
+
+. Click *Next*.
+
+=== Cluster roles and policies
+For this tutorial, leave *Auto* selected. It will make the cluster deployment process simpler and quicker.
+
+[NOTE]
+====
+If you selected a *Basic OpenShift Cluster Manager role* earlier, you can only use manual mode. You must manually create the operator roles and OIDC provider. See the "Basic OpenShift Cluster Manager role" section below after you have completed the "Cluster updates" section and started cluster creation.
+====
+
+=== Cluster updates
+* Leave all the options at default in this section.
+
+=== Reviewing and creating your cluster
+. Review the content for the cluster configuration.
+. Click *Create cluster*.
+
+=== Monitoring the installation progress
+* Stay on the current page to monitor the installation progress. It should take about 40 minutes.
++
+image::cloud-experts-getting-started-rosa-deployment-detailed-ui-cluster-create.png[]
+
+== Basic OpenShift Cluster Manager role
+
+[NOTE]
+====
+If you created an *Admin OpenShift Cluster Manager role* as directed above, *ignore* this entire section. The OpenShift Cluster Manager will create the resources for you.
+====
+
+If you created a *Basic OpenShift Cluster Manager role* earlier, you will need to manually create two more elements before cluster installation can continue:
+
+* Operator roles
+* OIDC provider
+
+//To understand what these do, please see the ROSA with AWS STS Explained tutorial section. xref needed
+
+=== Creating Operator roles
+. A pop-up window shows the commands to run.
++
+image::cloud-experts-getting-started-rosa-deployment-detailed-ui-create-cmds.png[]
+
+. Run the commands from the window in your terminal to launch interactive mode. Or, for simplicity, run the following command to create the Operator roles:
++
+[source,terminal]
+----
+$ rosa create operator-roles --mode auto --cluster <cluster-name> --yes
+----
++
+.Example output
++
+[source,terminal]
+----
+I: Creating roles using 'arn:aws:iam::000000000000:user/rosauser'
+I: Created role 'rosacluster-b736-openshift-ingress-operator-cloud-credentials' with ARN 'arn:aws:iam::000000000000:role/rosacluster-b736-openshift-ingress-operator-cloud-credentials'
+I: Created role 'rosacluster-b736-openshift-cluster-csi-drivers-ebs-cloud-credent' with ARN 'arn:aws:iam::000000000000:role/rosacluster-b736-openshift-cluster-csi-drivers-ebs-cloud-credent'
+I: Created role 'rosacluster-b736-openshift-cloud-network-config-controller-cloud' with ARN 'arn:aws:iam::000000000000:role/rosacluster-b736-openshift-cloud-network-config-controller-cloud'
+I: Created role 'rosacluster-b736-openshift-machine-api-aws-cloud-credentials' with ARN 'arn:aws:iam::000000000000:role/rosacluster-b736-openshift-machine-api-aws-cloud-credentials'
+I: Created role 'rosacluster-b736-openshift-cloud-credential-operator-cloud-crede' with ARN 'arn:aws:iam::000000000000:role/rosacluster-b736-openshift-cloud-credential-operator-cloud-crede'
+I: Created role 'rosacluster-b736-openshift-image-registry-installer-cloud-creden' with ARN 'arn:aws:iam::000000000000:role/rosacluster-b736-openshift-image-registry-installer-cloud-creden'
+----
+
+=== Creating the OIDC provider
+
+* In your terminal, run the following command to create the OIDC provider:
++
+[source,terminal]
+----
+$ rosa create oidc-provider --mode auto --cluster <cluster-name> --yes
+----
++
+.Example output
++
+[source,terminal]
+----
+I: Creating OIDC provider using 'arn:aws:iam::000000000000:user/rosauser'
+I: Created OIDC provider with ARN 'arn:aws:iam::000000000000:oidc-provider/rh-oidc.s3.us-east-1.amazonaws.com/1tt4kvrr2kha2rgs8gjfvf0000000000'
+----
\ No newline at end of file
diff --git a/cloud_experts_tutorials/cloud-experts-getting-started/cloud-experts-getting-started-deploying/cloud-experts-getting-started-hcp.adoc b/cloud_experts_tutorials/cloud-experts-getting-started/cloud-experts-getting-started-deploying/cloud-experts-getting-started-hcp.adoc
new file mode 100644
index 000000000000..b73acfbfb992
--- /dev/null
+++ b/cloud_experts_tutorials/cloud-experts-getting-started/cloud-experts-getting-started-deploying/cloud-experts-getting-started-hcp.adoc
@@ -0,0 +1,140 @@
+:_mod-docs-content-type: ASSEMBLY
+[id="cloud-experts-getting-started-detailed-ui-guide"]
+= Tutorial: Hosted Control Planes guide
+include::_attributes/attributes-openshift-dedicated.adoc[]
+:context: cloud-experts-getting-started-hcp
+
+toc::[]
+
+//rosaworkshop.io content metadata
+//Brought into ROSA product docs 2023-11-21
+
+This tutorial outlines deploying a {hcp-title-first} cluster.
+
+With {hcp-title}, you can decouple the control plane from the data plane. This is a new deployment model for ROSA in which the control plane is hosted in a Red Hat-owned AWS account. The control plane is no longer hosted in your AWS account, reducing your AWS infrastructure expenses. The control plane is dedicated to a single cluster and is highly available. For more information, see the xref:../../../rosa_hcp/rosa-hcp-sts-creating-a-cluster-quickly.adoc#rosa-hcp-sts-creating-a-cluster-quickly[{hcp-title} documentation].
+
+== Prerequisites
+
+Before deploying a {hcp-title} cluster, you must have the following resources:
+
+* VPC: This is a bring-your-own VPC model, also referred to as BYO-VPC.
+* OIDC: An OIDC configuration and an OIDC provider that uses that configuration.
+* ROSA CLI version 1.2.31 or later.
+
+In this tutorial, we will create these resources first. We will also set up some environment variables so that it is easier to run the command to create the {hcp-title} cluster.
+
+=== Creating a VPC
+. First, ensure that your AWS CLI (`aws`) is configured to use a region where {hcp-title} is available. To find out which regions are supported, run the following command:
++
+[source,terminal]
+----
+rosa list regions --hosted-cp
+----
+
+. Create the VPC. For this tutorial, the following script creates the VPC and its required components for you. It uses the region configured for the `aws` CLI. A simplified sketch of what such a script automates appears after this procedure.
++
+[source,terminal]
+----
+curl https://raw.githubusercontent.com/openshift-cs/rosaworkshop/master/rosa-workshop/rosa/resources/setup-vpc.sh | bash
+----
++
+For more about VPC requirements, see the xref:../../../rosa_planning/rosa-sts-aws-prereqs.adoc#rosa-vpc_rosa-sts-aws-prereqs[VPC documentation].
+
+. The script outputs two `export` commands. Set these environment variables to make running the `create cluster` command easier. Copy the commands from the script output and run them, as shown:
++
+[source,terminal]
+----
+export PUBLIC_SUBNET_ID=<public-subnet-id>
+export PRIVATE_SUBNET_ID=<private-subnet-id>
+----
+
+. Confirm that the environment variables are set by running the following command:
++
+[source,terminal]
+----
+echo "Public Subnet: $PUBLIC_SUBNET_ID"; echo "Private Subnet: $PRIVATE_SUBNET_ID"
+----
++
+.Example output
++
+[source,terminal]
+----
+Public Subnet: subnet-0faeeeb0000000000
+Private Subnet: subnet-011fe340000000000
+----
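+
+If you are curious what the setup script automates, the following is a simplified, hypothetical sketch using standard `aws` CLI calls. The CIDR ranges are examples, and the internet gateway, NAT gateway, route tables, and tagging that a real script also configures are omitted for brevity:
+
+[source,terminal]
+----
+# Create a VPC and one public plus one private subnet (simplified sketch)
+VPC_ID=$(aws ec2 create-vpc --cidr-block 10.0.0.0/16 --query Vpc.VpcId --output text)
+PUBLIC_SUBNET_ID=$(aws ec2 create-subnet --vpc-id $VPC_ID --cidr-block 10.0.0.0/20 --query Subnet.SubnetId --output text)
+PRIVATE_SUBNET_ID=$(aws ec2 create-subnet --vpc-id $VPC_ID --cidr-block 10.0.128.0/20 --query Subnet.SubnetId --output text)
+echo "Public Subnet: $PUBLIC_SUBNET_ID"; echo "Private Subnet: $PRIVATE_SUBNET_ID"
+----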
+
+=== Creating your OIDC configuration
+
+In this tutorial, we will use the automatic mode when creating the OIDC configuration. We will also store the OIDC ID as an environment variable for later use. The command uses the ROSA CLI to create your cluster's unique OIDC configuration.
+
+* To create the OIDC configuration for this tutorial, run the following command:
++
+[source,terminal]
+----
+export OIDC_ID=$(rosa create oidc-config --mode auto --managed --yes -o json | jq -r '.id')
+----
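+
+To confirm that the variable was captured, echo it. Recent versions of the ROSA CLI can also list existing OIDC configurations; treat the exact subcommand as an assumption about your CLI version:
+
+[source,terminal]
+----
+# Verify that the OIDC configuration ID was captured
+echo "OIDC config ID: $OIDC_ID"
+
+# Optionally list OIDC configurations (assumes a recent ROSA CLI)
+rosa list oidc-config
+----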
+
+=== Creating additional environment variables
+
+* Run the following command to set up some environment variables so that it is easier to run the command to create the {hcp-title} cluster:
++
+[source,terminal]
+----
+export CLUSTER_NAME=
+export REGION=
+----
++
+[TIP]
+====
+Run `rosa whoami` to find the VPC region.
+====
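+
+If your `aws` CLI is already configured with the target region, you can populate the region variable programmatically instead of typing it. A minimal sketch; the cluster name is a hypothetical example:
+
+[source,terminal]
+----
+export CLUSTER_NAME=my-hcp-cluster
+export REGION=$(aws configure get region)
+echo "Cluster: $CLUSTER_NAME, Region: $REGION"
+----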
+
+== Creating the cluster
+If this is the _first time_ you are deploying ROSA in this account and you have _not_ yet created the account roles, create the account-wide roles and policies, including the Operator policies. Since ROSA uses AWS Security Token Service (STS), this step creates the AWS IAM roles and policies that are needed for ROSA to interact with your account.
+
+. Run the following command to create the account-wide roles:
++
+[source,terminal]
+----
+rosa create account-roles --mode auto --yes
+----
+
+. Run the following command to create the cluster:
++
+[source,terminal]
+----
+rosa create cluster --cluster-name $CLUSTER_NAME \
+ --subnet-ids ${PUBLIC_SUBNET_ID},${PRIVATE_SUBNET_ID} \
+ --hosted-cp \
+ --region $REGION \
+ --oidc-config-id $OIDC_ID \
+ --sts --mode auto --yes
+----
+
+The cluster is ready and fully usable after about 10 minutes. It has a control plane spread across three AWS availability zones in your selected region, and it creates two worker nodes in your AWS account.
+
+== Checking the installation status
+. Run one of the following commands to check the status of the cluster:
++
+* For a detailed view of the cluster status, run:
++
+[source,terminal]
+----
+rosa describe cluster --cluster $CLUSTER_NAME
+----
++
+* For an abridged view of the cluster status, run:
++
+[source,terminal]
+----
+rosa list clusters
+----
++
+* To watch the log as it progresses, run:
++
+[source,terminal]
+----
+rosa logs install --cluster $CLUSTER_NAME --watch
+----
+
+. Once the state changes to "ready", your cluster is installed. It might take a few more minutes for the worker nodes to come online.
\ No newline at end of file
diff --git a/cloud_experts_tutorials/cloud-experts-getting-started/cloud-experts-getting-started-deploying/cloud-experts-getting-started-simple-cli-guide.adoc b/cloud_experts_tutorials/cloud-experts-getting-started/cloud-experts-getting-started-deploying/cloud-experts-getting-started-simple-cli-guide.adoc
new file mode 100644
index 000000000000..d54db7a21cf0
--- /dev/null
+++ b/cloud_experts_tutorials/cloud-experts-getting-started/cloud-experts-getting-started-deploying/cloud-experts-getting-started-simple-cli-guide.adoc
@@ -0,0 +1,45 @@
+:_mod-docs-content-type: ASSEMBLY
+[id="cloud-experts-getting-started-simple-cli-guide"]
+= Tutorial: Simple CLI guide
+include::_attributes/attributes-openshift-dedicated.adoc[]
+:context: cloud-experts-getting-started-simple-cli-guide
+
+toc::[]
+
+//rosaworkshop.io content metadata
+//Brought into ROSA product docs 2023-11-16
+
+This page outlines the minimum list of commands to deploy a {product-title} (ROSA) cluster using the command line interface (CLI).
+
+[NOTE]
+====
+While this simple deployment works well for a tutorial setting, clusters used in production should be deployed with a more detailed method.
+====
+
+== Prerequisites
+
+* You have completed the prerequisites in the Setup tutorial.
+
+== Creating account roles
+Run the following command _once_ for each AWS account and y-stream OpenShift version:
+
+[source,terminal]
+----
+rosa create account-roles --mode auto --yes
+----
+
+== Deploying the cluster
+
+. Create the cluster with the default configuration by running the following command, substituting your own cluster name:
++
+[source,terminal]
+----
+rosa create cluster --cluster-name <cluster-name> --sts --mode auto --yes
+----
+
+. Check the status of your cluster by running the following command:
++
+[source,terminal]
+----
+rosa list clusters
+----
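+
+To follow the installation in real time rather than polling the cluster list, you can also tail the installation logs, substituting your own cluster name:
+
+[source,terminal]
+----
+rosa logs install --cluster <cluster-name> --watch
+----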
\ No newline at end of file
diff --git a/cloud_experts_tutorials/cloud-experts-getting-started/cloud-experts-getting-started-deploying/cloud-experts-getting-started-simple-ui-guide.adoc b/cloud_experts_tutorials/cloud-experts-getting-started/cloud-experts-getting-started-deploying/cloud-experts-getting-started-simple-ui-guide.adoc
new file mode 100644
index 000000000000..004207e645e0
--- /dev/null
+++ b/cloud_experts_tutorials/cloud-experts-getting-started/cloud-experts-getting-started-deploying/cloud-experts-getting-started-simple-ui-guide.adoc
@@ -0,0 +1,50 @@
+:_mod-docs-content-type: ASSEMBLY
+[id="cloud-experts-getting-started-simple-ui-guide"]
+= Tutorial: Simple UI guide
+include::_attributes/attributes-openshift-dedicated.adoc[]
+:context: cloud-experts-getting-started-simple-ui-guide
+
+toc::[]
+
+//rosaworkshop.io content metadata
+//Brought into ROSA product docs 2023-11-17
+
+This page outlines the minimum list of commands to deploy a ROSA cluster using the user interface (UI).
+
+[NOTE]
+====
+While this simple deployment works well for a workshop setting, clusters used in production should be deployed with a more detailed method.
+====
+
+== Prerequisites
+
+* You have completed the prerequisites in the Setup tutorial.
+
+== Creating account roles
+Run the following command _once_ for each AWS account and y-stream OpenShift version:
+
+[source,terminal]
+----
+rosa create account-roles --mode auto --yes
+----
+
+== Creating Red Hat OpenShift Cluster Manager roles
+. Create one OpenShift Cluster Manager role for each AWS account by running the following command:
++
+[source,terminal]
+----
+rosa create ocm-role --mode auto --admin --yes
+----
+
+. Create one OpenShift Cluster Manager user role for each AWS account by running the following command:
++
+[source,terminal]
+----
+rosa create user-role --mode auto --yes
+----
+
+. Use the link:https://console.redhat.com/openshift/create/rosa/wizard[Red Hat OpenShift Cluster Manager UI] to select your AWS account, cluster options, and begin deployment.
+
+. The OpenShift Cluster Manager UI displays the cluster status.
++
+image::cloud-experts-getting-started-deployment-ui-cluster-create.png[]
\ No newline at end of file
diff --git a/distr_tracing/distr_tracing_install/images b/cloud_experts_tutorials/cloud-experts-getting-started/cloud-experts-getting-started-deploying/images
similarity index 100%
rename from distr_tracing/distr_tracing_install/images
rename to cloud_experts_tutorials/cloud-experts-getting-started/cloud-experts-getting-started-deploying/images
diff --git a/rosa_support/modules b/cloud_experts_tutorials/cloud-experts-getting-started/cloud-experts-getting-started-deploying/modules
similarity index 100%
rename from rosa_support/modules
rename to cloud_experts_tutorials/cloud-experts-getting-started/cloud-experts-getting-started-deploying/modules
diff --git a/cloud_experts_tutorials/cloud-experts-getting-started/cloud-experts-getting-started-deploying/snippets b/cloud_experts_tutorials/cloud-experts-getting-started/cloud-experts-getting-started-deploying/snippets
new file mode 120000
index 000000000000..9d58b92e5058
--- /dev/null
+++ b/cloud_experts_tutorials/cloud-experts-getting-started/cloud-experts-getting-started-deploying/snippets
@@ -0,0 +1 @@
+../snippets/
\ No newline at end of file
diff --git a/cloud_experts_tutorials/cloud-experts-getting-started/cloud-experts-getting-started-idp.adoc b/cloud_experts_tutorials/cloud-experts-getting-started/cloud-experts-getting-started-idp.adoc
new file mode 100644
index 000000000000..81add39fc415
--- /dev/null
+++ b/cloud_experts_tutorials/cloud-experts-getting-started/cloud-experts-getting-started-idp.adoc
@@ -0,0 +1,107 @@
+:_mod-docs-content-type: ASSEMBLY
+[id="cloud-experts-getting-started-idp"]
+= Tutorial: Setting up an identity provider
+include::_attributes/attributes-openshift-dedicated.adoc[]
+:context: cloud-experts-getting-started-idp
+
+toc::[]
+
+//rosaworkshop.io content metadata
+//Brought into ROSA product docs 2023-11-28
+
+To log in to your cluster, set up an identity provider (IDP). This tutorial uses GitHub as an example IDP. See the full list of xref:../../rosa_install_access_delete_clusters/rosa-sts-config-identity-providers.adoc#understanding-idp-supported_rosa-sts-config-identity-providers[IDPs supported by ROSA].
+
+* To view all IDP options, run the following command:
++
+[source,terminal]
+----
+rosa create idp --help
+----
+
+== Setting up an IDP with GitHub
+. Log in to your GitHub account.
+. Create a new GitHub organization where you are an administrator.
++
+[TIP]
+====
+If you are already an administrator in an existing organization and you want to use that organization, skip to step 9.
+====
++
+Click the *+* icon, then click *New Organization*.
++
+image::cloud-experts-getting-started-idp-new-org.png[]
+
+. Choose the most applicable plan for your situation or click *Join for free*.
+
+. Enter an organization account name, an email, and whether it is a personal or business account. Then, click *Next*.
++
+image::cloud-experts-getting-started-idp-team.png[]
+
+. *Optional:* Add the GitHub IDs of other users to grant additional access to your ROSA cluster. You can also add them later.
+. Click *Complete Setup*.
+. *Optional:* Enter the requested information on the following page.
+. Click *Submit*.
+. Go back to the terminal and enter the following command to set up the GitHub IDP:
++
+[source,terminal]
+----
+rosa create idp --cluster=<cluster-name> --interactive
+----
+
+. Enter the following values:
++
+[source,terminal]
+----
+Type of identity provider: github
+Identity Provider Name: <IDP name>
+Restrict to members of: organizations
+GitHub organizations: <your GitHub org name>
+----
+
+. The CLI provides you with a link. Copy and paste the link into a browser and press *Enter*. The link fills in the information required to register this application for OAuth. You do not need to modify any of the information.
++
+image::cloud-experts-getting-started-idp-link.png[]
+
+. Click *Register application*.
++
+image::cloud-experts-getting-started-idp-register.png[]
+
+. The next page displays a *Client ID*. Copy the ID and paste it in the terminal where it asks for *Client ID*.
++
+[NOTE]
+====
+Do not close the tab.
+====
+
+. The CLI will ask for a *Client Secret*. Go back to your browser and click *Generate a new client secret*.
++
+image::cloud-experts-getting-started-idp-secret.png[]
+
+. A secret is generated for you. Copy your secret because it will never be visible again.
+
+. Paste your secret into the terminal and press *Enter*.
+. Leave *GitHub Enterprise Hostname* blank.
+. Select *claim*.
+. Wait approximately 1 minute for the IDP to be created and the configuration to land on your cluster.
++
+image::cloud-experts-getting-started-idp-inputs.png[]
+
+. Copy the returned link and paste it into your browser. The new IDP should be available under your chosen name. Click your IDP and use your GitHub credentials to access the cluster.
++
+image::cloud-experts-getting-started-idp-login.png[]
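+
+If you prefer a non-interactive flow, the same GitHub IDP can be created in a single command. The following is a sketch based on the standard `rosa create idp` flags; the IDP name, organization, and credential values are hypothetical placeholders:
+
+[source,terminal]
+----
+rosa create idp --cluster=<cluster-name> \
+  --type github \
+  --name github-idp \
+  --organizations <your GitHub org name> \
+  --client-id <client-id> \
+  --client-secret <client-secret> \
+  --mapping-method claim
+----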
+
+== Granting other users access to the cluster
+To grant other users access to the cluster, add their GitHub user IDs to the GitHub organization used for this cluster.
+
+. In GitHub, go to the *Your organizations* page.
+
+. Click your *profile icon*, then *Your organizations*. Then click *<your organization name>*. In our example, it is `my-rosa-cluster`.
++
+image::cloud-experts-getting-started-idp-org.png[]
+
+. Click *Invite someone*.
++
+image::cloud-experts-getting-started-idp-invite.png[]
+
+. Enter the GitHub ID of the new user, select the correct user, and click *Invite*.
+. Once the new user accepts the invitation, they will be able to log in to the ROSA cluster using the console link and their GitHub credentials.
\ No newline at end of file
diff --git a/cloud_experts_tutorials/cloud-experts-getting-started/cloud-experts-getting-started-managing-worker-nodes.adoc b/cloud_experts_tutorials/cloud-experts-getting-started/cloud-experts-getting-started-managing-worker-nodes.adoc
new file mode 100644
index 000000000000..1918ba6b6acf
--- /dev/null
+++ b/cloud_experts_tutorials/cloud-experts-getting-started/cloud-experts-getting-started-managing-worker-nodes.adoc
@@ -0,0 +1,233 @@
+:_mod-docs-content-type: ASSEMBLY
+[id="cloud-experts-getting-started-managing-worker-nodes"]
+= Tutorial: Managing worker nodes
+include::_attributes/attributes-openshift-dedicated.adoc[]
+:context: cloud-experts-getting-started-managing-worker-nodes
+
+toc::[]
+
+//rosaworkshop.io content metadata
+//Brought into ROSA product docs 2023-11-30
+
+In {product-title} (ROSA), you change aspects of your worker nodes by using machine pools. A machine pool allows users to manage many machines as a single entity. Every ROSA cluster has a default machine pool that is created when the cluster is created. For more information, see the xref:../../rosa_cluster_admin/rosa_nodes/rosa-nodes-machinepools-about.adoc#rosa-nodes-machinepools-about[machine pool] documentation.
+
+
+== Creating a machine pool
+You can create a machine pool with either the command line interface (CLI) or the user interface (UI).
+
+=== Creating a machine pool with the CLI
+. Run the following command:
++
+[source,terminal]
+----
+rosa create machinepool --cluster=<cluster-name> --name=<machinepool-name> --replicas=<number-of-nodes>
+----
++
+.Example input
++
+[source,terminal]
+----
+$ rosa create machinepool --cluster=my-rosa-cluster --name=new-mp --replicas=2
+----
++
+.Example output
++
+[source,terminal]
+----
+I: Machine pool 'new-mp' created successfully on cluster 'my-rosa-cluster'
+I: To view all machine pools, run 'rosa list machinepools -c my-rosa-cluster'
+----
+
+. *Optional:* Add node labels or taints to specific nodes in a new machine pool by running the following command:
++
+[source,terminal]
+----
+rosa create machinepool --cluster=<cluster-name> --name=<machinepool-name> --replicas=<number-of-nodes> --labels='<key=value>'
+----
++
+.Example input
++
+[source,terminal]
+----
+$ rosa create machinepool --cluster=my-rosa-cluster --name=db-nodes-mp --replicas=2 --labels='app=db','tier=backend'
+----
++
+.Example output
++
+[source,terminal]
+----
+I: Machine pool 'db-nodes-mp' created successfully on cluster 'my-rosa-cluster'
+----
++
+This creates two additional nodes that can be managed as a unit and also assigns them the labels shown. A scheduling example that uses these labels follows this procedure.
+
+. Run the following command to confirm machine pool creation and the assigned labels:
++
+[source,terminal]
+----
+rosa list machinepools --cluster=<cluster-name>
+----
++
+.Example output
++
+[source,terminal]
+----
+ID           AUTOSCALING  REPLICAS  INSTANCE TYPE  LABELS                TAINTS  AVAILABILITY ZONES
+Default      No           2         m5.xlarge                                    us-east-1a
+db-nodes-mp  No           2         m5.xlarge      app=db, tier=backend          us-east-1a
+----
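+
+To see the labels from the `db-nodes-mp` example in action, you can schedule a workload onto the labeled nodes with a `nodeSelector`. This is a minimal sketch, assuming the `app=db` and `tier=backend` labels created above; the deployment name and image are hypothetical:
+
+[source,terminal]
+----
+cat <<EOF | oc apply -f -
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: db-app
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      app: db-app
+  template:
+    metadata:
+      labels:
+        app: db-app
+    spec:
+      # Schedule only onto nodes carrying the machine pool labels
+      nodeSelector:
+        app: db
+        tier: backend
+      containers:
+      - name: db-app
+        image: registry.access.redhat.com/ubi9/ubi-minimal
+        command: ["sleep", "infinity"]
+EOF
+----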
+
+=== Creating a machine pool with the UI
+. Log in to the link:https://console.redhat.com/openshift[Red Hat console] and click your cluster.
++
+image::cloud-experts-getting-started-managing-ocm-cluster.png[]
+
+. Click *Machine pools*.
++
+image::cloud-experts-getting-started-managing-mp-ocm.png[]
+
+. Click *Add machine pool*.
+
+. Enter the desired configuration.
++
+[TIP]
+====
+You can also expand the *Edit node labels and taints* section to add node labels and taints to the nodes in the machine pool.
+====
++
+image::cloud-experts-getting-started-managing-mp-nlt.png[]
+
+. You will see the new machine pool you created.
++
+image::cloud-experts-getting-started-managing-mp-fromui.png[]
+
+== Scaling worker nodes
+
+Edit a machine pool to scale the number of worker nodes in that specific machine pool. You can use either the CLI or the UI to scale worker nodes.
+
+=== Scaling worker nodes using the CLI
+
+. Run the following command to see the default machine pool that is created with each cluster:
++
+[source,terminal]
+----
+rosa list machinepools --cluster=<cluster-name>
+----
++
+.Example output
++
+[source,terminal]
+----
+ID AUTOSCALING REPLICAS INSTANCE TYPE LABELS TAINTS AVAILABILITY ZONES
+Default No 2 m5.xlarge us-east-1a
+----
+
+. To scale the default machine pool to a different number of nodes, run the following command:
++
+[source,terminal]
+----
+rosa edit machinepool --cluster=<cluster-name> --replicas=<number-of-nodes> <machinepool-name>
+----
++
+.Example input
++
+[source,terminal]
+----
+rosa edit machinepool --cluster=my-rosa-cluster --replicas 3 Default
+----
+
+. Run the following command to confirm that the machine pool has scaled:
++
+[source,terminal]
+----
+rosa describe cluster --cluster=<cluster-name> | grep Compute
+----
++
+.Example input
++
+[source,terminal]
+----
+$ rosa describe cluster --cluster=my-rosa-cluster | grep Compute
+----
++
+.Example output
++
+[source,terminal]
+----
+- Compute: 3 (m5.xlarge)
+----
+
+=== Scaling worker nodes using the UI
+
+. Click the three dots to the right of the machine pool you want to edit.
+. Click *Edit*.
+. Enter the desired number of nodes, and click *Save*.
+. Confirm that the cluster has scaled by selecting the cluster, clicking the *Overview* tab, and scrolling to *Compute listing*. The compute listing should equal the scaled nodes. For example, 3/3.
++
+image::cloud-experts-getting-started-managing-ocm-nodes.png[]
+
+=== Adding node labels
+
+. Use the following command to add node labels:
++
+[source,terminal]
+----
+rosa edit machinepool --cluster=<cluster-name> --replicas=<number-of-nodes> --labels='<key=value>' <machinepool-name>
+----
++
+.Example input
++
+[source,terminal]
+----
+rosa edit machinepool --cluster=my-rosa-cluster --replicas=2 --labels 'foo=bar','baz=one' new-mp
+----
++
+This adds two labels to the `new-mp` machine pool.
+
+[IMPORTANT]
+====
+This command replaces the machine pool's existing labels with the newly defined set. If you want to add another label *and* keep an existing label, you must state both the new and the preexisting labels; otherwise, the command replaces all preexisting labels with the one you want to add. Similarly, if you want to delete a label, run the command and state the labels you want to keep, excluding the one you want to delete.
+====
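+
+For example, to add a third label to the `new-mp` machine pool from the earlier example while keeping `foo=bar` and `baz=one`, restate all three labels. The new key here is a hypothetical example:
+
+[source,terminal]
+----
+rosa edit machinepool --cluster=my-rosa-cluster --replicas=2 --labels 'foo=bar','baz=one','env=dev' new-mp
+----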
+
+== Mixing node types
+
+You can also mix different worker node machine types in the same cluster by using new machine pools. You cannot change the node type of a machine pool once it is created, but you can create a new machine pool with different nodes by adding the `--instance-type` flag.
+
+. For example, to create a new machine pool for database nodes that uses a different node type, run the following command:
++
+[source,terminal]
+----
+rosa create machinepool --cluster=<cluster-name> --name=<machinepool-name> --replicas=<number-of-nodes> --labels='<key=value>' --instance-type=<instance-type>
+----
++
+.Example input
++
+[source,terminal]
+----
+rosa create machinepool --cluster=my-rosa-cluster --name=db-nodes-large-mp --replicas=2 --labels='app=db','tier=backend' --instance-type=m5.2xlarge
+----
+
+. To see all the xref:../../rosa_architecture/rosa_policy_service_definition/rosa-service-definition.adoc#rosa-sdpolicy-aws-instance-types_rosa-service-definition[instance types available], run the following command:
++
+[source,terminal]
+----
+rosa list instance-types
+----
+
+. To make step-by-step changes, use the `--interactive` flag:
++
+[source,terminal]
+----
+rosa create machinepool -c <cluster-name> --interactive
+----
++
+image::cloud-experts-getting-started-managing-mp-interactive.png[]
+
+. Run the following command to list the machine pools and see the new, larger instance type:
++
+[source,terminal]
+----
+rosa list machinepools -c <cluster-name>
+----
++
+image::cloud-experts-getting-started-managing-large-mp.png[]
\ No newline at end of file
diff --git a/cloud_experts_tutorials/cloud-experts-getting-started/cloud-experts-getting-started-setup.adoc b/cloud_experts_tutorials/cloud-experts-getting-started/cloud-experts-getting-started-setup.adoc
new file mode 100644
index 000000000000..cfa61246c059
--- /dev/null
+++ b/cloud_experts_tutorials/cloud-experts-getting-started/cloud-experts-getting-started-setup.adoc
@@ -0,0 +1,232 @@
+:_mod-docs-content-type: ASSEMBLY
+[id="cloud-experts-getting-started-setup"]
+= Tutorial: Setup
+include::_attributes/attributes-openshift-dedicated.adoc[]
+:context: cloud-experts-getting-started-setup
+
+toc::[]
+
+//rosaworkshop.io content metadata
+//Brought into ROSA product docs 2023-11-13
+
+There are currently two supported credential methods when creating a {product-title} (ROSA) cluster. One method uses an IAM user with the `AdministratorAccess` policy. The second and *recommended* method uses Amazon Web Services (AWS) Security Token Service (STS).
+//To be added when the ROSA with STS Explained tutorial is published:
+//For more information, see the xref../cloud_experts_tutorials/cloud_experts_rosa_with_sts_explained.adoc#id[ROSA with STS Explained] tutorial. This workshop uses the STS method.
+
+== Prerequisites
+
+Review the prerequisites listed in the xref:../../rosa_planning/rosa-cloud-expert-prereq-checklist.adoc#rosa-cloud-expert-prereq-checklist[Prerequisites for ROSA with STS] checklist.
+
+You will need the following information from your AWS account:
+
+* AWS IAM user
+* AWS access key ID
+* AWS secret access key
+
+== Setting up a Red Hat account
+. If you do not have a Red Hat account, create one on the link:https://console.redhat.com/[Red Hat console].
+. Accept the required terms and conditions.
+. Then check your email for a verification link.
+
+== Installing the AWS CLI
+* Install the link:https://aws.amazon.com/cli/[AWS CLI] for your operating system.
+
+== Enabling ROSA
+
+[NOTE]
+====
+Only complete this step if you have *not* enabled ROSA in your AWS account.
+====
+
+. Visit the link:https://console.aws.amazon.com/rosa[AWS console] to enable your account to use ROSA.
+. Click the orange *Enable OpenShift* button.
++
+image::cloud-experts-getting-started-setup-enable.png[]
+
+. After about a minute, a green *service enabled* bar should appear.
++
+image::cloud-experts-getting-started-setup-enabled.png[]
+
+== Installing the ROSA CLI
+. Download the link:https://console.redhat.com/openshift/downloads[ROSA CLI] for your operating system.
+. Extract the downloaded file by using the following command (Linux shown here):
++
+[source,terminal]
+----
+tar -xvf rosa-linux.tar.gz
+----
+. Move the `rosa` executable to a location within your `PATH` by using the following command:
++
+[source,terminal]
+----
+sudo mv rosa /usr/local/bin/rosa
+----
+. Run `rosa version` to verify a successful installation.
+
+== Installing the OpenShift CLI
+There are a few ways to install the OpenShift CLI (`oc`):
+
+* *Option 1: Using the ROSA CLI:*
+.. Run `rosa download oc`.
+.. Once downloaded, unzip the file and move the executables into a directory in your `PATH`.
+* *Option 2: Using the OpenShift documentation:*
+.. Follow the directions on the xref:../../cli_reference/openshift_cli/getting-started-cli.adoc#installing-openshift-cli[documentation page].
+* *Option 3: Using your OpenShift cluster:*
+.. If you already have an OpenShift cluster, you can access the CLI tools page by clicking the question mark (*?*) icon, then clicking *Command Line Tools*.
++
+image::cloud_experts_getting_started_setup_cli_tools.png[]
+
+.. Then, download the relevant tool for your operating system.
+
+=== Using `oc` instead of `kubectl`
+While `kubectl` can be used with an OpenShift cluster, `oc` is specific to OpenShift. It includes the standard set of features from `kubectl` as well as additional support for OpenShift functionality. For more information, see xref:../../cli_reference/openshift_cli/usage-oc-kubectl.adoc#usage-oc-kubectl[Usage of oc and kubectl commands].
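+
+As a small illustration, `oc` adds verbs for OpenShift-specific concepts, such as projects, that plain `kubectl` does not have, while remaining compatible with standard Kubernetes commands:
+
+[source,terminal]
+----
+# Create a project (an OpenShift concept layered on Kubernetes namespaces)
+oc new-project my-demo
+
+# Standard Kubernetes-style commands work identically through oc
+oc get pods -n my-demo
+----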
+
+== Configuring the AWS CLI
+To configure the AWS CLI, follow these steps:
+
+. Enter `aws configure` in the terminal.
+. Enter your AWS access key ID and press *Enter*.
+. Enter your AWS secret access key and press *Enter*.
+. Enter the default region in which you want to deploy.
+. Enter the desired output format, specifying either `table` or `json`.
+
+.Example output
+[source,terminal]
+----
+$ aws configure
+AWS Access Key ID: AKIA0000000000000000
+AWS Secret Access Key: NGvmP0000000000000000000000000
+Default region name: us-east-1
+Default output format: table
+----
+
+== Verifying the configuration
+Verify that the configuration is correct by following these steps:
+
+. Run the following command to query the AWS API:
++
+[source,terminal]
+----
+aws sts get-caller-identity
+----
+. You should see table or JSON output, depending on the format you chose. Verify that the account information is correct.
++
+.Example output
++
+[source,terminal]
+----
+$ aws sts get-caller-identity
+------------------------------------------------------------------------------
+| GetCallerIdentity |
++--------------+----------------------------------------+--------------------+
+| Account | Arn | UserId |
++--------------+----------------------------------------+--------------------+
+| 000000000000| arn:aws:iam::00000000000:user/myuser | AIDA00000000000000|
++--------------+----------------------------------------+--------------------+
+----
+
+== Ensuring the ELB service role exists
+[TIP]
+====
+Make sure that the service role for Elastic Load Balancing (ELB) already exists; otherwise, cluster deployment could fail.
+====
+
+* Run the following command to check for the ELB service role and create it if it is missing:
++
+[source,terminal]
+----
+aws iam get-role --role-name "AWSServiceRoleForElasticLoadBalancing" || aws iam create-service-linked-role --aws-service-name "elasticloadbalancing.amazonaws.com"
+----
+
+=== Fixing ELB service role errors
+
+. The following error during cluster creation means that an ELB service role does not exist:
++
+.Example output
++
+[source,terminal]
+----
+Error: Error creating network Load Balancer: AccessDenied: User: arn:aws:sts::970xxxxxxxxx:assumed-role/ManagedOpenShift-Installer-Role/163xxxxxxxxxxxxxxxx is not authorized to perform: iam:CreateServiceLinkedRole on resource: arn:aws:iam::970xxxxxxxxx:role/aws-service-role/elasticloadbalancing.amazonaws.com/AWSServiceRoleForElasticLoadBalancing"
+----
+
+. If you receive the above error during cluster creation, run the following command:
++
+[source,terminal]
+----
+aws iam get-role --role-name "AWSServiceRoleForElasticLoadBalancing" || aws iam create-service-linked-role --aws-service-name "elasticloadbalancing.amazonaws.com"
+----
+
+== Logging in to your Red Hat account
+. Enter `rosa login` in a terminal.
+. The command prompts you to open a web browser and go to the link:https://console.redhat.com/openshift/token/rosa[Red Hat console].
+. Log in, if necessary.
+. Click *Load token*.
+. Copy the token, paste it into the CLI prompt, and press *Enter*. Alternatively, you can copy the full `rosa login --token=abc...` command and paste it in the terminal.
++
+image::cloud-experts-getting-started-setup-token.png[]
+
+== Verifying credentials
+Verify that all the credentials are correct.
+
+. Run `rosa whoami` in the terminal.
++
+.Example output
+[source,terminal]
+----
+AWS Account ID: 000000000000
+AWS Default Region: us-east-2
+AWS ARN: arn:aws:iam::000000000000:user/myuser
+OCM API: https://api.openshift.com
+OCM Account ID: 1DzGIdIhqEWy000000000000000
+OCM Account Name: Your Name
+OCM Account Username: you@domain.com
+OCM Account Email: you@domain.com
+OCM Organization ID: 1HopHfA20000000000000000000
+OCM Organization Name: Red Hat
+OCM Organization External ID: 0000000
+----
+. Check the information for accuracy before proceeding.
+
+== Verifying quota
+Verify that your AWS account has sufficient quota in the region where you will deploy your cluster.
+
+* Run the following command:
++
+[source,terminal]
+----
+rosa verify quota
+----
++
+.Example output
++
+[source,terminal]
+----
+I: Validating AWS quota...
+I: AWS quota ok.
+----
+
+* If cluster installation fails, validate the actual AWS resource usage against the xref:../../rosa_planning/rosa-sts-required-aws-service-quotas.adoc#rosa-sts-required-aws-service-quotas[AWS service quotas].
+
+== Verifying the `oc` CLI
+Verify that the `oc` CLI is installed correctly:
+
+[source,terminal]
+----
+rosa verify openshift-client
+----
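+
+You can also check the installed client directly:
+
+[source,terminal]
+----
+oc version --client
+----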
+
+You have now successfully set up your account and environment. You are ready to deploy your cluster.
+
+//== Deploying a cluster
+//In the next section you will deploy your cluster. There are two mechanisms to do so:
+
+//- Using the ROSA CLI
+//- Using the OCM Web User Interface
+
+//Either way is perfectly fine for the purposes of this workshop. Though keep in mind that if you are using the OCM UI, there will be a few extra steps to set it up in order to deploy into your AWS account for the first time. This will not need to be repeated for subsequent deployments using the OCM UI for the same AWS account.
+
+//Please select the desired mechanism in the left menu under "Deploy the cluster".
+
+//*[ROSA]: Red Hat OpenShift Service on AWS
+//*[STS]: AWS Security Token Service
+//*[OCM]: OpenShift Cluster Manager
\ No newline at end of file
diff --git a/cloud_experts_tutorials/cloud-experts-getting-started/cloud-experts-getting-started-support.adoc b/cloud_experts_tutorials/cloud-experts-getting-started/cloud-experts-getting-started-support.adoc
new file mode 100644
index 000000000000..2820a0e45148
--- /dev/null
+++ b/cloud_experts_tutorials/cloud-experts-getting-started/cloud-experts-getting-started-support.adoc
@@ -0,0 +1,69 @@
+:_mod-docs-content-type: ASSEMBLY
+[id="cloud-experts-getting-started-support"]
+= Tutorial: Obtaining support
+include::_attributes/attributes-openshift-dedicated.adoc[]
+:context: cloud-experts-getting-started-support
+
+toc::[]
+
+//rosaworkshop.io content metadata
+//Brought into ROSA product docs 2024-01-17
+
+Finding the right help when you need it is important. These are some of the resources at your disposal when you need assistance.
+
+== Adding support contacts
+You can add additional email addresses for communications about your cluster.
+
+. On the {cluster-manager-first} user interface (UI), select your cluster.
+. Click the *Support* tab.
+. Click *Add notification contact*, and enter the additional email addresses.
+
+== Contacting Red Hat for support using the UI
+
+. On the {cluster-manager} UI, click the *Support* tab.
+. Click *Open support case*.
+
+== Contacting Red Hat for support using the support page
+
+. Go to the link:https://support.redhat.com[Red Hat support page].
+. Click *Open a new Case*.
++
+image::obtain-support-case.png[]
+
+. Log in to your Red Hat account.
+. Select the reason for contacting support.
++
+image::obtain-support-reason.png[]
+
+. Select *Red Hat OpenShift Service on AWS*.
++
+image::obtain-support-select-rosa.png[]
+
+. Click *Continue*.
+. Enter a summary of the issue and the details of your request. Upload any files, logs, and screenshots. The more details you provide, the better Red Hat support can help with your case.
++
+[NOTE]
+====
+Relevant suggestions that might help with your issue will appear at the bottom of this page.
+====
++
+image::obtain-support-summary.png[]
+
+. Click *Continue*.
+. Answer the questions in the new fields.
+. Click *Continue*.
+. Enter the following information about your case:
+.. *Support level:* Premium
+.. *Severity:* Review the Red Hat Support Severity Level Definitions to choose the correct one.
+.. *Group:* If this is related to a few other cases, you can select the corresponding group.
+.. *Language*
+.. *Send notifications:* Add any additional email addresses to keep notified of activity.
+.. *Red Hat associates:* If you are working with anyone from Red Hat and want to keep them in the loop, you can enter their email addresses here.
+.. *Alternate Case ID:* If you want to attach your own ID to the case, you can enter it here.
+. Click *Continue*.
+. On the review screen, make sure you select the correct cluster ID that you are contacting support about.
++
+image::obtain-support-cluster-id.png[]
+
+. Click *Submit*.
+. You will be contacted based on the response time commitment for the link:https://access.redhat.com/support/offerings/openshift/sla[indicated severity level].
\ No newline at end of file
diff --git a/cloud_experts_tutorials/cloud-experts-getting-started/cloud-experts-getting-started-upgrading.adoc b/cloud_experts_tutorials/cloud-experts-getting-started/cloud-experts-getting-started-upgrading.adoc
new file mode 100644
index 000000000000..7f08181e10c6
--- /dev/null
+++ b/cloud_experts_tutorials/cloud-experts-getting-started/cloud-experts-getting-started-upgrading.adoc
@@ -0,0 +1,78 @@
+:_mod-docs-content-type: ASSEMBLY
+[id="cloud-experts-getting-started-upgrading"]
+= Tutorial: Upgrading your cluster
+include::_attributes/attributes-openshift-dedicated.adoc[]
+:context: cloud-experts-getting-started-upgrading
+
+toc::[]
+
+//rosaworkshop.io content metadata
+//Brought into ROSA product docs 2024-01-08
+
+{product-title} (ROSA) executes all cluster upgrades as part of the managed service. You do not need to run any commands or make changes to the cluster. You can schedule the upgrades at a convenient time.
+
+Ways to schedule a cluster upgrade include:
+
+* *Manually using the command line interface (CLI)*: Start a one-time immediate upgrade or schedule a one-time upgrade for a future date and time.
+* *Manually using the Red Hat OpenShift Cluster Manager user interface (UI)*: Start a one-time immediate upgrade or schedule a one-time upgrade for a future date and time.
+* *Automated upgrades*: Set an upgrade window for recurring z-stream (patch) upgrades, which are applied whenever a new version is available without needing to manually schedule them. Minor (y-stream) versions must be manually scheduled.
+
+For more details about cluster upgrades, run the following command:
+
+[source,terminal]
+----
+$ rosa upgrade cluster --help
+----
+
+== Manually upgrading your cluster using the CLI
+. Check if there is an upgrade available by running the following command:
++
+[source,terminal]
+----
+$ rosa list upgrade -c <cluster-name>
+----
++
+.Example output
++
+[source,terminal]
+----
+$ rosa list upgrade -c <cluster-name>
+VERSION NOTES
+4.14.7 recommended
+4.14.6
+...
+----
++
+In the above example, versions 4.14.7 and 4.14.6 are both available.
+
+. Schedule the cluster to upgrade within the hour by running the following command:
++
+[source,terminal]
+----
+$ rosa upgrade cluster -c <cluster-name> --version <version>
+----
+
+. *Optional:* Schedule the cluster to upgrade at a later date and time by running the following command:
++
+[source,terminal]
+----
+$ rosa upgrade cluster -c <cluster-name> --version <version> --schedule-date <yyyy-mm-dd> --schedule-time <HH:mm>
+----
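+
+For example, a hypothetical run that schedules an upgrade to version 4.14.7 at noon UTC on a chosen date (the date format is `yyyy-mm-dd` and the time format is `HH:mm`):
+
+[source,terminal]
+----
+$ rosa upgrade cluster -c my-rosa-cluster --version 4.14.7 --schedule-date 2024-06-01 --schedule-time 12:00
+----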
+
+== Manually upgrading your cluster using the UI
+. Log in to the OpenShift Cluster Manager, and select the cluster you want to upgrade.
+. Click *Settings*.
+. If an upgrade is available, click *Update*.
++
+image::cloud-experts-getting-started-cluster-upgrade.png[]
+
+. Select the version to which you want to upgrade in the new window.
+. Schedule a time for the upgrade or begin it immediately.
+
+== Setting up automatic recurring upgrades
+. Log in to the OpenShift Cluster Manager, and select the cluster you want to upgrade.
+. Click *Settings*.
+. Under *Update Strategy*, click *Recurring updates*.
+. Set the day and time for the upgrade to occur.
+. Under *Node draining*, select a grace period to allow the nodes to drain before pod eviction.
+. Click *Save*.
\ No newline at end of file
diff --git a/cloud_experts_tutorials/cloud-experts-getting-started/cloud-experts-getting-started-what-is-rosa.adoc b/cloud_experts_tutorials/cloud-experts-getting-started/cloud-experts-getting-started-what-is-rosa.adoc
new file mode 100644
index 000000000000..1e7f43672e36
--- /dev/null
+++ b/cloud_experts_tutorials/cloud-experts-getting-started/cloud-experts-getting-started-what-is-rosa.adoc
@@ -0,0 +1,47 @@
+:_mod-docs-content-type: ASSEMBLY
+[id="cloud-experts-getting-started-what-is-rosa"]
+= Tutorial: What is ROSA
+include::_attributes/attributes-openshift-dedicated.adoc[]
+:context: cloud-experts-getting-started-what-is-rosa
+
+toc::[]
+
+//rosaworkshop.io content metadata
+//Brought into ROSA product docs 2024-01-18
+
+Red Hat OpenShift Service on AWS (ROSA) is a fully managed turnkey application platform that allows you to focus on what matters most: delivering value to your customers by building and deploying applications. Red Hat and AWS SRE experts manage the underlying platform so you do not have to worry about infrastructure management. ROSA provides seamless integration with a wide range of AWS compute, database, analytics, machine learning, networking, mobile, and other services to further accelerate the building and delivering of differentiating experiences to your customers.
+
+ROSA makes use of AWS Security Token Service (STS) to obtain credentials to manage infrastructure in your AWS account. AWS STS is a global web service that creates temporary credentials for IAM users or federated users. ROSA uses this to assign short-term, limited-privilege security credentials. These credentials are associated with IAM roles that are specific to each component that makes AWS API calls. This method aligns with the principles of least privilege and secure practices in cloud service resource management. The ROSA command line interface (CLI) tool manages the STS credentials that are assigned for unique tasks and takes action on AWS resources as part of OpenShift functionality.
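+
+As an illustration of the underlying mechanism (not a step you need to run), exchanging a service account token for temporary credentials looks roughly like the following AWS CLI call; the role ARN and token path here are hypothetical:
+
+[source,terminal]
+----
+$ aws sts assume-role-with-web-identity \
+    --role-arn arn:aws:iam::123456789012:role/example-component-role \
+    --role-session-name example-session \
+    --web-identity-token file:///var/run/secrets/openshift/serviceaccount/token
+----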
+//For a detailed explanation, see "ROSA with STS Explained" (add xref when page is migrated).
+
+A list of the account-wide and per-cluster roles is provided in the xref:../../rosa_architecture/rosa-sts-about-iam-resources.adoc#rosa-sts-account-wide-roles-and-policies-creation-methods_rosa-sts-about-iam-resources[ROSA documentation].
+
+//== Creating your first ROSA cluster
+
+//Watch this demo for a short preview of the cluster deployment process:
+//link:https://youtu.be/KbzUbXWs6Ck
+
+//If you want an easy guide for creating your first ROSA cluster:
+
+//. Review the xref:../../rosa_planning/rosa-sts-aws-prereqs.adoc#rosa-sts-aws-prereqs[prerequisites].
+//. Visit the quickstart guide.
+
+.Additional resources
+
+* ROSA product pages:
+** link:https://www.openshift.com/products/amazon-openshift[Red Hat product page]
+** link:https://aws.amazon.com/rosa/[AWS product page]
+** link:https://access.redhat.com/products/red-hat-openshift-service-aws[Red Hat customer portal]
+* ROSA specific resources
+** link:https://docs.aws.amazon.com/ROSA/latest/userguide/getting-started.html[AWS ROSA getting started guide]
+** xref:../../welcome/index.adoc#welcome-index[ROSA documentation]
+** xref:../../rosa_architecture/rosa_policy_service_definition/rosa-service-definition.adoc#rosa-service-definition[ROSA service definition]
+** xref:../../rosa_architecture/rosa_policy_service_definition/rosa-policy-responsibility-matrix.adoc#rosa-policy-responsibility-matrix[ROSA responsibility assignment matrix]
+** xref:../../rosa_architecture/rosa_policy_service_definition/rosa-policy-process-security.adoc#rosa-policy-process-security[Understanding Process and Security]
+** xref:../../rosa_architecture/rosa_policy_service_definition/rosa-policy-understand-availability.adoc#rosa-policy-understand-availability[About Availability]
+** xref:../../rosa_architecture/rosa_policy_service_definition/rosa-life-cycle.adoc#rosa-life-cycle[Updates Lifecycle]
+** xref:../../rosa_planning/rosa-limits-scalability.adoc#rosa-limits-scalability[Limits and Scalability]
+** link:https://red.ht/rosa-roadmap[ROSA roadmap]
+* link:https://learn.openshift.com[Learn about OpenShift]
+* link:https://console.redhat.com/OpenShift[OpenShift Cluster Manager]
+* link:https://support.redhat.com[Red Hat Support]
\ No newline at end of file
diff --git a/cloud_experts_tutorials/cloud-experts-getting-started/images b/cloud_experts_tutorials/cloud-experts-getting-started/images
new file mode 120000
index 000000000000..e4c5bd02a10a
--- /dev/null
+++ b/cloud_experts_tutorials/cloud-experts-getting-started/images
@@ -0,0 +1 @@
+../images/
\ No newline at end of file
diff --git a/sd_support/modules b/cloud_experts_tutorials/cloud-experts-getting-started/modules
similarity index 100%
rename from sd_support/modules
rename to cloud_experts_tutorials/cloud-experts-getting-started/modules
diff --git a/cloud_experts_tutorials/cloud-experts-getting-started/snippets b/cloud_experts_tutorials/cloud-experts-getting-started/snippets
new file mode 120000
index 000000000000..9d58b92e5058
--- /dev/null
+++ b/cloud_experts_tutorials/cloud-experts-getting-started/snippets
@@ -0,0 +1 @@
+../snippets/
\ No newline at end of file
diff --git a/cloud_experts_tutorials/cloud-experts-rosa-cloudwatch-sts.adoc b/cloud_experts_tutorials/cloud-experts-rosa-cloudwatch-sts.adoc
new file mode 100644
index 000000000000..c4d731d360d8
--- /dev/null
+++ b/cloud_experts_tutorials/cloud-experts-rosa-cloudwatch-sts.adoc
@@ -0,0 +1,308 @@
+:_mod-docs-content-type: ASSEMBLY
+[id="cloud-experts-rosa-cloudwatch-sts"]
+= Configuring log forwarding for CloudWatch logs and STS
+include::_attributes/attributes-openshift-dedicated.adoc[]
+:context: cloud-experts-rosa-cloudwatch-sts
+
+toc::[]
+
+//Mobb content metadata
+//Brought into ROSA product docs 2023-09-18
+//---
+// date: '2022-08-19'
+// date:
+// title: Configuring the Cluster Log Forwarder for CloudWatch Logs and STS
+// tags: ["AWS", "ROSA"]
+// authors:
+// - Paul Czarkowski
+// - Connor Wooley
+// ---
+
+Use this tutorial to deploy the {clo} and configure it to use Security Token Services (STS) authentication to forward logs to CloudWatch.
+
+[id="cloud-experts-rosa-cloudwatch-sts-prerequisites"]
+.Prerequisites
+
+* A {product-title} (ROSA) Classic cluster
+* The `jq` command-line interface (CLI)
+* The Amazon Web Services (AWS) CLI (`aws`)
+
+[id="cloud-experts-rosa-cloudwatch-sts-environment-setup"]
+== Setting up your environment
+
+. Configure the following environment variables, changing the cluster name to suit your cluster:
++
+[NOTE]
+====
+You must be logged in as an administrator.
+====
++
+[source,terminal]
+----
+$ export ROSA_CLUSTER_NAME=$(oc get infrastructure cluster -o=jsonpath="{.status.infrastructureName}" | sed 's/-[a-z0-9]\{5\}$//')
+$ export REGION=$(rosa describe cluster -c ${ROSA_CLUSTER_NAME} --output json | jq -r .region.id)
+$ export OIDC_ENDPOINT=$(oc get authentication.config.openshift.io cluster -o json | jq -r .spec.serviceAccountIssuer | sed 's|^https://||')
+$ export AWS_ACCOUNT_ID=`aws sts get-caller-identity --query Account --output text`
+$ export AWS_PAGER=""
+$ export SCRATCH="/tmp/${ROSA_CLUSTER_NAME}/clf-cloudwatch-sts"
+$ mkdir -p ${SCRATCH}
+----
+
+. Ensure all fields output correctly before moving to the next section:
++
+[source,terminal]
+----
+$ echo "Cluster: ${ROSA_CLUSTER_NAME}, Region: ${REGION}, OIDC Endpoint: ${OIDC_ENDPOINT}, AWS Account ID: ${AWS_ACCOUNT_ID}"
+----
+
+[id="cloud-experts-rosa-cloudwatch-sts-prep-aws"]
+== Preparing your AWS account
+
+. Create an Identity Access Management (IAM) policy for {logging}:
++
+[source,terminal]
+----
+$ POLICY_ARN=$(aws iam list-policies --query "Policies[?PolicyName=='RosaCloudWatch'].{ARN:Arn}" --output text)
+$ if [[ -z "${POLICY_ARN}" ]]; then
+cat << EOF > ${SCRATCH}/policy.json
+{
+ "Version": "2012-10-17",
+ "Statement": [
+ {
+ "Effect": "Allow",
+ "Action": [
+ "logs:CreateLogGroup",
+ "logs:CreateLogStream",
+ "logs:DescribeLogGroups",
+ "logs:DescribeLogStreams",
+ "logs:PutLogEvents",
+ "logs:PutRetentionPolicy"
+ ],
+ "Resource": "arn:aws:logs:*:*:*"
+ }
+]
+}
+EOF
+POLICY_ARN=$(aws iam create-policy --policy-name "RosaCloudWatch" \
+--policy-document file://${SCRATCH}/policy.json --query Policy.Arn --output text)
+fi
+$ echo ${POLICY_ARN}
+----
+
+. Create an IAM role trust policy for the cluster:
++
+[source,terminal]
+----
+$ cat << EOF > ${SCRATCH}/trust-policy.json
+{
+ "Version": "2012-10-17",
+ "Statement": [{
+ "Effect": "Allow",
+ "Principal": {
+ "Federated": "arn:aws:iam::${AWS_ACCOUNT_ID}:oidc-provider/${OIDC_ENDPOINT}"
+ },
+ "Action": "sts:AssumeRoleWithWebIdentity",
+ "Condition": {
+ "StringEquals": {
+ "${OIDC_ENDPOINT}:sub": "system:serviceaccount:openshift-logging:logcollector"
+ }
+ }
+ }]
+}
+EOF
+$ ROLE_ARN=$(aws iam create-role --role-name "${ROSA_CLUSTER_NAME}-RosaCloudWatch" \
+ --assume-role-policy-document file://${SCRATCH}/trust-policy.json \
+ --query Role.Arn --output text)
+$ echo ${ROLE_ARN}
+----
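++
+Before attaching the policy, you can optionally confirm that the trust relationship was stored as intended; this verification step is not part of the original procedure:
++
+[source,terminal]
+----
+$ aws iam get-role --role-name "${ROSA_CLUSTER_NAME}-RosaCloudWatch" \
+  --query Role.AssumeRolePolicyDocument
+----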
+
+. Attach the IAM policy to the IAM role:
++
+[source,terminal]
+----
+$ aws iam attach-role-policy --role-name "${ROSA_CLUSTER_NAME}-RosaCloudWatch" \
+ --policy-arn ${POLICY_ARN}
+----
+
+[id="cloud-experts-rosa-cloudwatch-sts-deploy-Os"]
+== Deploying Operators
+
+. Deploy the {clo}:
++
+[source,terminal]
+----
+$ cat << EOF | oc apply -f -
+ apiVersion: operators.coreos.com/v1alpha1
+ kind: Subscription
+ metadata:
+ labels:
+ operators.coreos.com/cluster-logging.openshift-logging: ""
+ name: cluster-logging
+ namespace: openshift-logging
+ spec:
+ channel: stable
+ installPlanApproval: Automatic
+ name: cluster-logging
+ source: redhat-operators
+ sourceNamespace: openshift-marketplace
+EOF
+----
+
+. Create a secret:
++
+[source,terminal]
+----
+$ cat << EOF | oc apply -f -
+ apiVersion: v1
+ kind: Secret
+ metadata:
+ name: cloudwatch-credentials
+ namespace: openshift-logging
+ stringData:
+ role_arn: $ROLE_ARN
+EOF
+----
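++
+If you want to double-check the secret contents, the following optional command (not part of the original steps) prints the stored role ARN:
++
+[source,terminal]
+----
+$ oc -n openshift-logging get secret cloudwatch-credentials \
+  -o jsonpath='{.data.role_arn}' | base64 -d
+----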
+
+[id="cloud-experts-rosa-cloudwatch-sts-configure-cluster-logging"]
+== Configuring cluster logging
+
+. Create a `ClusterLogForwarder` custom resource (CR):
++
+[source,terminal]
+----
+$ cat << EOF | oc apply -f -
+ apiVersion: "logging.openshift.io/v1"
+ kind: ClusterLogForwarder
+ metadata:
+ name: instance
+ namespace: openshift-logging
+ spec:
+ outputs:
+ - name: cw
+ type: cloudwatch
+ cloudwatch:
+ groupBy: namespaceName
+ groupPrefix: rosa-${ROSA_CLUSTER_NAME}
+ region: ${REGION}
+ secret:
+ name: cloudwatch-credentials
+ pipelines:
+ - name: to-cloudwatch
+ inputRefs:
+ - infrastructure
+ - audit
+ - application
+ outputRefs:
+ - cw
+EOF
+----
+
+. Create a `ClusterLogging` CR:
++
+[source,terminal]
+----
+$ cat << EOF | oc apply -f -
+ apiVersion: logging.openshift.io/v1
+ kind: ClusterLogging
+ metadata:
+ name: instance
+ namespace: openshift-logging
+ spec:
+ collection:
+ logs:
+ type: vector
+ managementState: Managed
+EOF
+----
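++
+After both custom resources are applied, the log collector pods should start in the `openshift-logging` namespace; watching them come up is an optional sanity check:
++
+[source,terminal]
+----
+$ oc -n openshift-logging get pods -w
+----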
+
+[id="cloud-experts-rosa-cloudwatch-sts-check-aws"]
+== Checking CloudWatch for logs
+
+* Use either the AWS console or the AWS CLI to validate that there are log streams from the cluster.
+** To validate the logs in the AWS CLI, run the following command:
++
+[source,terminal]
+----
+$ aws logs describe-log-groups --log-group-name-prefix rosa-${ROSA_CLUSTER_NAME}
+----
++
+.Sample output
++
+[source,json]
+----
+{
+ "logGroups": [
+ {
+ "logGroupName": "rosa-xxxx.audit",
+ "creationTime": 1661286368369,
+ "metricFilterCount": 0,
+ "arn": "arn:aws:logs:us-east-2:xxxx:log-group:rosa-xxxx.audit:*",
+ "storedBytes": 0
+ },
+ {
+ "logGroupName": "rosa-xxxx.infrastructure",
+ "creationTime": 1661286369821,
+ "metricFilterCount": 0,
+ "arn": "arn:aws:logs:us-east-2:xxxx:log-group:rosa-xxxx.infrastructure:*",
+ "storedBytes": 0
+ }
+ ]
+}
+----
++
+[NOTE]
+====
+If this is a new cluster, you might not see a log group for `application` logs as applications are not yet running.
+====
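++
+To inspect actual log events rather than just group names, the `aws logs tail` helper is convenient; this optional check assumes AWS CLI version 2:
++
+[source,terminal]
+----
+$ aws logs tail "rosa-${ROSA_CLUSTER_NAME}.infrastructure" --follow
+----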
+
+[id="cloud-experts-rosa-cloudwatch-sts-clean-up"]
+== Cleaning up your resources
+
+. Delete the `ClusterLogForwarder` CR:
++
+[source,terminal]
+----
+$ oc delete -n openshift-logging clusterlogforwarder instance
+----
+
+. Delete the `ClusterLogging` CR:
++
+[source,terminal]
+----
+$ oc delete -n openshift-logging clusterlogging instance
+----
+
+. Detach the IAM policy from the IAM role:
++
+[source,terminal]
+----
+$ aws iam detach-role-policy --role-name "${ROSA_CLUSTER_NAME}-RosaCloudWatch" \
+ --policy-arn "${POLICY_ARN}"
+----
+
+. Delete the IAM role:
++
+[source,terminal]
+----
+$ aws iam delete-role --role-name "${ROSA_CLUSTER_NAME}-RosaCloudWatch"
+----
+
+. Delete the IAM policy:
++
+[IMPORTANT]
+====
+Only delete the IAM policy if there are no other resources using the policy.
+====
++
+[source,terminal]
+----
+$ aws iam delete-policy --policy-arn "${POLICY_ARN}"
+----
+
+. Delete the CloudWatch log groups:
++
+[source,terminal]
+----
+$ aws logs delete-log-group --log-group-name "rosa-${ROSA_CLUSTER_NAME}.audit"
+$ aws logs delete-log-group --log-group-name "rosa-${ROSA_CLUSTER_NAME}.infrastructure"
+----
diff --git a/cloud_experts_tutorials/cloud-experts-rosa-osd-change-default-domain.adoc b/cloud_experts_tutorials/cloud-experts-rosa-osd-change-default-domain.adoc
new file mode 100644
index 000000000000..a2633cf45335
--- /dev/null
+++ b/cloud_experts_tutorials/cloud-experts-rosa-osd-change-default-domain.adoc
@@ -0,0 +1,199 @@
+:_mod-docs-content-type: ASSEMBLY
+[id="cloud-experts-rosa-osd-change-default-domain"]
+= Tutorial: Changing the Console, OAuth, and Downloads domains and TLS certificate on ROSA and OSD
+include::_attributes/attributes-openshift-dedicated.adoc[]
+:context: cloud-experts-rosa-osd-change-default-domain
+
+toc::[]
+
+//Content metadata
+//Brought into ROSA product docs 2023-12-14
+//---
+//date: '2022-12-07'
+//title: Changing the Console, OAuth, and Downloads Domain and TLS Certificate on ROSA and OSD
+//weight: 1
+//tags: ["AWS", "ROSA", "OSD"]
+//authors:
+// Hector Kemp
+//---
+
+//Footnote definitions
+:fn-supported-cli: footnote:[The example commands in this guide use the ROSA CLI, but similar commands with the same function are available in the OCM CLI version 0.1.68 and higher for OpenShift Dedicated clusters that run on Google Cloud Platform.]
+:fn-supported-versions: footnote:[Modifying these routes on ROSA and OSD versions prior to 4.14 is not typically supported. However, if you have a cluster using version 4.13, you can ask Red Hat Support to enable this feature on your version 4.13 cluster.]
+:fn-term-component-routes: footnote:[We use the term `component routes` to refer to the `OAuth`, `Console`, and `Downloads` routes that are provided when ROSA and OSD are first installed. The ROSA CLI also uses the term `cluster routes` to refer to these resources.]
+
+//Article text
+This guide demonstrates how to modify the Console, Downloads, and OAuth domains and the TLS certificate keypair on Red Hat OpenShift Service on AWS (ROSA) and Red Hat OpenShift Dedicated (OSD) versions 4.14 and above. {fn-supported-versions}
+
+////
+The changes that we make to the component routes {fn-term-component-routes} in this guide are described in greater detail in the following documentation:
+
+* link:https://docs.openshift.com/container-platform/latest/authentication/configuring-internal-oauth.html#customizing-the-oauth-server-url_configuring-internal-oauth[Customizing the internal OAuth server URL]
+* link:https://docs.openshift.com/container-platform/latest/web_console/customizing-the-web-console.html#customizing-the-console-route_customizing-web-console[Customizing the console route]
+* link:https://docs.openshift.com/container-platform/latest/web_console/customizing-the-web-console.html#customizing-the-download-route_customizing-web-console[Customizing the download route]
+////
+
+[id="prerequisites_{context}"]
+== Prerequisites
+
+* ROSA CLI (`rosa`) version 1.2.27 or higher {fn-supported-cli}
+* AWS CLI (`aws`)
+* OpenShift CLI (`oc`)
+* A ROSA or OSD cluster (STS, non-STS, or PrivateLink)
+* OpenSSL (for generating the demonstration SSL certificate), which can be downloaded and installed from link:https://www.openssl.org/source/[OpenSSL.org]
+* Access to the cluster as a user with the `cluster-admin` role.
+
+[id="find-current-routes_{context}"]
+== Find the current routes
+
+Before we make any configuration changes, we need to know the current routes in the cluster.
+
+* Verify that you can reach the component routes on their default hostnames.
++
+You can find the hostnames by querying the lists of routes in `openshift-console` and `openshift-authentication`.
++
+[source,bash]
+----
+$ oc get routes -n openshift-console
+NAME HOST/PORT PATH SERVICES PORT TERMINATION WILDCARD
+console console-openshift-console.apps.my-example-cluster-aws.z9a9.p1.openshiftapps.com ... 1 more console https reencrypt/Redirect None
+downloads downloads-openshift-console.apps.my-example-cluster-aws.z9a9.p1.openshiftapps.com ... 1 more downloads http edge/Redirect None
+----
++
+[source,bash]
+----
+$ oc get routes -n openshift-authentication
+NAME HOST/PORT PATH SERVICES PORT TERMINATION WILDCARD
+oauth-openshift oauth-openshift.apps.my-example-cluster-aws.z9a9.p1.openshiftapps.com ... 1 more oauth-openshift 6443 passthrough/Redirect None
+----
++
+From these commands you can see that our base hostname is `z9a9.p1.openshiftapps.com`.
+
+* Verify that the default ingress exists, and ensure that the base hostname matches that of the component routes.
++
+[source,bash]
+----
+$ rosa list ingress -c <cluster-name>
+ID API ENDPOINT PRIVATE
+r3l6 https://apps.my-example-cluster-aws.z9a9.p1.openshiftapps.com external true
+----
++
+Our ingress route shares the base hostname of `z9a9.p1.openshiftapps.com`.
++
+Note the ID of the default ingress: `r3l6`. We will need this to set up new DNS records later.
+
+By running these commands you can see that the default component routes for our cluster are:
+
+* `console-openshift-console.apps.my-example-cluster-aws.z9a9.p1.openshiftapps.com` for Console
+* `downloads-openshift-console.apps.my-example-cluster-aws.z9a9.p1.openshiftapps.com` for Downloads
+* `oauth-openshift.apps.my-example-cluster-aws.z9a9.p1.openshiftapps.com` for OAuth
+
+We can use the `rosa edit ingress` command to change this base hostname and add a TLS certificate for all of our component routes. The relevant parameters are shown in this excerpt of the command line help for the `rosa edit ingress` command:
+
+[source,bash]
+----
+$ rosa edit ingress -h
+Edit a cluster ingress for a cluster. Usage:
+ rosa edit ingress ID [flags]
+ [...]
+ --cluster-routes-hostname string Components route hostname for oauth, console, download.
+ --cluster-routes-tls-secret-ref string Components route TLS secret reference for oauth, console, download.
+----
+
+Note that when we use this command to change the component routes, it will change the base domain for all three routes at the same time.
+
+If we choose a new base domain of `my-new-domain.dev`, our new component routes for our cluster will be:
+
+* `console-openshift-console.my-new-domain.dev` for Console
+* `downloads-openshift-console.my-new-domain.dev` for Downloads
+* `oauth-openshift.my-new-domain.dev` for OAuth
+
+[id="create-tls-certificate-for-routes_{context}"]
+== Create a valid TLS certificate for each component route
+
+In this section, we create a self-signed certificate key pair and then trust it to verify that we can access our new component routes using a real web browser. This is for demonstration purposes only, and is not recommended as a solution for production workloads. Consult your certificate authority to understand how to create a certificate with similar attributes for your production workloads.
+
+To work correctly, the certificate we create needs the following:
+
+* a Common Name (CN) that matches the **wildcard** DNS of the `--cluster-routes-hostname` parameter
+* a Subject Alternative Name (SAN) for each component route that **matches** the routes generated by our new hostname
+
+For a base domain of `my-new-domain.dev`, our certificate's subject (`-subj`) looks like this:
+
+----
+/CN=*.my-new-domain.dev
+----
+
+We also need a SAN for each of our component routes:
+
+----
+subjectAltName = DNS:console-openshift-console.my-new-domain.dev
+subjectAltName = DNS:downloads-openshift-console.my-new-domain.dev
+subjectAltName = DNS:oauth-openshift.my-new-domain.dev
+----
+
+We can generate our certificate by running the following `openssl` command:
+
+[source,bash]
+----
+$ openssl req -newkey rsa:2048 -new -nodes -x509 -days 365 -keyout key-my-new-domain.pem -out cert-my-new-domain.pem -subj "/CN=*.my-new-domain.dev" -addext "subjectAltName = DNS:console-openshift-console.my-new-domain.dev, DNS:oauth-openshift.my-new-domain.dev, DNS:downloads-openshift-console.my-new-domain.dev"
+----
+
+This generates two `.pem` files, `key-my-new-domain.pem` and `cert-my-new-domain.pem`.
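+
+If you want to confirm that the generated certificate carries the expected SANs before using it, you can inspect it with `openssl` (an optional check, assuming the file names above):
+
+[source,bash]
+----
+$ openssl x509 -in cert-my-new-domain.pem -noout -text | grep -A1 "Subject Alternative Name"
+----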
+
+[id="add-certificate-as-cluster-secret_{context}"]
+== Add the certificate to the cluster as a secret
+
+. Log in to the cluster as a user with the `cluster-admin` role.
+
+. Generate a TLS secret in the `openshift-config` namespace.
++
+This becomes your secret reference when you update the component routes later in this guide.
++
+[source,bash]
+----
+$ oc create secret tls component-tls --cert=cert-my-new-domain.pem --key=key-my-new-domain.pem -n openshift-config
+----
+
+[id="find-lb-hostname_{context}"]
+== Find the hostname of the load balancer in your cluster
+
+When you create a cluster, ROSA and OSD create a load balancer and generate a hostname for that load balancer. We need to know the load balancer hostname in order to create DNS records for our cluster.
+
+You can find the hostname by running the `oc get svc` command against the `openshift-ingress` namespace. The hostname of the load balancer is the `EXTERNAL-IP` associated with the `router-default` service in the `openshift-ingress` namespace.
+
+[source,bash]
+----
+$ oc get svc -n openshift-ingress
+NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
+router-default LoadBalancer 172.30.237.88 a234gsr3242rsfsfs-1342r624.us-east-1.elb.amazonaws.com 80:31175/TCP,443:31554/TCP 76d
+----
+
+In our case, the hostname is `a234gsr3242rsfsfs-1342r624.us-east-1.elb.amazonaws.com`.
+
+Save this value for later, as we will need it to configure DNS records for our new component route hostnames.
+
+[id="add-routes-to-dns_{context}"]
+== Add component route DNS records to your hosting provider
+
+In your hosting provider, add DNS records that map the `CNAME` of your new component route hostnames to the load balancer hostname we found in the previous step.
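+
+Once the records are in place, you can optionally confirm that each component route hostname resolves before changing the ingress; `dig` is one way to check (hostnames assume the example domain used in this guide):
+
+[source,bash]
+----
+$ dig +short console-openshift-console.my-new-domain.dev
+----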
+
+//.Need an image for this
+//image::[Picture goes here]
+
+[id="update-routes-tls-using-rosa-cli_{context}"]
+== Update the component routes and TLS secret using the ROSA CLI
+
+When your DNS records have been updated, you can use the ROSA CLI to change the component routes.
+
+Use the `rosa edit ingress` command to update your default ingress route with the new base domain and the secret reference associated with it.
+
+[source,bash]
+----
+$ rosa edit ingress -c r3l6 --cluster-routes-hostname="my-new-domain.dev" --cluster-routes-tls-secret-ref="component-tls"
+
+ID APPLICATION ROUTER PRIVATE DEFAULT [...] LB-TYPE [...] WILDCARD POLICY NAMESPACE OWNERSHIP HOSTNAME TLS SECRET REF
+r3l6 https://apps.my-example-cluster-aws.z9a9.p1.openshiftapps.com yes yes [...] nlb [...] WildcardsDisallowed Strict my-new-domain.dev component-tls
+----
+
+Add your certificate to the trust store on your local system, then confirm that you can access your components at their new routes using your local web browser.
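+
+If you prefer to verify from the command line without modifying your system trust store, a quick check with `curl` against the self-signed certificate generated earlier might look like this (file and host names assume the examples in this guide):
+
+[source,bash]
+----
+$ curl -I --cacert cert-my-new-domain.pem https://console-openshift-console.my-new-domain.dev
+----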
diff --git a/cloud_experts_tutorials/cloud-experts-template-tutorial.adoc b/cloud_experts_tutorials/cloud-experts-template-tutorial.adoc
new file mode 100644
index 000000000000..316475cf7ec0
--- /dev/null
+++ b/cloud_experts_tutorials/cloud-experts-template-tutorial.adoc
@@ -0,0 +1,295 @@
+//Duplicate this file and use it as a template to write your content.
+//Look for TODO statements and angle brackets to identify content to add or replace.
+
+// Basic document metadata
+// TODO: replace anything in angle brackets in this metadata block
+:_mod-docs-content-type: ASSEMBLY
+[id="cloud-experts-<name>-tutorial"]
+= Tutorial: <title>
+include::_attributes/attributes-openshift-dedicated.adoc[]
+:context: cloud-experts-<name>-tutorial
+
+//This automatically adds a table of contents so readers can jump to a specific heading.
+toc::[]
+
+//TODO: The following block must remain commented out, but add your name to the authors list in whatever format you want to be publicly visible in the openshift-docs repo.
+//Mobb content metadata
+//Brought into ROSA product docs 2023-09-18
+//---
+//date: '2021-06-10'
+//title: ROSA Prerequisites
+//weight: 1
+//tags: ["AWS", "ROSA", "Quickstarts"]
+//authors:
+// -
+//---
+
+// Docs will remove this support statement once this content has gone through regular QE validation.
+// TODO: IF YOU UPDATED THIS DOCUMENT AND THIS STATEMENT IS COMMENTED OUT, make sure you uncomment it again so QE can review your updates.
+include::snippets/mobb-support-statement.adoc[leveloffset=+1]
+
+//TODO: Write a few sentences about what the people who use this doc want to accomplish and what that might let them do.
+Introductory sentence or two about this whole file and why a customer might want to follow these steps.
+
+//TODO: Write up the information you want to publish for customers.
+//This documentation uses basic asciidoc syntax.
+
+//TODO: replace anything in angle brackets and add your content below
+[id='cloud-experts-<name>-tutorial-<section-name>']
+== <Section heading>
+
+Introductory sentence about the purpose of this section.
+
+Steps or information that the customer should follow.
+
+Check out the syntax guide below for examples of what you can add here.
+
+//TODO: replace anything in angle brackets and add your content below
+[id='cloud-experts-<name>-tutorial-<section-name>']
+== <Section heading>
+
+Introductory sentence about the purpose of this section.
+
+Steps or information that a customer should follow.
+
+Check out the syntax guide below for examples of what you can add here.
+
+//TODO: Provide links to any next steps or additional information the user is likely to want at this point.
+//TODO: If no other resources are likely to be needed, just delete this block.
+[id='cloud-experts-<name>-tutorial-additional-resources']
+== Additional resources
+* link:https://cloud.redhat.com/experts/rosa/verify-permissions[Verify required permissions for a ROSA STS deployment]
+* link:https://cloud.redhat.com/experts/rosa/ecr[Configure a ROSA cluster to pull images from AWS Elastic Container Registry]
+
+//TODO: When you are finished writing your tutorial, delete everything below this line.
+// These are just some basic syntax examples so you can copy and paste easily.
+== AsciiDoc Syntax Basics
+
+For more information, refer to link:https://asciidoctor.org/docs/asciidoc-writers-guide/[AsciiDoc Writer's Guide]
+
+=== Headings
+
+To add a heading, use equals signs at the beginning of the line:
+
+[source]
+----
+== Second level heading text
+=== Third level heading text
+----
+
+Up to 5 levels of heading are available, but this usually won't be required.
+
+.Block element heading
+You can also add a small header to a block element by adding a full-stop at the start of the line. Do not add a space or this will become an ordered list item.
+
+[source]
+----
+.Block element heading
+----
+
+=== Paragraphs, links, and inline elements
+
+A paragraph is just a plain line of text like this.
+A single line break will be rendered as part of the same paragraph.
+
+A double line break will be rendered as a separate paragraph.
+
+You can _emphasize_ or *strengthen* text in a paragraph, or add a link to a link:http://www.redhat.com[Red Hat website].
+
+[source]
+----
+You can _emphasize_ or *strengthen* text in a paragraph, or add a link to a link:http://www.redhat.com[Red Hat website].
+----
+
+Link to non-Red Hat websites if necessary, but remember they won't necessarily open in a new tab, and might lead users away from your document.
+
+=== Lists
+
+Unordered lists use asterisks followed by a space:
+
+.My unordered list
+* First level list item
+** Use multiple asterisks to indent second level list items
+* Another first level list item
+
+[source]
+----
+.My unordered list
+* First level list item
+** Use multiple asterisks to indent second level list items
+* Another first level list item
+----
+
+Ordered lists use full stops followed by a space:
+
+.My ordered list
+. Step one
+.. Step 1.a.
+. Step two
+
+[source]
+----
+.My ordered list
+. Step one
+.. Step 1.a.
+. Step two
+----
+
+You can also use two colons to get a term and definition list:
+
+.Important definitions
+term:: definition goes here
+cat:: A cute and cuddly carnivorous companion animal with hidden foot knives.
+dog:: A cute and cuddly omnivorous companion animal with clearly visible foot knives.
+
+[source]
+----
+.Important definitions
+term:: definition goes here
+cat:: A cute and cuddly carnivorous companion animal with hidden foot knives.
+dog:: A cute and cuddly omnivorous companion animal with clearly visible foot knives.
+----
+
+=== Code
+
+You can use `backticks` to indicate a literal value inline, but for commands, code, and terminal output you want a code block:
+
+[source,terminal]
+----
+$ rosa version
+1.2.23
+I: There is a newer release version '1.2.26', please consider updating: https://mirror.openshift.com/pub/openshift-v4/clients/rosa/latest/
+----
+
+[source]
+------
+[source,terminal]
+----
+$ rosa version
+1.2.23
+I: There is a newer release version '1.2.26', please consider updating: https://mirror.openshift.com/pub/openshift-v4/clients/rosa/latest/
+----
+------
+
+The `[source]` part ahead of the code block specifies the kind of code in the block, and enables syntax highlighting based on the language provided after the comma.
+
+
+=== Tables
+
+Tables use a combination of pipes and equals signs for their basic structure. If you want to include more complex content, you can add an `a` before the pipe indicating the start of the complex cell, and use other kinds of asciidoc syntax in that cell as required.
+
+.Tables need a title
+|===
+|Left column |Right column
+
+| Row 1 left column cell
+| Row 1 right column cell
+
+a| Row 2 left column cell
+
+[NOTE]
+====
+With a note!
+====
+
+| Row 2 right column cell
+
+|===
+
+[source]
+------
+.Tables need a title
+|===
+|Left column |Right column
+
+| Row 1 left column cell
+| Row 1 right column cell
+
+a| Row 2 left column cell
+
+[NOTE]
+====
+With a note!
+====
+
+| Row 2 right column cell
+
+|===
+------
+
+=== Images
+
+Place the image you want to use in the `cloud_experts_tutorials/images` sub-directory, and use only the file name in the `image::` block (not the relative path). For accessibility support, add a title for the image and describe the contents of the image. We recommend using PNG and SVG image formats.
+
+.The perspectives menu in OpenShift Web Console
+image::web_console_perspectives.png[The perspectives menu in OpenShift Web Console showing the Developer and Administrator perspectives]
+
+[source]
+----
+.The perspectives menu in OpenShift Web Console
+image::web_console_perspectives.png[The perspectives menu in OpenShift Web Console showing the Developer and Administrator perspectives]
+----
+
+=== Warnings and admonition blocks
+
+If you want to highlight some information you can place it in a callout block.
+Only do this for important things a user might miss, or our users will start ignoring them.
+
+[NOTE]
+====
+You can use other elements inside an admonition, but don't go overboard.
+We typically use three kinds:
+
+* NOTE is for information that is useful to know, and can often be just a regular sentence instead.
+* IMPORTANT is for information that a customer should be aware of but won't cause serious problems if it's ignored.
+* WARNING is for information that a customer needs to obey to avoid data loss or other critical failures.
+====
+
+[source]
+----
+
+[NOTE]
+====
+You can use other elements inside an admonition, but don't go overboard.
+We typically use three kinds:
+
+* NOTE is for information that is useful to know, and can often be just a regular sentence instead.
+* IMPORTANT is for information that a customer should be aware of but won't cause serious problems if it's ignored.
+* WARNING is for information that a customer needs to obey to avoid data loss or other critical failures.
+====
+----
+
+=== Combining block level elements
+
+You can chain multiple block-level asciidoc elements together with the plus sign. You'll most often do this with steps and code blocks, so that the code block stays at the same indent level as the instruction text, for example:
+
+. Open a terminal and log in to the ROSA CLI using your personal token:
++
+[source,terminal]
+----
+$ rosa login --token=<your-token>
+----
+
+. Check your ROSA CLI version:
++
+[source,terminal]
+----
+$ rosa version
+----
+
+[source]
+------
+
+. Open a terminal and log in to the ROSA CLI using your personal token:
++
+[source,terminal]
+----
+$ rosa login --token=<your-token>
+----
+. Check your ROSA CLI version:
++
+[source,terminal]
+----
+$ rosa version
+----
+------
\ No newline at end of file
diff --git a/cloud_experts_tutorials/cloud-experts-using-alb-and-waf.adoc b/cloud_experts_tutorials/cloud-experts-using-alb-and-waf.adoc
new file mode 100644
index 000000000000..4227ab842be8
--- /dev/null
+++ b/cloud_experts_tutorials/cloud-experts-using-alb-and-waf.adoc
@@ -0,0 +1,443 @@
+:_mod-docs-content-type: ASSEMBLY
+[id="cloud-experts-using-alb-and-waf"]
+= Tutorial: Using AWS WAF and AWS ALBs to protect ROSA workloads
+include::_attributes/attributes-openshift-dedicated.adoc[]
+:context: cloud-experts-using-alb-and-waf
+
+toc::[]
+
+// Mobb content metadata
+// Brought into ROSA product docs 2023-09-21
+// ---
+// date: '2021-06-17'
+// title: AWS ALB
+// aliases: ['/docs/aws/waf/cloud-front.md']
+// tags: ["AWS", "ROSA", "OSD"]
+// authors:
+// - 'Connor Wooley'
+// ---
+
+AWS WAF is a web application firewall that lets you monitor the HTTP and HTTPS requests that are forwarded to your protected web application resources.
+
+You can use an AWS Application Load Balancer (ALB) to add a Web Application Firewall (WAF) to your {product-title} (ROSA) workloads. Using an external solution protects ROSA resources from experiencing denial of service due to handling the WAF.
+
+[NOTE]
+====
+It is recommended that you use the xref:../cloud_experts_tutorials/cloud-experts-using-cloudfront-and-waf.adoc#cloud-experts-using-cloudfront-and-waf[CloudFront method] unless you absolutely must use an ALB-based solution.
+====
+
+//[Here](https://iamondemand.com/blog/elb-vs-alb-vs-nlb-choosing-the-best-aws-load-balancer-for-your-needs/)'s a good overview of AWS LB types and what they support
+
+// Loosely based off EKS instructions here - https://aws.amazon.com/premiumsupport/knowledge-center/eks-alb-ingress-aws-waf/
+
+[id="prerequisites_{context}"]
+== Prerequisites
+
+[NOTE]
+====
+AWS ALBs require a multi-AZ cluster, as well as three public subnets split across three AZs in the same VPC as the cluster.
+====
+
+* xref:../rosa_install_access_delete_clusters/rosa-sts-creating-a-cluster-quickly.adoc#rosa-sts-creating-a-cluster-quickly[A multi-AZ ROSA Classic cluster].
+* You have access to the OpenShift CLI (`oc`).
+* You have access to the AWS CLI (`aws`).
+
+[id="environment-setup_{context}"]
+=== Environment setup
+
+* Prepare the environment variables:
++
+[source,terminal]
+----
+$ export AWS_PAGER=""
+$ export CLUSTER_NAME=$(oc get infrastructure cluster -o=jsonpath="{.status.infrastructureName}" | sed 's/-[a-z0-9]\{5\}$//')
+$ export REGION=$(oc get infrastructure cluster -o=jsonpath="{.status.platformStatus.aws.region}")
+$ export OIDC_ENDPOINT=$(oc get authentication.config.openshift.io cluster -o jsonpath='{.spec.serviceAccountIssuer}' | sed 's|^https://||')
+$ export AWS_ACCOUNT_ID=$(aws sts get-caller-identity --query Account --output text)
+$ export SCRATCH="/tmp/${CLUSTER_NAME}/alb-waf"
+$ mkdir -p ${SCRATCH}
+$ echo "Cluster: ${CLUSTER_NAME}, Region: ${REGION}, OIDC Endpoint: ${OIDC_ENDPOINT}, AWS Account ID: ${AWS_ACCOUNT_ID}"
+----
+
+[id="aws-vpc-and-subnets_{context}"]
+=== AWS VPC and subnets
+
+[NOTE]
+====
+This section only applies to clusters that were deployed into existing VPCs. If you did not deploy your cluster into an existing VPC, skip this section and proceed to the installation section below.
+====
+
+. Set the following variables to the proper values for your ROSA deployment:
++
+[source,terminal]
+----
+$ export VPC_ID=<vpc-id>
+$ export PUBLIC_SUBNET_IDS=<public-subnet-ids>
+$ export PRIVATE_SUBNET_IDS=<private-subnet-ids>
+----
++
+. Add a tag to your cluster's VPC with the cluster name:
++
+[source,terminal]
+----
+$ aws ec2 create-tags --resources ${VPC_ID} --tags Key=kubernetes.io/cluster/${CLUSTER_NAME},Value=owned --region ${REGION}
+----
++
+. Add a tag to your public subnets:
++
+[source,terminal]
+----
+$ aws ec2 create-tags \
+ --resources ${PUBLIC_SUBNET_IDS} \
+ --tags Key=kubernetes.io/role/elb,Value='' \
+ --region ${REGION}
+----
++
+. Add a tag to your private subnets:
++
+[source,terminal]
+----
+$ aws ec2 create-tags \
+ --resources "${PRIVATE_SUBNET_IDS}" \
+ --tags Key=kubernetes.io/role/internal-elb,Value='' \
+ --region ${REGION}
+----
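++
+To optionally confirm that the tags landed, you can list the tags on the VPC; this verification step is not part of the original procedure:
++
+[source,terminal]
+----
+$ aws ec2 describe-tags \
+  --filters "Name=resource-id,Values=${VPC_ID}" \
+  --region ${REGION}
+----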
+
+[id="deploy-aws-load-balancer-operator_{context}"]
+== Deploy the AWS Load Balancer Operator
+
+The link:https://github.com/openshift/aws-load-balancer-operator[AWS Load Balancer Operator] is used to install, manage, and configure an instance of `aws-load-balancer-controller` in a ROSA cluster. To deploy ALBs in ROSA, we need to first deploy the AWS Load Balancer Operator.
+
+. Create an AWS IAM policy for the AWS Load Balancer Controller:
++
+[NOTE]
+====
+The policy is sourced from link:https://raw.githubusercontent.com/kubernetes-sigs/aws-load-balancer-controller/v2.4.4/docs/install/iam_policy.json[the upstream AWS Load Balancer Controller policy] plus permission to create tags on subnets. This is required by the operator to function.
+====
++
+[source,terminal]
+----
+$ oc new-project aws-load-balancer-operator
+$ POLICY_ARN=$(aws iam list-policies --query \
+ "Policies[?PolicyName=='aws-load-balancer-operator-policy'].{ARN:Arn}" \
+ --output text)
+$ if [[ -z "${POLICY_ARN}" ]]; then
+ wget -O "${SCRATCH}/load-balancer-operator-policy.json" \
+ https://raw.githubusercontent.com/rh-mobb/documentation/main/content/docs/rosa/aws-load-balancer-operator/load-balancer-operator-policy.json
+ POLICY_ARN=$(aws --region "$REGION" --query Policy.Arn \
+ --output text iam create-policy \
+ --policy-name aws-load-balancer-operator-policy \
+ --policy-document "file://${SCRATCH}/load-balancer-operator-policy.json")
+fi
+$ echo $POLICY_ARN
+----
++
+. Create an AWS IAM trust policy for AWS Load Balancer Operator:
++
+[source,terminal]
+----
+$ cat < "${SCRATCH}/trust-policy.json"
+{
+ "Version": "2012-10-17",
+ "Statement": [
+ {
+ "Effect": "Allow",
+ "Condition": {
+ "StringEquals" : {
+ "${OIDC_ENDPOINT}:sub": ["system:serviceaccount:aws-load-balancer-operator:aws-load-balancer-operator-controller-manager", "system:serviceaccount:aws-load-balancer-operator:aws-load-balancer-controller-cluster"]
+ }
+ },
+ "Principal": {
+ "Federated": "arn:aws:iam::$AWS_ACCOUNT_ID:oidc-provider/${OIDC_ENDPOINT}"
+ },
+ "Action": "sts:AssumeRoleWithWebIdentity"
+ }
+ ]
+}
+EOF
+----
++
+. Create an AWS IAM role for the AWS Load Balancer Operator:
++
+[source,terminal]
+----
+$ ROLE_ARN=$(aws iam create-role --role-name "${CLUSTER_NAME}-alb-operator" \
+ --assume-role-policy-document "file://${SCRATCH}/trust-policy.json" \
+ --query Role.Arn --output text)
+$ echo $ROLE_ARN
+
+$ aws iam attach-role-policy --role-name "${CLUSTER_NAME}-alb-operator" \
+ --policy-arn $POLICY_ARN
+----
++
+. Create a secret for the AWS Load Balancer Operator to assume our newly created AWS IAM role:
++
+[source,terminal]
+----
+$ cat << EOF | oc apply -f -
+apiVersion: v1
+kind: Secret
+metadata:
+ name: aws-load-balancer-operator
+ namespace: aws-load-balancer-operator
+stringData:
+ credentials: |
+ [default]
+ role_arn = $ROLE_ARN
+ web_identity_token_file = /var/run/secrets/openshift/serviceaccount/token
+EOF
+----
++
+. Install the Red Hat AWS Load Balancer Operator:
++
+[source,terminal]
+----
+$ cat << EOF | oc apply -f -
+apiVersion: operators.coreos.com/v1
+kind: OperatorGroup
+metadata:
+ name: aws-load-balancer-operator
+ namespace: aws-load-balancer-operator
+spec:
+ upgradeStrategy: Default
+---
+apiVersion: operators.coreos.com/v1alpha1
+kind: Subscription
+metadata:
+ name: aws-load-balancer-operator
+ namespace: aws-load-balancer-operator
+spec:
+ channel: stable-v1.0
+ installPlanApproval: Automatic
+ name: aws-load-balancer-operator
+ source: redhat-operators
+ sourceNamespace: openshift-marketplace
+ startingCSV: aws-load-balancer-operator.v1.0.0
+EOF
+----
++
+. Deploy an instance of the AWS Load Balancer Controller using the operator:
++
+[NOTE]
+====
+If you get an error here, the Operator has not finished installing yet. Wait a minute and try again.
+====
++
+[source,terminal]
+----
+$ cat << EOF | oc apply -f -
+apiVersion: networking.olm.openshift.io/v1
+kind: AWSLoadBalancerController
+metadata:
+ name: cluster
+spec:
+ credentials:
+ name: aws-load-balancer-operator
+ enabledAddons:
+ - AWSWAFv2
+EOF
+----
++
+. Check that the operator and controller pods are both running:
++
+[source,terminal]
+----
+$ oc -n aws-load-balancer-operator get pods
+----
++
+You should see the following output; if not, wait a moment and retry:
++
+[source,terminal]
+----
+NAME READY STATUS RESTARTS AGE
+aws-load-balancer-controller-cluster-6ddf658785-pdp5d 1/1 Running 0 99s
+aws-load-balancer-operator-controller-manager-577d9ffcb9-w6zqn 2/2 Running 0 2m4s
+----
+
+[id="deploy-sample-application_{context}"]
+== Deploy a sample application
+
+. Create a new project for our sample application:
++
+[source,terminal]
+----
+$ oc new-project hello-world
+----
++
+. Deploy a hello world application:
++
+[source,terminal]
+----
+$ oc new-app -n hello-world --image=docker.io/openshift/hello-openshift
+----
++
+. Convert the pre-created service resource to a NodePort service type:
++
+[source,terminal]
+----
+$ oc -n hello-world patch service hello-openshift -p '{"spec":{"type":"NodePort"}}'
+----
++
+. Deploy an AWS ALB using the AWS Load Balancer Operator:
++
+[source,terminal]
+----
+$ cat << EOF | oc apply -f -
+apiVersion: networking.k8s.io/v1
+kind: Ingress
+metadata:
+ name: hello-openshift-alb
+ namespace: hello-world
+ annotations:
+ alb.ingress.kubernetes.io/scheme: internet-facing
+spec:
+ ingressClassName: alb
+ rules:
+ - http:
+ paths:
+ - path: /
+ pathType: Exact
+ backend:
+ service:
+ name: hello-openshift
+ port:
+ number: 8080
+EOF
+----
++
+. Curl the AWS ALB Ingress endpoint to verify the hello world application is accessible:
++
+[NOTE]
+====
+AWS ALB provisioning takes a few minutes. If you receive an error that says `curl: (6) Could not resolve host`, please wait and try again.
+====
++
+[source,terminal]
+----
+$ INGRESS=$(oc -n hello-world get ingress hello-openshift-alb -o jsonpath='{.status.loadBalancer.ingress[0].hostname}')
+$ curl "http://${INGRESS}"
+----
++
+.Example output
+[source,text]
+----
+Hello OpenShift!
+----
+
+[id="configure-aws-waf_{context}"]
+=== Configure the AWS WAF
+
+The link:https://aws.amazon.com/waf/[AWS WAF] service is a web application firewall that lets you monitor, protect, and control the HTTP and HTTPS requests that are forwarded to your protected web application resources, like ROSA.
+
+. Create an AWS WAF rules file to apply to our web ACL:
++
+[source,terminal]
+----
+$ cat << EOF > ${SCRATCH}/waf-rules.json
+[
+ {
+ "Name": "AWS-AWSManagedRulesCommonRuleSet",
+ "Priority": 0,
+ "Statement": {
+ "ManagedRuleGroupStatement": {
+ "VendorName": "AWS",
+ "Name": "AWSManagedRulesCommonRuleSet"
+ }
+ },
+ "OverrideAction": {
+ "None": {}
+ },
+ "VisibilityConfig": {
+ "SampledRequestsEnabled": true,
+ "CloudWatchMetricsEnabled": true,
+ "MetricName": "AWS-AWSManagedRulesCommonRuleSet"
+ }
+ },
+ {
+ "Name": "AWS-AWSManagedRulesSQLiRuleSet",
+ "Priority": 1,
+ "Statement": {
+ "ManagedRuleGroupStatement": {
+ "VendorName": "AWS",
+ "Name": "AWSManagedRulesSQLiRuleSet"
+ }
+ },
+ "OverrideAction": {
+ "None": {}
+ },
+ "VisibilityConfig": {
+ "SampledRequestsEnabled": true,
+ "CloudWatchMetricsEnabled": true,
+ "MetricName": "AWS-AWSManagedRulesSQLiRuleSet"
+ }
+ }
+]
+EOF
+----
++
+This will enable the Core (Common) and SQL AWS Managed Rule Sets.
++
+. Create an AWS WAF Web ACL using the rules we specified above:
++
+[source,terminal]
+----
+$ WAF_ARN=$(aws wafv2 create-web-acl \
+ --name ${CLUSTER_NAME}-waf \
+ --region ${REGION} \
+ --default-action Allow={} \
+ --scope REGIONAL \
+ --visibility-config SampledRequestsEnabled=true,CloudWatchMetricsEnabled=true,MetricName=${CLUSTER_NAME}-waf-metrics \
+ --rules file://${SCRATCH}/waf-rules.json \
+ --query 'Summary.ARN' \
+ --output text)
+----
++
+. Annotate the Ingress resource with the AWS WAF Web ACL ARN:
++
+[source,terminal]
+----
+$ oc annotate -n hello-world ingress.networking.k8s.io/hello-openshift-alb \
+ alb.ingress.kubernetes.io/wafv2-acl-arn=${WAF_ARN}
+----
+
+. Wait 10 seconds for the rules to propagate, then test that the app still works:
++
+[source,terminal]
+----
+$ curl "http://${INGRESS}"
+----
++
+.Example output
+[source,text]
+----
+Hello OpenShift!
+----
+
+. Test that the WAF denies a bad request, for example one whose body contains a script tag:
++
+[source,terminal]
+----
+$ curl -X POST "http://${INGRESS}" \
+ -F "user=''"
+----
++
+.Example output
++
+[source,text]
+----
+<html>
+<head><title>403 Forbidden</title></head>
+<body>
+<center><h1>403 Forbidden</h1></center>
+</body>
+</html>
+----
+ "${SCRATCH}/trust-policy.json"
+{
+ "Version": "2012-10-17",
+ "Statement": [
+ {
+ "Effect": "Allow",
+ "Condition": {
+ "StringEquals" : {
+ "${OIDC_ENDPOINT}:sub": "system:serviceaccount:ack-system:${ACK_SERVICE_ACCOUNT}"
+ }
+ },
+ "Principal": {
+ "Federated": "arn:aws:iam::$AWS_ACCOUNT_ID:oidc-provider/${OIDC_ENDPOINT}"
+ },
+ "Action": "sts:AssumeRoleWithWebIdentity"
+ }
+ ]
+}
+EOF
+----
++
+. Create an AWS IAM role for the ACK Operator to assume with the `AmazonS3FullAccess` policy attached:
++
+[NOTE]
+====
+You can find the recommended policy in each project's GitHub repository, for example link:https://github.com/aws-controllers-k8s/s3-controller/blob/main/config/iam/recommended-policy-arn[the S3 controller's recommended-policy-arn file].
+====
++
+[source,terminal]
+----
+$ ROLE_ARN=$(aws iam create-role --role-name "ack-${ACK_SERVICE}-controller" \
+ --assume-role-policy-document "file://${SCRATCH}/trust-policy.json" \
+ --query Role.Arn --output text)
+$ echo $ROLE_ARN
+
+$ aws iam attach-role-policy --role-name "ack-${ACK_SERVICE}-controller" \
+ --policy-arn ${POLICY_ARN}
+----
+
+[id="cloud-experts-using-aws-ack-install-ack"]
+== Installing the ACK S3 Controller
+
+. Create a project to install the ACK S3 Operator into:
++
+[source,terminal]
+----
+$ oc new-project ack-system
+----
++
+. Create a file with the ACK S3 Operator configuration:
++
+[NOTE]
+====
+`ACK_WATCH_NAMESPACE` is purposefully left blank so the controller can properly watch all namespaces in the cluster.
+====
++
+[source,terminal]
+----
+$ cat < "${SCRATCH}/config.txt"
+ACK_ENABLE_DEVELOPMENT_LOGGING=true
+ACK_LOG_LEVEL=debug
+ACK_WATCH_NAMESPACE=
+AWS_REGION=${REGION}
+AWS_ENDPOINT_URL=
+ACK_RESOURCE_TAGS=${CLUSTER_NAME}
+ENABLE_LEADER_ELECTION=true
+LEADER_ELECTION_NAMESPACE=
+EOF
+----
++
+. Use the file from the previous step to create a ConfigMap:
++
+[source,terminal]
+----
+$ oc -n ack-system create configmap \
+ --from-env-file=${SCRATCH}/config.txt ack-${ACK_SERVICE}-user-config
+----
++
+. Install the ACK S3 Operator from OperatorHub:
++
+[source,terminal]
+----
+$ cat << EOF | oc apply -f -
+apiVersion: operators.coreos.com/v1
+kind: OperatorGroup
+metadata:
+ name: ack-${ACK_SERVICE}-controller
+ namespace: ack-system
+spec:
+ upgradeStrategy: Default
+---
+apiVersion: operators.coreos.com/v1alpha1
+kind: Subscription
+metadata:
+ name: ack-${ACK_SERVICE}-controller
+ namespace: ack-system
+spec:
+ channel: alpha
+ installPlanApproval: Automatic
+ name: ack-${ACK_SERVICE}-controller
+ source: community-operators
+ sourceNamespace: openshift-marketplace
+EOF
+----
++
+. Annotate the ACK S3 Operator service account with the AWS IAM role to assume and restart the deployment:
++
+[source,terminal]
+----
+$ oc -n ack-system annotate serviceaccount ${ACK_SERVICE_ACCOUNT} \
+ eks.amazonaws.com/role-arn=${ROLE_ARN} && \
+ oc -n ack-system rollout restart deployment ack-${ACK_SERVICE}-controller
+----
++
+. Verify that the ACK S3 Operator is running:
++
+[source,terminal]
+----
+$ oc -n ack-system get pods
+----
++
+.Example output
+[source,text]
+----
+NAME READY STATUS RESTARTS AGE
+ack-s3-controller-585f6775db-s4lfz 1/1 Running 0 51s
+----
+
+[id="cloud-experts-using-aws-ack-valid-deploy"]
+== Validating the deployment
+
+. Deploy an S3 bucket resource:
++
+[source,terminal]
+----
+$ cat << EOF | oc apply -f -
+apiVersion: s3.services.k8s.aws/v1alpha1
+kind: Bucket
+metadata:
+ name: ${CLUSTER_NAME}-bucket
+ namespace: ack-system
+spec:
+ name: ${CLUSTER_NAME}-bucket
+EOF
+----
++
+. Verify the S3 bucket was created in AWS:
++
+[source,terminal]
+----
+$ aws s3 ls | grep ${CLUSTER_NAME}-bucket
+----
++
+.Example output
+[source,text]
+----
+2023-10-04 14:51:45 mrmc-test-maz-bucket
+----
+
+[id="cloud-experts-using-aws-ack-clean-up"]
+== Cleaning up
+
+. Delete the S3 bucket resource:
++
+[source,terminal]
+----
+$ oc -n ack-system delete bucket.s3.services.k8s.aws/${CLUSTER_NAME}-bucket
+----
++
+. Delete the ACK S3 Operator and the AWS IAM roles:
++
+[source,terminal]
+----
+$ oc -n ack-system delete subscription ack-${ACK_SERVICE}-controller
+$ aws iam detach-role-policy \
+ --role-name "ack-${ACK_SERVICE}-controller" \
+ --policy-arn ${POLICY_ARN}
+$ aws iam delete-role \
+ --role-name "ack-${ACK_SERVICE}-controller"
+----
++
+. Delete the `ack-system` project:
++
+[source,terminal]
+----
+$ oc delete project ack-system
+----
diff --git a/cloud_experts_tutorials/cloud-experts-using-cloudfront-and-waf.adoc b/cloud_experts_tutorials/cloud-experts-using-cloudfront-and-waf.adoc
new file mode 100644
index 000000000000..ab876415ad50
--- /dev/null
+++ b/cloud_experts_tutorials/cloud-experts-using-cloudfront-and-waf.adoc
@@ -0,0 +1,359 @@
+:_mod-docs-content-type: ASSEMBLY
+[id="cloud-experts-using-cloudfront-and-waf"]
+= Tutorial: Using AWS WAF and Amazon CloudFront to protect ROSA workloads
+include::_attributes/attributes-openshift-dedicated.adoc[]
+:context: cloud-experts-using-cloudfront-and-waf
+
+toc::[]
+
+// Mobb content metadata
+// Brought into ROSA product docs 2023-09-21
+// ---
+// date: '2021-06-17'
+// title: Using CloudFront + WAF
+// aliases: ['/docs/aws/waf/cloud-front.md']
+// tags: ["AWS", "ROSA"]
+// authors:
+// - 'Connor Wooley'
+// ---
+
+AWS WAF is a web application firewall that lets you monitor the HTTP and HTTPS requests that are forwarded to your protected web application resources.
+
+You can use an Amazon CloudFront distribution to add a Web Application Firewall (WAF) to your {product-title} (ROSA) workloads. Using an external solution protects ROSA resources from experiencing denial of service due to handling the WAF.
+
+[id="prerequisites_{context}"]
+== Prerequisites
+
+* xref:../rosa_install_access_delete_clusters/rosa-sts-creating-a-cluster-quickly.adoc#rosa-sts-creating-a-cluster-quickly[A ROSA Classic cluster].
+* You have access to the OpenShift CLI (`oc`).
+* You have access to the AWS CLI (`aws`).
+
+[id="environment-setup_{context}"]
+=== Environment setup
+
+* Prepare the environment variables:
++
+[source,terminal]
+----
+$ export AWS_PAGER=""
+$ export CLUSTER_NAME=$(oc get infrastructure cluster -o=jsonpath="{.status.infrastructureName}" | sed 's/-[a-z0-9]\{5\}$//')
+$ export REGION=$(oc get infrastructure cluster -o=jsonpath="{.status.platformStatus.aws.region}")
+$ export AWS_ACCOUNT_ID=$(aws sts get-caller-identity --query Account --output text)
+$ export SCRATCH="/tmp/${CLUSTER_NAME}/cloudfront-waf"
+$ mkdir -p ${SCRATCH}
+$ echo "Cluster: ${CLUSTER_NAME}, Region: ${REGION}, AWS Account ID: ${AWS_ACCOUNT_ID}"
+----
+
+[id="custom_domain_setup{context}"]
+== Custom domain setup
+
+It is necessary to configure a secondary ingress controller to segment your external WAF-protected traffic from your standard (and default) cluster ingress controller. In ROSA, we do this using the Custom Domain Operator.
+
+.Prerequisites
+
+* A unique domain, such as `*.apps.<company_name>.io`
+* A custom SAN or wildcard certificate, such as `CN=*.apps.<company_name>.io`
+
+.Procedure
+
+. Create a new project:
++
+[source,terminal]
+----
+$ oc new-project waf-demo
+----
+
+. Create a new TLS secret from a private key and a public certificate, where `fullchain.pem` is your full wildcard certificate chain (including any intermediaries) and `privkey.pem` is your wildcard certificate's private key.
++
+.Example
+[source,terminal]
+----
+$ oc -n waf-demo create secret tls waf-tls --cert=fullchain.pem --key=privkey.pem
+----
+
+. Create a new `CustomDomain` custom resource (CR):
++
+.Example `waf-custom-domain.yaml`
+[source,yaml]
+----
+apiVersion: managed.openshift.io/v1alpha1
+kind: CustomDomain
+metadata:
+ name: cloudfront-waf
+spec:
+ domain: apps.<company_name>.io <1>
+ scope: External
+ loadBalancerType: NLB
+ certificate:
+ name: waf-tls
+ namespace: waf-demo
+ routeSelector: <2>
+ matchLabels:
+ route: waf
+----
+<1> The custom domain.
+<2> Filters the set of routes serviced by the CustomDomain ingress. In this tutorial, we use the `waf` route selector; if no value is provided, no filtering occurs.
+
+. Apply the CR:
++
+.Example
+[source,terminal]
+----
+$ oc apply -f waf-custom-domain.yaml
+----
+
+. Verify that your custom domain ingress controller has been deployed and is `Ready`:
++
+[source,terminal]
+----
+$ oc get customdomains
+----
++
+.Example output
+[source,terminal]
+----
+NAME ENDPOINT DOMAIN STATUS
+cloudfront-waf xxrywp..cluster-01.opln.s1.openshiftapps.com *.apps.<company_name>.io Ready
+----
+
+[id="configure-aws-waf_{context}"]
+=== Configure the AWS WAF
+
+The link:https://aws.amazon.com/waf/[AWS WAF] service is a web application firewall that lets you monitor, protect, and control the HTTP and HTTPS requests that are forwarded to your protected web application resources, like ROSA.
+
+. Create an AWS WAF rules file to apply to our web ACL:
++
+[source,terminal]
+----
+$ cat << EOF > ${SCRATCH}/waf-rules.json
+[
+ {
+ "Name": "AWS-AWSManagedRulesCommonRuleSet",
+ "Priority": 0,
+ "Statement": {
+ "ManagedRuleGroupStatement": {
+ "VendorName": "AWS",
+ "Name": "AWSManagedRulesCommonRuleSet"
+ }
+ },
+ "OverrideAction": {
+ "None": {}
+ },
+ "VisibilityConfig": {
+ "SampledRequestsEnabled": true,
+ "CloudWatchMetricsEnabled": true,
+ "MetricName": "AWS-AWSManagedRulesCommonRuleSet"
+ }
+ },
+ {
+ "Name": "AWS-AWSManagedRulesSQLiRuleSet",
+ "Priority": 1,
+ "Statement": {
+ "ManagedRuleGroupStatement": {
+ "VendorName": "AWS",
+ "Name": "AWSManagedRulesSQLiRuleSet"
+ }
+ },
+ "OverrideAction": {
+ "None": {}
+ },
+ "VisibilityConfig": {
+ "SampledRequestsEnabled": true,
+ "CloudWatchMetricsEnabled": true,
+ "MetricName": "AWS-AWSManagedRulesSQLiRuleSet"
+ }
+ }
+]
+EOF
+----
++
+This will enable the Core (Common) and SQL AWS Managed Rule Sets.
++
+. Create an AWS WAF Web ACL using the rules we specified above:
++
+[source,terminal]
+----
+$ WAF_WACL=$(aws wafv2 create-web-acl \
+ --name cloudfront-waf \
+ --region ${REGION} \
+ --default-action Allow={} \
+ --scope CLOUDFRONT \
+ --visibility-config SampledRequestsEnabled=true,CloudWatchMetricsEnabled=true,MetricName=${CLUSTER_NAME}-waf-metrics \
+ --rules file://${SCRATCH}/waf-rules.json \
+ --query 'Summary.Name' \
+ --output text)
+----
+
+[id="configure_amazon_cloudfront_{context}"]
+== Configure Amazon CloudFront
+
+. Retrieve the newly created custom domain ingress controller's NLB hostname:
++
+[source,terminal]
+----
+$ NLB=$(oc -n openshift-ingress get service router-cloudfront-waf \
+ -o jsonpath='{.status.loadBalancer.ingress[0].hostname}')
+$ echo "Origin domain: ${NLB}"
+----
+
+. Import your certificate into AWS Certificate Manager, where `cert.pem` is your wildcard certificate, `fullchain.pem` is your wildcard certificate's chain, and `privkey.pem` is your wildcard certificate's private key.
++
+[NOTE]
+====
+Regardless of the region your cluster is deployed in, you must import this certificate to `us-east-1` because Amazon CloudFront is a global AWS service.
+====
++
+.Example
+[source,terminal]
+----
+$ aws acm import-certificate --certificate file://cert.pem \
+ --certificate-chain file://fullchain.pem \
+ --private-key file://privkey.pem \
+ --region us-east-1
+----
+
+. Log into the link:https://us-east-1.console.aws.amazon.com/cloudfront/v3/home#/distributions/create[AWS console] to create a CloudFront distribution.
++
+. Configure the CloudFront distribution by using the following information:
++
+[NOTE]
+====
+If an option is not specified in the table below, leave it at the default value (which may be blank).
+====
++
+[cols="2",options="header"]
+|===
+|Option
+|Value
+
+|Origin domain
+|Output from the command above ^[1]^
+
+|Name
+|rosa-waf-ingress ^[2]^
+
+|Viewer protocol policy
+|Redirect HTTP to HTTPS
+
+|Allowed HTTP methods
+|GET, HEAD, OPTIONS, PUT, POST, PATCH, DELETE
+
+|Cache policy
+|CachingDisabled
+
+|Origin request policy
+|AllViewer
+
+|Web Application Firewall (WAF)
+|Enable security protections
+
+|Use existing WAF configuration
+|true
+
+|Choose a web ACL
+|`cloudfront-waf`
+
+|Alternate domain name (CNAME)
+|*.apps.<company_name>.io ^[3]^
+
+|Custom SSL certificate
+|Select the certificate you imported from the step above ^[4]^
+|===
++
+[.small]
+--
+1. Run `echo ${NLB}` to get the origin domain.
+2. If you have multiple clusters, ensure the origin name is unique.
+3. This should match the wildcard domain you used to create the custom domain ingress controller.
+4. This should match the alternate domain name entered above.
+--
+
+. Retrieve the Amazon CloudFront distribution endpoint:
++
+[source,terminal]
+----
+$ aws cloudfront list-distributions --query "DistributionList.Items[?Origins.Items[?DomainName=='${NLB}']].DomainName" --output text
+----
+
+. Update the DNS record for your custom wildcard domain with a CNAME that points to the Amazon CloudFront distribution endpoint from the previous step.
++
+.Example
+[source,text]
+----
+*.apps..io CNAME d1b2c3d4e5f6g7.cloudfront.net
+----
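++
+After the DNS change propagates, you can optionally verify the record. This check is a suggestion and is not part of the original procedure; replace the hypothetical hostname with one that matches your wildcard domain:
++
+[source,terminal]
+----
+$ dig +short hello-openshift.apps.example.com CNAME
+----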
+
+[id="deploy-sample-application_{context}"]
+== Deploy a sample application
+
+. Deploy a hello world application:
++
+[source,terminal]
+----
+$ oc -n waf-demo new-app --image=docker.io/openshift/hello-openshift
+----
+
+. Create a route for the application specifying your custom domain name:
++
+.Example
+[source,terminal]
+----
+$ oc -n waf-demo create route edge --service=hello-openshift hello-openshift-tls \
+--hostname hello-openshift.apps..io
+----
+
+. Label the route to admit it to your custom domain ingress controller:
++
+[source,terminal]
+----
+$ oc -n waf-demo label route.route.openshift.io/hello-openshift-tls route=waf
+----
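++
+Optionally, confirm that the route exists and carries the label (an optional verification, not in the original steps):
++
+[source,terminal]
+----
+$ oc -n waf-demo get route hello-openshift-tls --show-labels
+----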
+
+[id="test-waf_{context}"]
+== Test the WAF
+
+. Test that the application is accessible behind Amazon CloudFront:
++
+.Example
+[source,terminal]
+----
+$ curl "https://hello-openshift.apps..io"
+----
++
+.Example output
+[source,text]
+----
+Hello OpenShift!
+----
+
+. Test that the WAF denies a bad request:
++
+.Example
+[source,terminal]
+----
+$ curl -X POST "https://hello-openshift.apps..io" \
+ -F "user=''"
+----
++
+.Example output
+[source,text]
+----
+<html>
+<head><title>403 Forbidden</title></head>
+<body>
+<center><h1>403 Forbidden</h1></center>
+</body>
+</html>
+----
+
+If you prefer a more visual medium, you can watch [Steve Mirman](https://twitter.com/stevemirman) walk through this quickstart on [YouTube](https://www.youtube.com/watch?v=IFNig_Z_p2Y).
+VIDEO
+////
+
+== Prerequisites
+
+=== AWS
+
+You must have an AWS account with the link:https://console.aws.amazon.com/rosa/home?#/get-started[AWS ROSA Prerequisites] met.
+
+image::rosa-aws-pre.png[AWS console ROSA prerequisites]
+
+.macOS
+
+//See [AWS Docs](https://docs.aws.amazon.com/cli/latest/userguide/install-cliv2-mac.html) for alternative install options.
+
+* Install the AWS CLI by using the macOS command line:
++
+[source,bash]
+----
+$ curl "https://awscli.amazonaws.com/AWSCLIV2.pkg" -o "AWSCLIV2.pkg"
+$ sudo installer -pkg AWSCLIV2.pkg -target /
+----
+
+.Linux
+
+// See [AWS Docs](https://docs.aws.amazon.com/cli/latest/userguide/install-cliv2-linux.html) for alternative install options.
+
+* Install the AWS CLI by using the Linux command line:
++
+[source,bash]
+----
+$ curl "https://awscli.amazonaws.com/awscli-exe-linux-x86_64.zip" -o "awscliv2.zip"
+$ unzip awscliv2.zip
+$ sudo ./aws/install
+----
+
+.Windows
+
+// See [AWS Docs](https://docs.aws.amazon.com/cli/latest/userguide/install-cliv2-windows.html) for alternative install options.
+
+* Install the AWS CLI by using the Windows command line:
++
+[source,bash]
+----
+C:\> msiexec.exe /i https://awscli.amazonaws.com/AWSCLIV2.msi
+----
+
+////
+**Docker**
+
+> See [AWS Docs](https://docs.aws.amazon.com/cli/latest/userguide/install-cliv2-docker.html) for alternative install options.
+
+1. To run the AWS CLI version 2 Docker image, use the docker run command.
+
+ ```bash
+ docker run --rm -it amazon/aws-cli command
+ ```
+////
+
+=== Prepare AWS Account for OpenShift
+
+. Configure the AWS CLI by running the following command:
++
+[source,bash]
+----
+$ aws configure
+----
+
+. You will be required to enter an `AWS Access Key ID` and an `AWS Secret Access Key`, along with a default region name and output format:
++
+[source,bash]
+----
+% aws configure
+AWS Access Key ID []:
+AWS Secret Access Key []:
+Default region name [us-east-2]:
+Default output format [json]:
+----
++
+The `AWS Access Key ID` and `AWS Secret Access Key` values can be obtained by logging in to the AWS console and creating an *Access Key* in the *Security Credentials* section of the IAM dashboard for your user.
+
+. Validate your credentials:
++
+[source,bash]
+----
+$ aws sts get-caller-identity
+----
++
+You should receive output similar to the following:
++
+----
+{
+    "UserId": ,
+    "Account": ,
+    "Arn":
+}
+----
+
+. If this is a brand new AWS account that has never had an AWS Load Balancer installed in it, run the following command:
++
+[source,bash]
+----
+$ aws iam create-service-linked-role --aws-service-name \
+    "elasticloadbalancing.amazonaws.com"
+----
+
+=== Get a Red Hat Offline Access Token
+
+. Log in to cloud.redhat.com.
+
+. Browse to https://cloud.redhat.com/openshift/token/rosa.
+
+. Copy the *Offline Access Token* and save it for the next step.
+
+
+=== Set up the OpenShift CLI (oc)
+
+. Download the OS-specific OpenShift CLI from link:https://mirror.openshift.com/pub/openshift-v4/clients/ocp/latest/[Red Hat].
+
+. Unzip the downloaded file on your local machine.
+
+. Place the extracted `oc` executable in your OS path or local directory.
+
+
+=== Set up the ROSA CLI
+
+. Download the OS-specific ROSA CLI from link:https://www.openshift.com/products/amazon-openshift/download[Red Hat].
+
+. Unzip the downloaded file on your local machine.
+
+. Place the extracted `rosa` and `kubectl` executables in your OS path or local directory.
+
+. Log in to ROSA:
++
+[source,bash]
+----
+$ rosa login
+----
++
+You will be prompted to enter the *Red Hat Offline Access Token* you retrieved earlier and should receive the following message:
++
+----
+Logged in as on 'https://api.openshift.com'
+----
+
+=== Verify ROSA privileges
+
+Verify that ROSA has the minimal permissions:
+
+[source,bash]
+----
+$ rosa verify permissions
+----
+
+Expected output: `AWS SCP policies ok`
+
+Verify that ROSA has the minimal quota:
+
+[source,bash]
+----
+$ rosa verify quota
+----
+
+Expected output: `AWS quota ok`
+
+
+=== Initialize ROSA
+
+Initialize the ROSA CLI to complete the remaining validation checks and configurations:
+
+[source,bash]
+----
+$ rosa init
+----
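+
+As an optional check, not part of the original quickstart, you can confirm which account the ROSA CLI is logged in as:
+
+[source,bash]
+----
+$ rosa whoami
+----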
+
+== Deploy Red Hat OpenShift on AWS (ROSA)
+
+=== Interactive Installation
+
+ROSA can be installed by using command-line parameters or in interactive mode. For an interactive installation, run the following command:
+
+[source,bash]
+----
+$ rosa create cluster --interactive --mode auto
+----
+
+As part of the interactive install, you will be required to enter the following parameters or accept the default values (if applicable):
+
+----
+Cluster name:
+Multiple availability zones (y/N):
+AWS region: (select)
+OpenShift version: (select)
+Install into an existing VPC (y/N):
+Compute nodes instance type (optional): (select)
+Enable autoscaling (y/N):
+Compute nodes [2]:
+Additional Security Group IDs (optional): (select)
+Machine CIDR [10.0.0.0/16]:
+Service CIDR [172.30.0.0/16]:
+Pod CIDR [10.128.0.0/14]:
+Host prefix [23]:
+Private cluster (y/N):
+----
+
+[NOTE]
+====
+The installation process should take between 30 and 45 minutes.
+====
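+
+If you prefer to script the installation instead, the same parameters can be passed as command-line flags. The following is a minimal sketch with a hypothetical cluster name:
+
+[source,bash]
+----
+$ rosa create cluster --cluster-name my-rosa-cluster --mode auto --yes
+----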
+
+=== Get the web console link to the ROSA cluster
+
+To get the web console link, run the following command, substituting your actual cluster name for ``:
+
+[source,bash]
+----
+$ rosa describe cluster --cluster=
+----
+
+=== Create a cluster-admin user
+
+By default, only the OpenShift SRE team will have access to the ROSA cluster. To add a local admin user, run the following command to create the `cluster-admin` account in your cluster, substituting your actual cluster name for ``:
+
+[source,bash]
+----
+$ rosa create admin --cluster=
+----
+
+Refresh your web browser and you should see the `cluster-admin` option to log in.
+
+== Delete Red Hat OpenShift on AWS (ROSA)
+
+Deleting a ROSA cluster consists of two parts:
+
+. Delete the cluster instance, including the removal of AWS resources. Substitute your actual cluster name for ``:
++
+[source,bash]
+----
+$ rosa delete cluster --cluster=
+----
++
+Delete the cluster's operator roles and OIDC provider, as shown in the output of the `delete cluster` command. For example:
++
+[source,bash]
+----
+$ rosa delete operator-roles -c
+$ rosa delete oidc-provider -c
+----
+
+. Delete the CloudFormation stack, including the removal of the `osdCcsAdmin` user:
++
+[source,bash]
+----
+$ rosa init --delete-stack
+----
\ No newline at end of file
diff --git a/cloud_experts_tutorials/rosa-mobb-prerequisites-tutorial.adoc b/cloud_experts_tutorials/rosa-mobb-prerequisites-tutorial.adoc
new file mode 100644
index 000000000000..a1474b9faf81
--- /dev/null
+++ b/cloud_experts_tutorials/rosa-mobb-prerequisites-tutorial.adoc
@@ -0,0 +1,231 @@
+:_mod-docs-content-type: ASSEMBLY
+[id="rosa-mobb-prerequisites-tutorial"]
+= Tutorial: ROSA prerequisites
+include::_attributes/attributes-openshift-dedicated.adoc[]
+:context: rosa-mobb-prerequisites-tutorial
+
+toc::[]
+
+//Mobb content metadata
+//Brought into ROSA product docs 2023-09-18
+//---
+//date: '2021-06-10'
+//title: ROSA Prerequisites
+//weight: 1
+//tags: ["AWS", "ROSA", "Quickstarts"]
+//authors:
+// - Steve Mirman
+// - Paul Czarkowski
+//---
+//This file is not being built as of 2023-09-22 based on a conversation with Michael McNeill.
+
+This document contains a set of prerequisite steps that you must complete once before you can create your first ROSA cluster.
+
+== AWS
+
+You must have an AWS account with the link:https://console.aws.amazon.com/rosa/home?#/get-started[AWS ROSA prerequisites] met.
+
+
+image::rosa-aws-pre.png[AWS console ROSA prerequisites]
+
+== AWS CLI
+
+.macOS
+
+* Install AWS CLI using the macOS command line:
++
+[source,terminal]
+----
+$ curl "https://awscli.amazonaws.com/AWSCLIV2.pkg" -o "AWSCLIV2.pkg"
+$ sudo installer -pkg AWSCLIV2.pkg -target /
+----
++
+[NOTE]
+====
+See link:https://docs.aws.amazon.com/cli/latest/userguide/install-cliv2-mac.html[AWS Documentation] for alternative install options.
+====
+
+.Linux
+
+* Install AWS CLI using the Linux command line:
++
+[source,terminal]
+----
+$ curl "https://awscli.amazonaws.com/awscli-exe-linux-x86_64.zip" -o "awscliv2.zip"
+$ unzip awscliv2.zip
+$ sudo ./aws/install
+----
++
+[NOTE]
+====
+See link:https://docs.aws.amazon.com/cli/latest/userguide/install-cliv2-linux.html[AWS Documentation] for alternative install options.
+====
+
+.Windows
+
+* Install AWS CLI using the Windows command line:
++
+[source,terminal]
+----
+C:\> msiexec.exe /i https://awscli.amazonaws.com/AWSCLIV2.msi
+----
++
+[NOTE]
+====
+See link:https://docs.aws.amazon.com/cli/latest/userguide/install-cliv2-windows.html[AWS Documentation] for alternative install options.
+====
+
+////
+.Docker
+
+* To run the AWS CLI version 2 Docker image, use the docker run command:
++
+[source,terminal]
+----
+$ docker run --rm -it amazon/aws-cli command
+----
++
+[NOTE]
+====
+See link:https://docs.aws.amazon.com/cli/latest/userguide/install-cliv2-docker.html[AWS Documentation] for alternative install options.
+====
+////
+
+== Prepare AWS Account for OpenShift
+
+. Configure the AWS CLI by running:
++
+[source,terminal]
+----
+$ aws configure
+----
++
+. You will be required to enter an `AWS Access Key ID` and an `AWS Secret Access Key` along with a default region name and output format:
++
+[source,terminal]
+----
+$ aws configure
+----
++
+.Sample output
+[source,terminal]
+----
+AWS Access Key ID []:
+AWS Secret Access Key []:
+Default region name [us-east-2]:
+Default output format [json]:
+----
++
+The `AWS Access Key ID` and `AWS Secret Access Key` values can be obtained by logging in to the AWS console and creating an *Access Key* in the *Security Credentials* section of the IAM dashboard for your user.
++
+. Validate your credentials:
++
+[source,terminal]
+----
+$ aws sts get-caller-identity
+----
++
+You should receive output similar to the following:
++
+.Sample output
+[source,terminal]
+----
+{
+ "UserId": ,
+ "Account": ,
+ "Arn":
+}
+----
++
+. If this is a new AWS account that has never had an AWS Load Balancer (ALB) installed in it, run the following command:
++
+[source,terminal]
+----
+$ aws iam create-service-linked-role --aws-service-name \
+ "elasticloadbalancing.amazonaws.com"
+----
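++
+You can optionally confirm that the service-linked role exists. This check is a suggestion and is not part of the original procedure:
++
+[source,terminal]
+----
+$ aws iam get-role --role-name AWSServiceRoleForElasticLoadBalancing
+----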
+
+== Get a Red Hat Offline Access Token
+
+. Log in to {cluster-manager-url}.
+. Navigate to link:https://cloud.redhat.com/openshift/token/rosa[OpenShift Cluster Manager API Token].
+. Copy the *Offline Access Token* and save it for the next step.
+
+
+== Set up the OpenShift CLI (oc)
+
+. Download the operating system specific OpenShift CLI from link:https://mirror.openshift.com/pub/openshift-v4/clients/ocp/latest/[Red Hat].
+. Extract the downloaded file on your local machine.
+. Place the extracted `oc` executable in your operating system path or local directory.
+
+== Set up the ROSA CLI (rosa)
+
+. Download the operating system specific ROSA CLI from link:https://www.openshift.com/products/amazon-openshift/download[Red Hat].
+. Extract the downloaded file on your local machine.
+. Place the extracted `rosa` and `kubectl` executables in your operating system path or local directory.
+. Log in to ROSA:
++
+[source,terminal]
+----
+$ rosa login
+----
++
+You will be prompted to enter the *Red Hat Offline Access Token* you retrieved earlier and should receive the following message:
++
+[source,terminal]
+----
+Logged in as on 'https://api.openshift.com'
+----
++
+. Verify that ROSA has the minimal quota:
++
+[source,terminal]
+----
+$ rosa verify quota
+----
++
+Expected output:
++
+[source,terminal]
+----
+AWS quota ok
+----
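++
+You can also optionally verify that your AWS account's SCP policies permit the required actions (a suggested check, not part of the original steps):
++
+[source,terminal]
+----
+$ rosa verify permissions
+----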
+
+== Associate your AWS account with your Red Hat account
+
+To perform ROSA cluster provisioning tasks, you must create `ocm-role` and `user-role` IAM resources in your AWS account and link them to your Red Hat organization.
+
+. Create the `ocm-role`, which OpenShift Cluster Manager uses to administer and create ROSA clusters. If this has already been done for your OpenShift Cluster Manager organization, you can skip to creating the `user-role`:
++
+[TIP]
+====
+If you have multiple AWS accounts that you want to associate with your Red Hat Organization, you can use the `--profile` option to specify the AWS profile you want to associate.
+====
++
+[source,terminal]
+----
+$ rosa create ocm-role --mode auto --yes
+----
++
+. Create the `user-role`, which allows OpenShift Cluster Manager to verify that users creating a cluster have access to the current AWS account:
++
+[TIP]
+====
+If you have multiple AWS accounts that you want to associate with your Red Hat Organization, you can use the `--profile` option to specify the AWS profile you want to associate.
+====
++
+[source,terminal]
+----
+$ rosa create user-role --mode auto --yes
+----
++
+. Create the ROSA account roles, which give the ROSA installer and machines permissions to perform actions in your account:
++
+[source,terminal]
+----
+$ rosa create account-roles --mode auto --yes
+----
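++
+Optionally, list the account roles to confirm that they were created (an optional verification, not in the original steps):
++
+[source,terminal]
+----
+$ rosa list account-roles
+----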
+
+== Conclusion
+
+You are now ready to create your first cluster.
diff --git a/cloud_experts_tutorials/rosa-mobb-verify-permissions-sts-deployment.adoc b/cloud_experts_tutorials/rosa-mobb-verify-permissions-sts-deployment.adoc
new file mode 100644
index 000000000000..80d543002c0f
--- /dev/null
+++ b/cloud_experts_tutorials/rosa-mobb-verify-permissions-sts-deployment.adoc
@@ -0,0 +1,112 @@
+:_mod-docs-content-type: ASSEMBLY
+[id="rosa-mobb-verify-permissions-sts-deployment"]
+= Tutorial: Verifying Permissions for a ROSA STS Deployment
+include::_attributes/attributes-openshift-dedicated.adoc[]
+:context: rosa-mobb-verify-permissions-sts-deployment
+
+toc::[]
+
+// ---
+// date: '2022-10-04'
+// title: "Verify Permissions for ROSA STS Deployment"
+// authors:
+// - Tyler Stacey
+// - Kumudu Herath
+// tags: ["AWS", "ROSA", "STS"]
+// ---
+
+To proceed with the deployment of a ROSA cluster, an account must support the required roles and permissions.
+AWS Service Control Policies (SCPs) must not block the API calls made by the installer or operator roles.
+
+Details about the IAM resources required for an STS-enabled installation of ROSA can be found in xref:../rosa_architecture/rosa-sts-about-iam-resources.adoc#rosa-sts-about-iam-resources[About IAM resources for ROSA clusters that use STS].
+
+This guide is validated for ROSA v4.11.X.
+
+== Prerequisites
+
+* link:https://docs.aws.amazon.com/cli/latest/userguide/install-cliv2.html[AWS CLI]
+* xref:../cli_reference/rosa_cli/rosa-get-started-cli.adoc#rosa-get-started-cli[ROSA CLI] v1.2.6
+* link:https://stedolan.github.io/jq/[jq CLI]
+* link:https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_testing-policies.html[AWS role with required permissions]
+
+[id="verify-ROSA-permissions_{context}"]
+== Verifying ROSA permissions
+To verify the permissions required for ROSA, you can run the script included in the following section without creating any AWS resources.
+
+The script uses the `rosa`, `aws`, and `jq` CLI commands to create files in the working directory that will be used to verify permissions in the account connected to the current AWS configuration.
+
+The AWS Policy Simulator is used to verify the permissions of each role policy against the API calls extracted by `jq`; results are then stored in a text file appended with `.results`.
+
+This script is designed to verify the permissions for the current account and region.
+
+[id="usage-instructions_{context}"]
+== Usage instructions
+
+. To use the script, run the following commands in a `bash` terminal (the `-p` option defines a prefix for the roles):
++
+[source,terminal]
+----
+$ mkdir scratch
+$ cd scratch
+$ cat << 'EOF' > verify-permissions.sh
+#!/bin/bash
+while getopts 'p:' OPTION; do
+ case "$OPTION" in
+ p)
+ PREFIX="$OPTARG"
+ ;;
+ ?)
+      echo "script usage: $(basename $0) [-p PREFIX]" >&2
+ exit 1
+ ;;
+ esac
+done
+shift "$(($OPTIND -1))"
+rosa create account-roles --mode manual --prefix $PREFIX
+INSTALLER_POLICY=$(cat sts_installer_permission_policy.json | jq)
+CONTROL_PLANE_POLICY=$(cat sts_instance_controlplane_permission_policy.json | jq)
+WORKER_POLICY=$(cat sts_instance_worker_permission_policy.json | jq)
+SUPPORT_POLICY=$(cat sts_support_permission_policy.json | jq)
+simulatePolicy () {
+ outputFile="${2}.results"
+ echo $2
+ aws iam simulate-custom-policy --policy-input-list "$1" --action-names $(jq '.Statement | map(select(.Effect == "Allow"))[].Action | if type == "string" then . else .[] end' "$2" -r) --output text > $outputFile
+}
+simulatePolicy "$INSTALLER_POLICY" "sts_installer_permission_policy.json"
+simulatePolicy "$CONTROL_PLANE_POLICY" "sts_instance_controlplane_permission_policy.json"
+simulatePolicy "$WORKER_POLICY" "sts_instance_worker_permission_policy.json"
+simulatePolicy "$SUPPORT_POLICY" "sts_support_permission_policy.json"
+EOF
+$ chmod +x verify-permissions.sh
+$ ./verify-permissions.sh -p SimPolTest
+----
+
+. After the script completes, review each results file to ensure that none of the required API calls are blocked:
++
+[source,terminal]
+----
+$ for file in $(ls *.results); do echo $file; cat $file; done
+----
++
+The output will look similar to the following:
++
+[source,terminal]
+----
+sts_installer_permission_policy.json.results
+EVALUATIONRESULTS autoscaling:DescribeAutoScalingGroups allowed *
+MATCHEDSTATEMENTS PolicyInputList.1 IAM Policy
+ENDPOSITION 6 195
+STARTPOSITION 17 3
+EVALUATIONRESULTS ec2:AllocateAddress allowed *
+MATCHEDSTATEMENTS PolicyInputList.1 IAM Policy
+ENDPOSITION 6 195
+STARTPOSITION 17 3
+EVALUATIONRESULTS ec2:AssociateAddress allowed *
+MATCHEDSTATEMENTS PolicyInputList.1 IAM Policy
+...
+----
++
+[NOTE]
+====
+If any actions are blocked, review the error provided by AWS and consult your administrator to determine whether SCPs are blocking the required API calls.
+====
\ No newline at end of file
diff --git a/cloud_experts_tutorials/snippets b/cloud_experts_tutorials/snippets
new file mode 120000
index 000000000000..9d58b92e5058
--- /dev/null
+++ b/cloud_experts_tutorials/snippets
@@ -0,0 +1 @@
+../snippets/
\ No newline at end of file
diff --git a/contributing_to_docs/doc_guidelines.adoc b/contributing_to_docs/doc_guidelines.adoc
index 337cd36370e1..fac0b326a28c 100644
--- a/contributing_to_docs/doc_guidelines.adoc
+++ b/contributing_to_docs/doc_guidelines.adoc
@@ -1,6 +1,7 @@
[id="contributing-to-docs-doc-guidelines"]
= Documentation guidelines
include::_attributes/common-attributes.adoc
+
:toc: macro
The documentation guidelines for OpenShift 4 build on top of the
@@ -35,7 +36,7 @@ In the Atom editor, you can use `Ctrl`+`J` to undo hard wrapping on a paragraph.
Every assembly file should contain the following metadata at the top, with no line spacing in between, except where noted:
----
-:_content-type: ASSEMBLY <1>
+:_mod-docs-content-type: ASSEMBLY <1>
[id=""] <2>
= Assembly title <3>
include::_attributes/common-attributes.adoc[] <4>
@@ -43,21 +44,26 @@ include::_attributes/common-attributes.adoc[] <4>
<6>
toc::[] <7>
----
-
-<1> The content type for the file. For assemblies, always use `:_content-type: ASSEMBLY`. Place this attribute before the anchor ID or, if present, the conditional that contains the anchor ID.
+<1> The content type for the file. For assemblies, always use `:_mod-docs-content-type: ASSEMBLY`. Place this attribute before the anchor ID or, if present, the conditional that contains the anchor ID.
<2> A unique (within OpenShift docs) anchor ID for this assembly. Use lowercase. Example: cli-developer-commands
<3> Human readable title (notice the `=` top-level header)
<4> Includes attributes common to OpenShift docs.
+
[NOTE]
====
-The `{product-title}` and `{product-version}` common attributes are not defined in the `_attributes/common-attributes.adoc` file. Those attributes are pulled by AsciiBinder from the distro mapping definitions in the https://github.com/openshift/openshift-docs/blob/main/_distro_map.yml[_distro_map.yml] file. See xref:product-name-and-version[Product title and version] and xref:attribute-files[attribute files] for more information on this topic.
+* The `{product-title}` and `{product-version}` common attributes are not defined in the `_attributes/common-attributes.adoc` file. Those attributes are pulled by AsciiBinder from the distro mapping definitions in the https://github.com/openshift/openshift-docs/blob/main/_distro_map.yml[_distro_map.yml] file. See xref:product-name-and-version[Product title and version] and xref:attribute-files[attribute files] for more information on this topic.
+* If you use a variable in the title of the first assembly in a section, move the include attributes directive above the title in this assembly. Otherwise, the variable will not render correctly on access.redhat.com.
====
+
<5> Context used for identifying headers in modules that is the same as the anchor ID. Example: cli-developer-commands.
<6> A blank line. You *must* have a blank line here before the toc.
<7> The table of contents for the current assembly.
+[NOTE]
+====
+Do not use backticks or other markup in assembly or module headings. You can use backticks or other markup in the title for a block, such as a code block `.Example` or a table `.Description` title.
+====
+
After the heading block and a single whitespace line, you can include any content for this assembly.
[NOTE]
@@ -76,13 +82,13 @@ Every module should be placed in the modules folder and should contain the follo
//
// * list of assemblies where this module is included <1>
-:_content-type: <2>
+:_mod-docs-content-type: <2>
[id="_{context}"] <3>
= Module title <4>
----
-<1> The content type for the file. Replace `` with the actual type of the module, `CONCEPT`, `REFERENCE`, or `PROCEDURE`. Place this attribute before the anchor ID or, if present, the conditional that contains the anchor ID.
-<2> List of assemblies in which this module is included.
+<1> List of assemblies in which this module is included.
+<2> The content type for the file. Replace `` with the actual type of the module, `CONCEPT`, `REFERENCE`, or `PROCEDURE`. Place this attribute before the anchor ID or, if present, the conditional that contains the anchor ID.
<3> A module anchor with {context} that must be lowercase and must match the module's file name.
<4> Human readable title. To ensure consistency in the results of the
leveloffset values in include statements, you must use a level one heading
@@ -95,11 +101,16 @@ Example:
//
// * cli_reference/openshift_cli/developer-cli-commands.adoc
-:_content-type: REFERENCE
+:_mod-docs-content-type: REFERENCE
[id="cli-basic-commands_{context}"]
= Basic CLI commands
----
+[NOTE]
+====
+Do not use backticks or other markup in assembly or module headings. You can use backticks or other markup in the title for a block, such as a code block `.Example` or a table `.Description` title.
+====
+
[id="snippet-file-metadata"]
== Text snippet file metadata
Every text snippet should be placed in the `snippets/` folder and should contain the following metadata at the top:
@@ -113,11 +124,11 @@ Every text snippet should be placed in the `snippets/` folder and should contain
//
// * list of modules where this text snippet is included
-:_content-type: SNIPPET <3>
+:_mod-docs-content-type: SNIPPET <3>
----
<1> List of assemblies in which this text snippet is included.
<2> List of modules in which this text snippet is included.
-<3> The content type for the file. For snippets, always use `:_content-type: SNIPPET`. Place this attribute before the anchor ID, the conditional that contains the anchor ID, or the first line of body text.
+<3> The content type for the file. For snippets, always use `:_mod-docs-content-type: SNIPPET`. Place this attribute before the anchor ID, the conditional that contains the anchor ID, or the first line of body text.
[NOTE]
====
@@ -133,21 +144,21 @@ Example:
// * installing/installing_azure/installing-azure-default.adoc
// * installing/installing_gcp/installing-gcp-default.adoc
-:_content-type: SNIPPET
+:_mod-docs-content-type: SNIPPET
In {product-title} version {product-version}, you can install a cluster on {cloud-provider-first} ({cloud-provider}) that uses the default configuration options.
----
== Content type attributes
-Each `.adoc` file must contain a `:_content-type:` attribute in its metadata that indicates its file type. This information is used by some publication processes to sort and label files.
+Each `.adoc` file must contain a `:_mod-docs-content-type:` attribute in its metadata that indicates its file type. This information is used by some publication processes to sort and label files.
Add the attribute from the following list that corresponds to your file type:
-* `:_content-type: ASSEMBLY`
-* `:_content-type: CONCEPT`
-* `:_content-type: PROCEDURE`
-* `:_content-type: REFERENCE`
-* `:_content-type: SNIPPET`
+* `:_mod-docs-content-type: ASSEMBLY`
+* `:_mod-docs-content-type: CONCEPT`
+* `:_mod-docs-content-type: PROCEDURE`
+* `:_mod-docs-content-type: REFERENCE`
+* `:_mod-docs-content-type: SNIPPET`
Place the attribute in the file metadata. The following list describes the best attribute placement options:
@@ -184,6 +195,11 @@ ifdef::openshift-origin[]
endif::[]
----
+[NOTE]
+====
+When backporting content that contains IBM-related attributes to `enterprise-4.13` and earlier branches, you might need to create manual cherry picks because IBM-related attribute names are not consistent with later release branches.
+====
+
== Assembly/module file names
Try to shorten the file name as much as possible _without_ abbreviating important terms that may cause confusion. For example, the `managing-authorization-policies.adoc` file name would be appropriate for an assembly titled "Managing Authorization Policies".
@@ -192,11 +208,6 @@ Try to shorten the file name as much as possible _without_ abbreviating importan
If you create a directory with a multiple-word name, separate each word with an underscore, for example `backup_and_restore`.
-[NOTE]
-====
-Do not italicize user-replaced values. This guideline is an exception to the link:https://redhat-documentation.github.io/supplementary-style-guide/#user-replaced-values[_Red Hat supplementary style guide for product documentation_].
-====
-
Do not create or rename a top-level directory in the repository and topic map without checking with the docs program manager first.
Avoid creating two levels of subdirectories because the link:https://github.com/openshift/openshift-docs/issues/52149[breadcrumb bar on docs.openshift.com breaks]. If you have a valid use case for two levels of subdirectories, talk with your DPM/CS (and, for aligned teams, the OpenShift DPM) for approval before creating it.
@@ -251,6 +262,8 @@ Do not use "Overview" as a heading.
Do not use backticks or other markup in assembly or module headings.
+Do not use special characters or symbols in titles. Symbols and special characters in titles can cause rendering errors in the HTML output.
+
Use only one level 1 heading (`=`) in any file.
=== Discrete headings
@@ -371,12 +384,12 @@ For more information about creating concept modules, see the
link:https://redhat-documentation.github.io/modular-docs/#creating-concept-modules[_Red Hat modular docs reference guide_] and the link:https://raw.githubusercontent.com/redhat-documentation/modular-docs/master/modular-docs-manual/files/TEMPLATE_CONCEPT_concept-explanation.adoc[concept template].
== Writing procedures
-A _procedure_ contains the steps that users follow to complete a process or task. Procedures contain ordered steps and explicit commands. In most cases, create your procedures as individual modules and include them in appropriate assemblies.
+A _procedure_ contains the steps that users follow to complete a single process or task. Procedures contain ordered steps and explicit commands. In most cases, create your procedures as individual modules and include them in appropriate assemblies.
Use a gerund in the procedure title, such as "Creating".
For more information about writing procedures, see the
-link:https://redhat-documentation.github.io/modular-docs/#creating-procedure-modules[_Red Hat modular docs reference guide_] and the link:https://raw.githubusercontent.com/redhat-documentation/modular-docs/master/modular-docs-manual/files/TEMPLATE_PROCEDURE_doing-one-procedure.adoc[procedure template].
+link:https://redhat-documentation.github.io/modular-docs/#con-creating-procedure-modules_writing-mod-docs[_Red Hat modular docs reference guide_] and the link:https://raw.githubusercontent.com/redhat-documentation/modular-docs/master/modular-docs-manual/files/TEMPLATE_PROCEDURE_doing-one-procedure.adoc[procedure template].
[NOTE]
====
@@ -387,6 +400,11 @@ When needed, use `.Prerequisites`, `.Next steps`, or `.Additional resources` syn
== Writing text snippets
A _text snippet_ is an optional component that lets you reuse content in multiple modules and assemblies. Text snippets are not a substitute for modules but instead are a more granular form of content reuse. While a module is content that a reader can understand on its own (like an article) or as part of a larger body of work (like an assembly), a text snippet is not self-contained and is not intended to be published or cross referenced on its own.
+[IMPORTANT]
+====
+Only include entire snippets in an assembly or module. Including link:https://docs.asciidoctor.org/asciidoc/latest/directives/include-lines/[lines by content ranges] can lead to content errors when the included file is subsequently updated and is not permitted.
+====
+
In the context of modules and assemblies, text snippets do not include headings or anchor IDs. Text snippets also cannot contain xrefs. This type of component is text only. Examples include the following:
* Admonitions that appear in multiple modules.
@@ -488,22 +506,22 @@ possible values for `{product-title}` and `{product-version}`, depending on the
|`openshift-origin`
|OKD
a|* 3.6, 3.7, 3.9, 3.10, 3.11
-* 4.8, 4.9, 4.10, 4.11, 4.12, 4.13
+* 4.8, 4.9, 4.10, 4.11, 4.12, 4.13, 4.14
* 4 for the `latest/` build from the `main` branch
|`openshift-enterprise`
|OpenShift Container Platform
a|* 3.0, 3.1, 3.2, 3.3, 3.4, 3.5, 3.6, 3.7, 3.9, 3.10, 3.11
-* 4.1, 4.2, 4.3, 4.4, 4.5, 4.6, 4.7, 4.8, 4.9, 4.10, 4.11, 4.12, 4.13, 4.14
+* 4.1, 4.2, 4.3, 4.4, 4.5, 4.6, 4.7, 4.8, 4.9, 4.10, 4.11, 4.12, 4.13, 4.14, 4.15
|`openshift-dedicated`
|OpenShift Dedicated
-a|* No value set for the latest `dedicated/` build from the `enterprise-4.13` branch
+a|* No value set for the latest `dedicated/` build from the `enterprise-4.14` branch
* 3 for the `dedicated/3` build from the `enterprise-3.11` branch
|`openshift-rosa`
|Red Hat OpenShift Service on AWS
-|No value set for the `rosa/` build from the `enterprise-4.13` branch
+|No value set for the `rosa/` build from the `enterprise-4.14` branch
|`openshift-online`
|OpenShift Online
@@ -537,6 +555,28 @@ If it makes more sense in context to refer to the major version of the product i
Other common attribute values are defined in the `_attributes/common-attributes.adoc` file. Where possible, generalize references to those values by using the common attributes. For example, use `{cluster-manager-first}` to refer to Red Hat OpenShift Cluster Manager. If you need to add an attribute to the `_attributes/common-attributes.adoc` file, open a pull request to add it to the attribute list. Do not create a separate attributes file without first consulting the docs team.
====
+[id="third-party-vendor-product-names"]
+== Third-party vendor product names
+
+Red Hat integrates with many third-party vendor products. For certain integrated products, third-party vendor staff might have access to certain Red Hat resources and be contactable within Red Hat. On other occasions, common open-source products might be widely used across IT infrastructure providers, so Red Hat might not have direct contacts at the organizations that own these products.
+
+Depending on the third-party vendor's requirements, you might need to add a registered trademark symbol to all of the vendor's product names or only to the first occurrence of the product name in an assembly, a module, or a document.
+
+Choose any of the following sources for clarification on using the symbol for a specific third-party vendor product name:
+
+* Visit the third-party vendor's website and contact them directly.
+* Contact internal Red Hat product teams or integrated third-party vendor teams.
+* Contact the Red Hat Legal team. Only consider this option when the other two options did not provide clear context for your query.
+
+[IMPORTANT]
+====
+Do not use Asciidoctor character replacement substitutions, which rely on a Unicode code point such as _®_, to set the registered symbol in an Asciidoc file. Instead, use _(R)_ beside the product name. For example, `IBM(R) LinuxONE`.
+
+Do not apply any superscript, such as `(R)`, or subscript formatting to module or assembly headings.
+====
+
+For more information about contacting the Red Hat Legal team, see link:https://source.redhat.com/departments/legal/redhatintellectualproperty/trademarks/trademarks_and_domain_names_wiki/copyright_notices_and_trademark_legends[Copyright Notices and Trademark Legends] on The Source.
+
//CANARY
[id="conditional-content"]
== Conditional content
@@ -691,6 +731,7 @@ In OpenShift docs:
* All links to internal content is created using `xref` and **must have an anchor ID**.
* Only use `xref` in assemblies, not in modules.
* All links to external websites are created using `link`.
+* Links between different distros, such as from ROSA to OpenShift Container Platform, are external links and not cross-references. These external links use `link` instead of `xref` and need to be in distro-specific files or conditionalized to apply to the relevant distros.
[IMPORTANT]
====
@@ -863,10 +904,7 @@ Comment out the callout in the YAML file to ensure that file can still be parsed
Asciidoctor recognises the commented callout and renders it correctly in the output.
For example:
-[source,yaml]
-----
-apiVersion: v1 # <1>
-----
+`apiVersion: v1 # <1>`
[discrete]
=== Version and upgrade implications
@@ -910,7 +948,7 @@ To indicate that a feature is deprecated, include the `modules/deprecated-featur
For more information on how this is applied, see link:https://github.com/openshift/openshift-docs/pull/31776/files[this example PR].
== Verification of your content
-All documentation changes must be verified by a QE team associate before merging. This includes executing all "Procedure" changes and confirming expected results. There are exceptions for typo-level changes, formatting-only changes, and other negotiated documentation sets and distributions.
+All documentation changes that update or add technical content must be verified by a QE team associate before merging. This QE verification process includes executing all procedures, confirming expected results, and confirming the accuracy of conceptual and reference content. The only exceptions are for typo-level changes, formatting-only changes, and other negotiated documentation sets and distributions.
If a documentation change is due to a Bugzilla bug or Jira issue, the bug/issue should be put on ON_QA when you have a PR ready. After QE approval is given (either in the bug/issue or in the PR), the QE associate should move the bug/issue status to VERIFIED, at which point the associated PR can be merged. It is also ok for the assigned writer to change the status of the bug/issue to VERIFIED if approval for the changes has been provided in another forum (slack, PR, or email). The writer should indicate that the QE team approved the change as a comment in the bug/issue.
@@ -998,16 +1036,17 @@ You can use backticks or other markup in the title for a block, such as a code b
=== Code blocks, command syntax, and example output
-Code blocks are generally used to show examples of command syntax, example
-screen output, and configuration files.
+Code blocks are generally used to show examples of command syntax, example screen output, and configuration files.
-The main distinction between showing command syntax and a command example is
-that a command syntax shows readers how to use the command without real values.
-An example command, however, shows the command with actual values with an
-example output of that command, where applicable.
+//redundant with 1080
+The main distinction between showing command syntax and a command example is that a command syntax shows readers how to use the command without real values. An example command, however, shows the command with actual values with an example output of that command, where applicable.
+[[use-source-terminal]]
+==== Source tags for terminal commands and output
+* Use `[source,terminal]` for `oc` commands or any terminal commands to enable syntax highlighting. If you are also showing a code block for the output of the command, use `[source,terminal]` for that code block as well. Separate a command and its related example output into individual code blocks. See <>.
++
For example:
-
++
....
In the following example, the `oc get` operation returns a complete list of services that are currently defined:
@@ -1025,7 +1064,7 @@ kubernetes-ro component=apiserver,provider=kubernetes
docker-registry name=registrypod 172.30.17.158 5001
----
....
-
++
This renders as:
> In the following example, the `oc get` operation returns a complete list of services that are currently defined:
@@ -1042,26 +1081,86 @@ This renders as:
> docker-registry name=registrypod 172.30.17.158 5001
> ----
-The following guidelines go into more detail about specific requirements and
-recommendations when using code blocks:
+* Any `[source]` metadata must go on the line directly before the code block. Also, do not insert a space in between the `[source]` tag and the metadata.
+//Context: https://github.com/openshift/openshift-docs/pull/64373
++
+For example:
++
+....
-* If a step in a procedure is to run a command, make sure that the step
-text includes an explicit instruction to "run" or "enter" the command. In most cases,
-use one of the following patterns to introduce the code block:
+[source,terminal]
+----
+$ oc get se
+----
+
+[source,yaml]
+----
+apiVersion: config.openshift.io/v1
+kind: Scheduler
+metadata:
+ name: cluster
+# ...
+spec:
+ defaultNodeSelector: node-role.kubernetes.io/app=
+# ...
+----
+....
+
+* For Bash "here" documents use `[source,terminal]`, such as the following example:
++
+....
+[source,terminal]
+----
+$ cat <<EOF
+...
+EOF
+----
+....
+
+* If a step in a procedure is to run a command, make sure that the step text includes an explicit instruction to "run" or "enter" the command. In most cases, use one of the following patterns to introduce the code block:
+** by running the following command:
+** by entering the following command:
+** , run the following command:
+** , enter the following command:
-* Do NOT use any markup in code blocks; code blocks generally do not accept any markup.
+[[no-markup-codeblock]]
+==== No markup in code blocks
+Do NOT use any markup in code blocks; code blocks generally do not accept any markup.
+
+[[empty-line-before-codeblock]]
+==== Empty line before code blocks
+For all code blocks, you must include an empty line above a code block (unless that line is introducing block metadata, such as `[source,terminal]` for syntax highlighting).
-* For all code blocks, you must include an empty line above a code block (unless
-that line is introducing block metadata, such as `[source,terminal]` for syntax
-highlighting).
-+
Acceptable:
-+
+
....
Lorem ipsum
@@ -1069,43 +1168,36 @@ Lorem ipsum
$ lorem.sh
----
....
-+
+
Not acceptable:
-+
+
....
Lorem ipsum
----
$ lorem.sh
----
....
-+
-Without the line spaces, the content is likely to be not parsed correctly.
-* Use `[source,terminal]` for `oc` commands or any terminal commands to enable
-syntax highlighting. Any `[source]` metadata must go on the line directly before
-the code block. For example:
-+
-....
-[source,terminal]
-----
-$ oc get nodes
-----
-....
-+
-If you are also showing a code block for the output of the command, use
-`[source,terminal]` for that code block as well.
+Without the line spaces, the content might not be parsed correctly.
-* Use source tags for the programming language used in the code block to enable
-syntax highlighting. For example:
+[[source-tags-for-programming-language]]
+==== Source tags for programming languages
+Use source tags for the programming language used in the code block to enable syntax highlighting. For example:
** `[source,yaml]`
** `[source,go]`
** `[source,javascript]`
** `[source,jsx]`
+** `[source,bash]`
+
+[[single-command-per-code-block]]
+==== Single command per code block
+Do not use more than one command per code block.
+
+When commands are bunched together, the copy-to-clipboard functionality might not break the lines up correctly. Using a single command per code block makes it copy-and-paste friendly.
+
+For example, the following must be split up into three separate code blocks:
-* Do not use more than one command per code block. For example, the following must
-be split up into three separate code blocks:
-+
....
To create templates you can modify, run the following commands:
@@ -1125,51 +1217,17 @@ $ oc adm create-error-template > errors.html
----
....
-* If your command contains multiple lines and uses callout annotations, you must comment out the callout(s) in the codeblock, as shown in the following example:
-+
-....
-To scale based on the percent of CPU utilization, create a `HorizontalPodAutoscaler` object for an existing object:
+[[command-syntax-replaceable-values]]
+==== Command syntax for replaceable values
+To mark up command syntax, use the code block and wrap any replaceable values in angle brackets (`<>`) with the required command parameter, using underscores (`_`) between words as necessary for legibility.
-[source,terminal]
-----
-$ oc autoscale / \// <1>
- --min