diff --git a/Dockerfile b/Dockerfile deleted file mode 100644 index 8921efc0d141..000000000000 --- a/Dockerfile +++ /dev/null @@ -1,7 +0,0 @@ -FROM registry.access.redhat.com/ubi8-minimal:latest - -ENV LANG=en_US.UTF-8 - -WORKDIR /go/src/github.com/openshift/openshift-docs - -CMD ["/bin/bash"] diff --git a/Gemfile b/Gemfile deleted file mode 100644 index e3d79c72e70f..000000000000 --- a/Gemfile +++ /dev/null @@ -1,4 +0,0 @@ -source "https://rubygems.org" - -gem 'ascii_binder', '~>1.0' - diff --git a/Guardfile b/Guardfile deleted file mode 100644 index 037efaaaaf9f..000000000000 --- a/Guardfile +++ /dev/null @@ -1,3 +0,0 @@ -require 'ascii_binder' -gem_dir = Gem::Specification.find_by_name("ascii_binder").lib_dirs_glob -instance_eval(File.read(File.join(gem_dir, 'ascii_binder/tasks/guards.rb'))) diff --git a/Jenkinsfile b/Jenkinsfile deleted file mode 100644 index d0f1ef8cb8df..000000000000 --- a/Jenkinsfile +++ /dev/null @@ -1,117 +0,0 @@ -#!/usr/bin/env groovy - -// Pipeline variables -def isPR=false // true if the branch being tested belongs to a PR -def project="" // project where build and deploy will occur -def projectCreated=false // true if a project was created by this build and needs to be cleaned up -def repoUrl="" // the URL of this project's repository -def appName="openshift-docs" // name of application to create -def approved=false // true if the preview was approved - -// uniqueName returns a name with a 16-character random character suffix -def uniqueName = { String prefix -> - sh "cat /dev/urandom | tr -dc 'a-z0-9' | fold -w 16 | head -n 1 > suffix" - suffix = readFile("suffix").trim() - return prefix + suffix -} - -// setBuildStatus sets a status item on a GitHub commit -def setBuildStatus = { String url, String context, String message, String state, String backref -> - step([ - $class: "GitHubCommitStatusSetter", - reposSource: [$class: "ManuallyEnteredRepositorySource", url: url ], - contextSource: [$class: "ManuallyEnteredCommitContextSource", context: context ], - errorHandlers: [[$class: "ChangingBuildStatusErrorHandler", result: "UNSTABLE"]], - statusBackrefSource: [ $class: "ManuallyEnteredBackrefSource", backref: backref ], - statusResultSource: [ $class: "ConditionalStatusResultSource", results: [ - [$class: "AnyBuildResult", message: message, state: state]] ] - ]); -} - -// getRepoURL retrieves the origin URL of the current source repository -def getRepoURL = { - sh "git config --get remote.origin.url > originurl" - return readFile("originurl").trim() -} - -// getRouteHostname retrieves the host name from the given route in an -// OpenShift namespace -def getRouteHostname = { String routeName, String projectName -> - sh "oc get route ${routeName} -n ${projectName} -o jsonpath='{ .spec.host }' > apphost" - return readFile("apphost").trim() -} - -// setPreviewStatus sets a status item for each openshift-docs release -def setPreviewStatus = { String url, String message, String state, String host, boolean includeLink -> - setBuildStatus(url, "ci/app-preview/origin", message, state, includeLink ? "http://${host}/openshift-origin/latest/welcome/index.html" : "") - setBuildStatus(url, "ci/app-preview/enterprise", message, state, includeLink ? "http://${host}/openshift-enterprise/master/welcome/index.html" : "") - setBuildStatus(url, "ci/app-preview/online", message, state, includeLink ? "http://${host}/openshift-online/master/welcome/index.html" : "") - setBuildStatus(url, "ci/app-preview/dedicated", message, state, includeLink ? 
"http://${host}/openshift-dedicated/master/welcome/index.html" : "") -} - -try { // Use a try block to perform cleanup in a finally block when the build fails - - node { - // Initialize variables in default node context - isPR = env.BRANCH_NAME ? env.BRANCH_NAME.startsWith("PR") : false - baseProject = env.PROJECT_NAME - project = env.PROJECT_NAME - - stage ('Checkout') { - checkout scm - repoUrl = getRepoURL() - } - - // When testing a PR, create a new project to perform the build - // and deploy artifacts. - if (isPR) { - stage ('Create PR Project') { - setPreviewStatus(repoUrl, "Building application", "PENDING", "", false) - setBuildStatus(repoUrl, "ci/approve", "Aprove after testing", "PENDING", "") - project = uniqueName("${appName}-") - sh "oc new-project ${project}" - projectCreated=true - sh "oc policy add-role-to-group view system:authenticated -n ${project}" - } - } - - stage ('Apply object configurations') { - sh "oc process -f _openshift/docs-template.yaml -n ${project} | oc apply -f - -n ${project}" - } - - stage ('Build') { - sh "oc start-build ${appName} -n ${project} --from-repo=. --follow" - } - - - if (isPR) { - stage ('Verify Service') { - openshiftVerifyService serviceName: appName, namespace: project - } - def appHostName = getRouteHostname(appName, project) - setPreviewStatus(repoUrl, "The application is available", "SUCCESS", "${appHostName}", true) - setBuildStatus(repoUrl, "ci/approve", "Approve after testing", "PENDING", "${env.BUILD_URL}input/") - stage ('Manual Test') { - timeout(time:2, unit:'DAYS') { - input "Is everything OK?" - } - } - approved = true - setPreviewStatus(repoUrl, "Application previewed", "SUCCESS", "", false) - setBuildStatus(repoUrl, "ci/approve", "Manually approved", "SUCCESS", "") - } - } -} -finally { - if (projectCreated) { - node { - stage('Delete PR Project') { - if (!approved) { - setPreviewStatus(repoUrl, "Application previewed", "FAILURE", "", false) - setBuildStatus(repoUrl, "ci/approve", "Rejected", "FAILURE", "") - } - sh "oc delete project ${project}" - } - } - } -} diff --git a/Makefile b/Makefile deleted file mode 100644 index 9e2354a24328..000000000000 --- a/Makefile +++ /dev/null @@ -1,4 +0,0 @@ -all: - ./_update_rest_api.py - -.PHONY: all diff --git a/NewPSC.png b/NewPSC.png deleted file mode 100644 index 323a80c70720..000000000000 Binary files a/NewPSC.png and /dev/null differ diff --git a/OWNERS b/OWNERS deleted file mode 100644 index aa16f7a0b724..000000000000 --- a/OWNERS +++ /dev/null @@ -1,8 +0,0 @@ -# See the OWNERS docs: https://git.k8s.io/community/contributors/guide/owners.md -# This file just uses aliases defined in OWNERS_ALIASES. -# Do not change indents. Incorrect indents break the Prow CI - -reviewers: -- merge-rights -approvers: -- approve-rights diff --git a/README.adoc b/README.adoc deleted file mode 100644 index 0588a5b273bf..000000000000 --- a/README.adoc +++ /dev/null @@ -1,44 +0,0 @@ -= OpenShift documentation - -* https://www.okd.io/[OKD] -* https://www.openshift.com/products/online/[OpenShift Online] -* https://www.openshift.com/products/container-platform/[OpenShift Container Platform] -* https://www.openshift.com/products/dedicated/[OpenShift Dedicated] - -All OpenShift documentation is sourced in https://asciidoc.org/[AsciiDoc] and transformed into HTML/CSS and other formats through automation that is based on https://asciidoctor.org/[AsciiDoctor]. - -The documentation published from these source files can be viewed at https://docs.openshift.com. 
- -== Contributing to OpenShift documentation -// NOTE: This text is mirrored in ./CONTRIBUTING.adoc -// If you update one, update both. - -If you are interested in contributing to OpenShift technical documentation, you can view all our link:./contributing_to_docs[resources] that will help you get set up and provide more information. - - -The following table provides quick links to help you get started. - -[options="header"] -|=== - -|Question |Link - -|I'm interested, how do I contribute? -|See the link:/contributing_to_docs/contributing.adoc[contributing] topic to learn more about this repository and how you can contribute. - -|Are there any basic guidelines to help me? -|The link:/contributing_to_docs/doc_guidelines.adoc[documentation guidelines] topic provides some basic guidelines to help us keep our content consistent, and includes other style information. - -|How do I set up my workstation? -|See the link:/contributing_to_docs/tools_and_setup.adoc[tools and setup] topic to set up your workstation. - -|How do I edit an existing topic, or create new content? -|See the link:/contributing_to_docs/create_or_edit_content.adoc[create or edit content] topic to get started. - -|=== - -== Contacts - -For questions or comments about OpenShift documentation: - -* Send an email to the OpenShift documentation team at openshift-docs@redhat.com. diff --git a/Rakefile b/Rakefile deleted file mode 100644 index fe2b378488ed..000000000000 --- a/Rakefile +++ /dev/null @@ -1 +0,0 @@ -require 'ascii_binder/tasks/tasks' diff --git a/_attributes/attributes-microshift.adoc b/_attributes/attributes-microshift.adoc deleted file mode 100644 index ef5c576b2ef0..000000000000 --- a/_attributes/attributes-microshift.adoc +++ /dev/null @@ -1,28 +0,0 @@ -// common attributes -:toc: -:toc-title: -:experimental: -:imagesdir: images -:OCP: OpenShift Container Platform -:ocp-version: 4.19 -:oc-first: pass:quotes[OpenShift CLI (`oc`)] -//OpenShift Kubernetes Engine -:oke: OpenShift Kubernetes Engine -:product-title-first: Red Hat build of MicroShift (MicroShift) -:microshift-short: MicroShift -:product-registry: OpenShift image registry -:product-version: 4.19 -:rhel-major: rhel-9 -:op-system-base-full: Red Hat Enterprise Linux (RHEL) -:op-system-base: RHEL -:op-system-ostree-first: Red Hat Enterprise Linux for Edge (RHEL for Edge) -:op-system-ostree: RHEL for Edge -:op-system-rt-kernel: Red Hat Enterprise Linux for Real Time (real-time kernel) -:op-system-rtk: real-time kernel -:op-system-image: image mode for RHEL -:op-system-version: 9.6 -:op-system-version-major: 9 -:op-system-bundle: Red Hat Device Edge -:rpm-repo-version: rhocp-4.19 -:rhde-version: 4 -:VirtProductName: OpenShift Virtualization diff --git a/_attributes/attributes-openshift-dedicated.adoc b/_attributes/attributes-openshift-dedicated.adoc deleted file mode 100644 index 9a3cd800719a..000000000000 --- a/_attributes/attributes-openshift-dedicated.adoc +++ /dev/null @@ -1,57 +0,0 @@ -// common attributes -:product-short-name: OpenShift Dedicated -:toc: -:toc-title: -:experimental: -:imagesdir: images -:OCP: OpenShift Container Platform -:OCP-short: OpenShift -:ocp-version: 4.14 -:op-system-first: Red Hat Enterprise Linux CoreOS (RHCOS) -:oc-first: pass:quotes[OpenShift CLI (`oc`)] -:cluster-manager-first: Red Hat OpenShift Cluster Manager -:cluster-manager: OpenShift Cluster Manager -:cluster-manager-url: link:https://console.redhat.com/openshift[OpenShift Cluster Manager] -:cluster-manager-url-pull: 
link:https://console.redhat.com/openshift/install/pull-secret[pull secret from Red Hat OpenShift Cluster Manager] -:hybrid-console: Red Hat Hybrid Cloud Console -:hybrid-console-second: Hybrid Cloud Console -:hybrid-console-url: link:https://console.redhat.com[Red Hat Hybrid Cloud Console] -:AWS: Amazon Web Services (AWS) -:GCP: Google Cloud Platform (GCP) -:openshift-networking: Red Hat OpenShift Networking -:product-registry: OpenShift image registry -:kebab: image:kebab.png[title="Options menu"] -:rhq-short: Red Hat Quay -:SMProductName: Red Hat OpenShift Service Mesh -:pipelines-title: Red Hat OpenShift Pipelines -:sts-first: Security Token Service (STS) -:sts-full: Security Token Service -:sts-short: STS -//logging -:logging-title: logging for Red Hat OpenShift -:logging-title-uc: Logging for Red Hat OpenShift -:logging: logging -:logging-uc: Logging -:clo: Red Hat OpenShift Logging Operator -:loki-op: Loki Operator -:es-op: OpenShift Elasticsearch Operator -:logging-sd: Red Hat OpenShift Logging -:log-plug: logging Console Plugin -// -:ServerlessProductName: OpenShift Serverless -:rh-openstack-first: Red Hat OpenStack Platform (RHOSP) -:rh-openstack: RHOSP -:rhoda: Red Hat OpenShift Database Access -:rhoda-short: RHODA -:rhods: Red Hat OpenShift Data Science -:osd: OpenShift Dedicated -:VirtProductName: OpenShift Virtualization -//Formerly known as CodeReady Containers and CodeReady Workspaces -:openshift-local-productname: Red Hat OpenShift Local -:openshift-dev-spaces-productname: Red Hat OpenShift Dev Spaces -:hcp: hosted control planes -:hcp-title: ROSA with HCP -:hcp-title-first: {product-title} (ROSA) with {hcp} (HCP) -:rosa-classic: ROSA (classic architecture) -:rosa-classic-first: {product-title} (ROSA) (classic architecture) -//ROSA CLI variables diff --git a/_attributes/common-attributes.adoc b/_attributes/common-attributes.adoc index c0860e3800c1..0aa0d55145a2 100644 --- a/_attributes/common-attributes.adoc +++ b/_attributes/common-attributes.adoc @@ -10,367 +10,45 @@ :toc-title: :imagesdir: images :prewrap!: -// n-1 and n+1 OCP versions relative to the current branch's {product-version} attr -:ocp-nminus1: 4.17 -:ocp-nplus1: 4.19 -// Operating system attributes -:op-system-first: Red Hat Enterprise Linux CoreOS (RHCOS) -:op-system: RHCOS -:op-system-lowercase: rhcos -:op-system-base: RHEL -:op-system-base-full: Red Hat Enterprise Linux (RHEL) -:op-system-version: 9.x -:op-system-version-9: 9 -ifdef::openshift-origin[] -:op-system-first: Fedora CoreOS (FCOS) -:op-system: FCOS -:op-system-lowercase: fcos -:op-system-base: Fedora -:op-system-base-full: Fedora -:op-system-version: 35 -endif::[] -:tsb-name: Template Service Broker + :kebab: image:kebab.png[title="Options menu"] -:ai-full: Assisted Installer -:cluster-manager-first: Red Hat OpenShift Cluster Manager -:cluster-manager: OpenShift Cluster Manager -:cluster-manager-url: link:https://console.redhat.com/openshift[OpenShift Cluster Manager] -:cluster-manager-url-pull: link:https://console.redhat.com/openshift/install/pull-secret[pull secret from Red Hat OpenShift Cluster Manager] -:insights-advisor-url: link:https://console.redhat.com/openshift/insights/advisor/[Insights Advisor] -:hybrid-console: Red Hat Hybrid Cloud Console -:hybrid-console-second: Hybrid Cloud Console -:hybrid-console-url: link:https://console.redhat.com[Red Hat Hybrid Cloud Console] -// OADP attributes -:oadp-first: OpenShift API for Data Protection (OADP) -:oadp-full: OpenShift API for Data Protection -:oadp-short: OADP -:oadp-version: 1.4.4 
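Link-valued attributes such as `:cluster-manager-url-pull:` above bundle a URL and its link text into one token. A hedged sketch of how a topic consumes it; the surrounding sentence is invented for illustration:

```asciidoc
// Source line in a topic file:
Obtain the {cluster-manager-url-pull} and save it to your workstation.
// Expected rendering: "Obtain the pull secret from Red Hat OpenShift
// Cluster Manager and save it to your workstation," with the phrase
// linked to https://console.redhat.com/openshift/install/pull-secret.
```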
-:oadp-version-1-3: 1.3.6 -:oadp-version-1-4: 1.4.4 -:oadp-bsl-api: backupstoragelocations.velero.io -:oc-first: pass:quotes[OpenShift CLI (`oc`)] -:product-registry: OpenShift image registry -:product-mirror-registry: Mirror registry for Red Hat OpenShift -:rh-storage-first: Red Hat OpenShift Data Foundation -:rh-storage: OpenShift Data Foundation -:rh-rhacm-title: Red Hat Advanced Cluster Management -:rh-rhacm-first: Red Hat Advanced Cluster Management (RHACM) + +:rh-rhacm-title: Red{nbsp}Hat Advanced Cluster Management +:rh-rhacm-first: Red{nbsp}Hat Advanced Cluster Management (RHACM) :rh-rhacm: RHACM :rh-rhacm-version: 2.13 -:osc: OpenShift sandboxed containers -:cert-manager-operator: cert-manager Operator for Red Hat OpenShift -:secondary-scheduler-operator-full: Secondary Scheduler Operator for Red Hat OpenShift -:secondary-scheduler-operator: Secondary Scheduler Operator -:descheduler-operator: Kube Descheduler Operator -:cli-manager: CLI Manager Operator -// Backup and restore -:velero-domain: velero.io -:velero-version: 1.14 + :launch: image:app-launcher.png[title="Application Launcher"] -:mtc-first: Migration Toolkit for Containers (MTC) -:mtc-short: MTC -:mtc-full: Migration Toolkit for Containers -:mtc-version: 1.8 -:mtc-version-z: 1.8.5 -:mtc-legacy-image: 1.7 -:mtv-first: Migration Toolkit for Virtualization (MTV) -:mtv-short: MTV -:mtv-full: Migration Toolkit for Virtualization -:mtv-version: 2.8 -// builds (Valid only in 4.11 and later) -:builds-v2title: Builds for Red Hat OpenShift -:builds-v2shortname: OpenShift Builds v2 -:builds-v1shortname: OpenShift Builds v1 -ifdef::openshift-origin[] -:builds-v2title: Shipwright -:builds-v2shortname: Shipwright -:builds-v1shortname: Builds v1 -endif::[] -//gitops -:gitops-title: Red Hat OpenShift GitOps -:gitops-shortname: GitOps -:gitops-ver: 1.1 -:rh-app-icon: image:red-hat-applications-menu-icon.jpg[title="Red Hat applications"] -//pipelines -:pipelines-title: Red Hat OpenShift Pipelines -:pipelines-shortname: OpenShift Pipelines -:pipelines-ver: pipelines-1.18 -:pipelines-version-number: 1.18 -:tekton-chains: Tekton Chains -:tekton-hub: Tekton Hub -:artifact-hub: Artifact Hub -:pac: Pipelines as Code -//odo -:odo-title: odo -//OpenShift Kubernetes Engine -:oke: OpenShift Kubernetes Engine -//OpenShift Platform Plus -:opp: OpenShift Platform Plus -//openshift virtualization (cnv) -:VirtProductName: OpenShift Virtualization -:VirtVersion: 4.17 -:HCOVersion: 4.17.0 -:CNVNamespace: openshift-cnv -:CNVOperatorDisplayName: OpenShift Virtualization Operator -:CNVSubscriptionSpecSource: redhat-operators -:CNVSubscriptionSpecName: kubevirt-hyperconverged -:delete: image:delete.png[title="Delete"] -ifdef::openshift-origin[] -:VirtProductName: OKD Virtualization -:CNVNamespace: kubevirt-hyperconverged -:CNVOperatorDisplayName: KubeVirt HyperConverged Cluster Operator -:CNVSubscriptionSpecSource: community-operators -:CNVSubscriptionSpecName: community-kubevirt-hyperconverged -endif::[] -// openshift virtualization engine (ove) -:ove-first: Red Hat OpenShift Virtualization Engine -:ove: OpenShift Virtualization Engine -//distributed tracing -:DTProductName: Red Hat OpenShift distributed tracing platform -:DTShortName: distributed tracing platform -:DTProductVersion: 3.1 -:JaegerName: Red Hat OpenShift distributed tracing platform (Jaeger) -:JaegerOperator: Red Hat OpenShift distributed tracing platform -:JaegerShortName: distributed tracing platform (Jaeger) -:JaegerOperator: Red Hat OpenShift distributed tracing platform -:JaegerVersion: 
1.53.0 -:OTELName: Red Hat build of OpenTelemetry -:OTELShortName: Red Hat build of OpenTelemetry -:OTELOperator: Red Hat build of OpenTelemetry Operator -:OTELVersion: 0.93.0 -:TempoName: Red Hat OpenShift distributed tracing platform (Tempo) -:TempoShortName: distributed tracing platform (Tempo) -:TempoOperator: Tempo Operator -:TempoVersion: 2.3.1 -//telco -ifdef::telco-ran[] -:rds: telco RAN DU -:rds-caps: Telco RAN DU -:rds-first: Telco RAN distributed unit (DU) -endif::[] -ifdef::telco-core[] -:rds: telco core -:rds-caps: Telco core -endif::[] -//lightspeed -:ols-official: Red Hat OpenShift Lightspeed -:ols: OpenShift Lightspeed + //logging :logging: logging :logging-uc: Logging -:for: for Red Hat OpenShift -:clo: Red Hat OpenShift Logging Operator +:for: for Red{nbsp}Hat OpenShift +:clo: Red{nbsp}Hat OpenShift Logging Operator :loki-op: Loki Operator :es-op: OpenShift Elasticsearch Operator :log-plug: logging Console plugin + //observability :ObservabilityLongName: Red Hat OpenShift Observability :ObservabilityShortName: Observability + // Cluster Monitoring Operator :cmo-first: Cluster Monitoring Operator (CMO) :cmo-full: Cluster Monitoring Operator :cmo-short: CMO -//power monitoring -:PM-title-c: Power monitoring for Red Hat OpenShift -:PM-title: power monitoring for Red Hat OpenShift -:PM-shortname: power monitoring -:PM-shortname-c: Power monitoring -:PM-operator: Power monitoring Operator -:PM-kepler: Kepler -//serverless -:ServerlessProductName: OpenShift Serverless -:ServerlessProductShortName: Serverless -:ServerlessOperatorName: OpenShift Serverless Operator -:FunctionsProductName: OpenShift Serverless Functions -//service mesh v2 -:product-dedicated: Red Hat OpenShift Dedicated -:product-rosa: Red Hat OpenShift Service on AWS -:SMProductName: Red Hat OpenShift Service Mesh -:SMProductShortName: Service Mesh -:SMProductVersion: 2.6.6 -:MaistraVersion: 2.6 -:KialiProduct: Kiali Operator provided by Red Hat -:SMPlugin: OpenShift Service Mesh Console (OSSMC) plugin -:SMPluginShort: OSSMC plugin -//Service Mesh v1 -:SMProductVersion1x: 1.1.18.2 -//Windows containers -:productwinc: Red Hat OpenShift support for Windows Containers -// Red Hat Quay Container Security Operator -:rhq-cso: Red Hat Quay Container Security Operator -// Red Hat Quay -:quay: Red Hat Quay -:sno: single-node OpenShift -:sno-caps: Single-node OpenShift -:sno-okd: single-node OKD -:sno-caps-okd: Single-node OKD -//TALO and Redfish events Operators -:cgu-operator-first: Topology Aware Lifecycle Manager (TALM) -:cgu-operator-full: Topology Aware Lifecycle Manager -:cgu-operator: TALM -:redfish-operator: Bare Metal Event Relay -//Formerly known as CodeReady Containers and CodeReady Workspaces -:openshift-local-productname: Red Hat OpenShift Local -:openshift-dev-spaces-productname: Red Hat OpenShift Dev Spaces -:factory-prestaging-tool: factory-precaching-cli tool -:factory-prestaging-tool-caps: Factory-precaching-cli tool -:openshift-networking: Red Hat OpenShift Networking -// TODO - this probably needs to be different for OKD -//ifdef::openshift-origin[] -//:openshift-networking: OKD Networking -//endif::[] -// logical volume manager storage -:lvms-first: Logical Volume Manager (LVM) Storage -:lvms: LVM Storage -//Operator SDK version -:osdk_ver: 1.38.0 -//Operator SDK version that shipped with the previous OCP 4.x release -:osdk_ver_n1: 1.36.1 -//Version-agnostic OLM -:olm-first: Operator Lifecycle Manager (OLM) -:olm: OLM -//Initial version of OLM that shipped with OCP 4, aka "v0" and f/k/a "existing" 
during OLM v1's pre-4.18 TP phase -:olmv0: OLM (Classic) -:olmv0-caps: OLM (Classic) -:olmv0-first: Operator Lifecycle Manager (OLM) Classic -:olmv0-first-caps: Operator Lifecycle Manager (OLM) Classic -//Next-gen (OCP 4.14+) Operator Lifecycle Manager, f/k/a "1.0" -:olmv1: OLM v1 -:olmv1-first: Operator Lifecycle Manager (OLM) v1 -// -:ztp-first: GitOps Zero Touch Provisioning (ZTP) -:ztp: GitOps ZTP -:3no: three-node OpenShift -:3no-caps: Three-node OpenShift -:run-once-operator: Run Once Duration Override Operator -// Web terminal -:web-terminal-op: Web Terminal Operator -:devworkspace-op: DevWorkspace Operator -:secrets-store-driver: Secrets Store CSI driver -:secrets-store-operator: Secrets Store CSI Driver Operator + +// Observability Signal Correlation +:korrel8r: https://github.com/korrel8r/korrel8r[Korrel8r] +:sync: image:fa-sync-alt.svg[Sync,20] + // Cluster Observability Operator :coo-first: Cluster Observability Operator (COO) :coo-full: Cluster Observability Operator :coo-short: COO -// ODF -:odf-first: Red Hat OpenShift Data Foundation (ODF) -:odf-full: Red Hat OpenShift Data Foundation -:odf-short: ODF -:rh-dev-hub: Red Hat Developer Hub -// IBU -:lcao: Lifecycle Agent -// Cloud provider names -// Alibaba Cloud -:alibaba: Alibaba Cloud -// Amazon Web Services (AWS) -:aws-first: Amazon Web Services (AWS) -:aws-full: Amazon Web Services -:aws-short: AWS -// Google Cloud Platform (GCP) -:gcp-first: Google Cloud Platform (GCP) -:gcp-full: Google Cloud Platform -:gcp-short: GCP -// IBM general -:ibm-name: IBM(R) -:ibm-title: IBM -// IBM Cloud -:ibm-cloud-name: IBM Cloud(R) -:ibm-cloud-title: IBM Cloud -// IBM Cloud Bare Metal (Classic) -:ibm-cloud-bm: IBM Cloud(R) Bare Metal (Classic) -:ibm-cloud-bm-title: IBM Cloud Bare Metal (Classic) -//IBM Cloud Object Storage (COS) -:ibm-cloud-object-storage: IBM Cloud Object Storage (COS) -// IBM Power -:ibm-power-name: IBM Power(R) -:ibm-power-title: IBM Power -:ibm-power-server-name: IBM Power(R) Virtual Server -:ibm-power-server-title: IBM Power Virtual Server -// IBM zSystems -:ibm-z-name: IBM Z(R) -:ibm-z-title: IBM Z -:ibm-linuxone-name: IBM(R) LinuxONE -:ibm-linuxone-title: IBM LinuxONE -// Microsoft Azure -:azure-first: Microsoft Azure -:azure-full: Microsoft Azure -:azure-short: Azure -//Oracle -:oci-first: Oracle(R) Cloud Infrastructure (OCI) -:oci-first-no-rt: Oracle Cloud Infrastructure (OCI) -:oci: OCI -:oci-ccm-full: Oracle Cloud Controller Manager (CCM) -:oci-ccm: Oracle CCM -:oci-csi-full: Oracle Container Storage Interface (CSI) -:oci-csi: Oracle CSI -:ocid-first: Oracle(R) Cloud Identifier (OCID) -:ocid: OCID -:ocvs-first: Oracle(R) Cloud VMware Solution (OCVS) -:ocvs: OCVS -:oci-c3: Oracle(R) Compute Cloud@Customer -:oci-c3-no-rt: Oracle Compute Cloud@Customer -:oci-c3-short: Compute Cloud@Customer -:oci-pca: Oracle(R) Private Cloud Appliance -:oci-pca-no-rt: Oracle Private Cloud Appliance -:oci-pca-short: Private Cloud Appliance -// Red Hat OpenStack Platform (RHOSP)/OpenStack -ifndef::openshift-origin[] -:rh-openstack-first: Red Hat OpenStack Platform (RHOSP) -:rh-openstack: RHOSP -endif::openshift-origin[] -ifdef::openshift-origin[] -:rh-openstack-first: OpenStack -:rh-openstack: OpenStack -endif::openshift-origin[] -:rhoso-first: Red Hat OpenStack Services on OpenShift (RHOSO) -:rhoso: RHOSO -// VMware vSphere -:vmw-first: VMware vSphere -:vmw-full: VMware vSphere -:vmw-short: vSphere -//Token-based auth products -//AWS Security Token Service -:sts-first: Security Token Service (STS) -:sts-full: Security Token Service 
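The `ifndef::openshift-origin[]`/`ifdef::openshift-origin[]` pairs above give the same attribute a different value per distribution, so one source line renders differently in each build. A sketch under that assumption; the sentence itself is illustrative:

```asciidoc
Install a cluster on {rh-openstack-first}.
// openshift-enterprise build: Install a cluster on Red Hat OpenStack Platform (RHOSP).
// openshift-origin build: Install a cluster on OpenStack.
```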
-:sts-short: STS -//Microsoft Entra Workload ID (FKA Azure Active Directory Workload Identities) -:entra-first: Microsoft Entra Workload ID -:entra-short: Workload ID -//Google Cloud Platform Workload Identity -:gcp-wid-first: Google Cloud Platform Workload Identity -:gcp-wid-short: GCP Workload Identity -// Cluster API terminology -// Cluster CAPI Operator -:cluster-capi-operator: Cluster CAPI Operator -// Cluster API Provider Amazon Web Services (AWS) -:cap-aws-first: Cluster API Provider Amazon Web Services (AWS) -:cap-aws-short: Cluster API Provider AWS -// Cluster API Provider Google Cloud Platform (GCP) -:cap-gcp-first: Cluster API Provider Google Cloud Platform (GCP) -:cap-gcp-short: Cluster API Provider GCP -// Cluster API Provider IBM Cloud -:cap-ibm-first: Cluster API Provider IBM Cloud -:cap-ibm-short: Cluster API Provider IBM Cloud -// Cluster API Provider Kubevirt -:cap-kubevirt-first: Cluster API Provider Kubevirt -:cap-kubevirt-short: Cluster API Provider Kubevirt -// Cluster API Provider Microsoft Azure -:cap-azure-first: Cluster API Provider Microsoft Azure -:cap-azure-short: Cluster API Provider Azure -// Cluster API Provider Nutanix -:cap-nutanix-first: Cluster API Provider Nutanix -:cap-nutanix-short: Cluster API Provider Nutanix -// Cluster API Provider OpenStack -:cap-openstack-first: Cluster API Provider OpenStack -:cap-openstack-short: Cluster API Provider OpenStack -// Cluster API Provider Oracle Cloud Infrastructure (OCI) -:cap-oci-first: Cluster API Provider Oracle Cloud Infrastructure (OCI) -:cap-oci-short: Cluster API Provider OCI -// Cluster API Provider VMware vSphere -:cap-vsphere-first: Cluster API Provider VMware vSphere -:cap-vsphere-short: Cluster API Provider vSphere -// Hosted control planes related attributes -:hcp-capital: Hosted control planes -:hcp: hosted control planes -:mce: multicluster engine for Kubernetes Operator -:mce-short: multicluster engine Operator + + +:COOProductName: Red Hat OpenShift Cluster Observability Operator +:COOProductShortName: COO + +:ocp-product-title: OpenShift Container Platform diff --git a/_attributes/servicebinding-document-attributes.adoc b/_attributes/servicebinding-document-attributes.adoc deleted file mode 100644 index 128980867238..000000000000 --- a/_attributes/servicebinding-document-attributes.adoc +++ /dev/null @@ -1,12 +0,0 @@ -// Standard document attributes to be used in the documentation -// -// The following are shared by all documents: -:toc: -:toclevels: 4 -:experimental: -// -// Product content attributes, that is, substitution variables in the files. 
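The `+` lines added to `common-attributes.adoc` earlier in this diff spell the brand as `Red{nbsp}Hat`; `{nbsp}` is Asciidoctor's predefined no-break space, which keeps the two words on one line in rendered output. A brief sketch, with an illustrative sentence:

```asciidoc
:clo: Red{nbsp}Hat OpenShift Logging Operator

Install the {clo} from OperatorHub.
// Renders as: Install the Red Hat OpenShift Logging Operator from
// OperatorHub, with a no-break space between "Red" and "Hat".
```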
-// -:servicebinding-title: Service Binding Operator -:servicebinding-shortname: Service Binding -:servicebinding-ver: servicebinding-1.0 diff --git a/_distro_map.yml b/_distro_map.yml index a15775b53dbf..aa5039c5c865 100644 --- a/_distro_map.yml +++ b/_distro_map.yml @@ -1,488 +1,11 @@ --- -openshift-origin: - name: OKD - author: OKD Documentation Project - site: community - site_name: Documentation - site_url: https://docs.okd.io/ - branches: - main: - name: 4 - dir: latest - enterprise-4.6: - name: '4.6' - dir: '4.6' - enterprise-4.7: - name: '4.7' - dir: '4.7' - enterprise-4.8: - name: '4.8' - dir: '4.8' - enterprise-4.9: - name: '4.9' - dir: '4.9' - enterprise-4.10: - name: '4.10' - dir: '4.10' - enterprise-4.11: - name: '4.11' - dir: '4.11' - enterprise-4.12: - name: '4.12' - dir: '4.12' - enterprise-4.13: - name: '4.13' - dir: '4.13' - enterprise-4.14: - name: '4.14' - dir: '4.14' - enterprise-4.15: - name: '4.15' - dir: '4.15' - enterprise-4.16: - name: '4.16' - dir: '4.16' - enterprise-4.17: - name: '4.17' - dir: '4.17' - enterprise-4.18: - name: '4.18' - dir: '4.18' - enterprise-3.6: - name: '3.6' - dir: '3.6' - enterprise-3.7: - name: '3.7' - dir: '3.7' - enterprise-3.9: - name: '3.9' - dir: '3.9' - enterprise-3.10: - name: '3.10' - dir: '3.10' - enterprise-3.11: - name: '3.11' - dir: '3.11' -openshift-enterprise: - name: OpenShift Container Platform - author: OpenShift Documentation Project - site: commercial - site_name: Documentation - site_url: https://docs.openshift.com/ - branches: - enterprise-3.0: - name: '3.0' - dir: enterprise/3.0 - distro-overrides: - name: OpenShift Enterprise - enterprise-3.1: - name: '3.1' - dir: enterprise/3.1 - distro-overrides: - name: OpenShift Enterprise - enterprise-3.2: - name: '3.2' - dir: enterprise/3.2 - distro-overrides: - name: OpenShift Enterprise - enterprise-3.3: - name: '3.3' - dir: container-platform/3.3 - enterprise-3.4: - name: '3.4' - dir: container-platform/3.4 - enterprise-3.5: - name: '3.5' - dir: container-platform/3.5 - enterprise-3.6: - name: '3.6' - dir: container-platform/3.6 - enterprise-3.7: - name: '3.7' - dir: container-platform/3.7 - enterprise-3.9: - name: '3.9' - dir: container-platform/3.9 - enterprise-3.10: - name: '3.10' - dir: container-platform/3.10 - enterprise-3.11: - name: '3.11' - dir: container-platform/3.11 - enterprise-4.1: - name: '4.1' - dir: container-platform/4.1 - enterprise-4.2: - name: '4.2' - dir: container-platform/4.2 - enterprise-4.3: - name: '4.3' - dir: container-platform/4.3 - enterprise-4.4: - name: '4.4' - dir: container-platform/4.4 - enterprise-4.5: - name: '4.5' - dir: container-platform/4.5 - enterprise-4.6: - name: '4.6' - dir: container-platform/4.6 - enterprise-4.7: - name: '4.7' - dir: container-platform/4.7 - enterprise-4.8: - name: '4.8' - dir: container-platform/4.8 - enterprise-4.9: - name: '4.9' - dir: container-platform/4.9 - enterprise-4.10: - name: '4.10' - dir: container-platform/4.10 - enterprise-4.11: - name: '4.11' - dir: container-platform/4.11 - enterprise-4.12: - name: '4.12' - dir: container-platform/4.12 - enterprise-4.13: - name: '4.13' - dir: container-platform/4.13 - enterprise-4.14: - name: '4.14' - dir: container-platform/4.14 - enterprise-4.15: - name: '4.15' - dir: container-platform/4.15 - enterprise-4.16: - name: '4.16' - dir: container-platform/4.16 - enterprise-4.17: - name: '4.17' - dir: container-platform/4.17 - enterprise-4.18: - name: '4.18' - dir: container-platform/4.18 -openshift-dedicated: - name: OpenShift Dedicated - author: OpenShift 
Documentation Project - site: commercial - site_name: Documentation - site_url: https://docs.openshift.com/ - branches: - enterprise-3.11: - name: '3' - dir: dedicated/3 - enterprise-4.18: - name: '' - dir: dedicated/ -openshift-aro: - name: Azure Red Hat OpenShift - author: OpenShift Documentation Project - site: commercial - site_name: Documentation - site_url: https://docs.openshift.com/ - branches: - enterprise-3.11: - name: '3' - dir: aro/3 - enterprise-4.3: - name: '4' - dir: aro/4 -openshift-rosa: - name: Red Hat OpenShift Service on AWS - author: OpenShift Documentation Project - site: commercial - site_name: Documentation - site_url: https://docs.openshift.com/ - branches: - enterprise-4.18: - name: '' - dir: rosa/ - rosa-preview: - name: '' - dir: rosa-preview/ -openshift-rosa-hcp: - name: Red Hat OpenShift Service on AWS - author: OpenShift Documentation Project - site: commercial - site_name: Documentation - site_url: https://docs.openshift.com/ - branches: - enterprise-4.18: - name: '' - dir: rosa-hcp/ - rosa-preview: - name: '' - dir: rosa-hcp-preview/ -openshift-rosa-portal: - name: Red Hat OpenShift Service on AWS - author: OpenShift Documentation Project - site: commercial - site_name: Documentation - site_url: https://docs.openshift.com/ - branches: - enterprise-4.18: - name: '' - dir: rosa-portal/ -openshift-webscale: - name: OpenShift Container Platform - author: OpenShift Documentation Project - site: commercial - site_name: Documentation - site_url: https://docs.openshift.com/ - branches: - enterprise-4.4: - name: '4.4' - dir: container-platform-ocp/4.4 - enterprise-4.5: - name: '4.5' - dir: container-platform-ocp/4.5 - enterprise-4.7: - name: '4.7' - dir: container-platform-ocp/4.7 - enterprise-4.8: - name: '4.8' - dir: container-platform-ocp/4.8 -openshift-dpu: - name: OpenShift Container Platform - author: OpenShift Documentation Project - site: commercial - site_name: Documentation - site_url: https://docs.openshift.com/ - branches: - enterprise-4.10: - name: '4.10' - dir: container-platform-dpu/4.10 -openshift-telco: - name: OpenShift Container Platform - author: OpenShift Documentation Project - site: commercial - site_name: Documentation - site_url: https://docs.openshift.com/ - branches: - enterprise-4.14: - name: '4.14' - dir: container-platform-telco/4.14 - enterprise-4.15: - name: '4.15' - dir: container-platform-telco/4.15 -openshift-acs: - name: Red Hat Advanced Cluster Security for Kubernetes - author: OpenShift documentation team - site: commercial - site_name: Documentation - site_url: https://docs.openshift.com/ - branches: - rhacs-docs-3.65: - name: '3.65' - dir: acs/3.65 - rhacs-docs-3.66: - name: '3.66' - dir: acs/3.66 - rhacs-docs-3.67: - name: '3.67' - dir: acs/3.67 - rhacs-docs-3.68: - name: '3.68' - dir: acs/3.68 - rhacs-docs-3.69: - name: '3.69' - dir: acs/3.69 - rhacs-docs-3.70: - name: '3.70' - dir: acs/3.70 - rhacs-docs-3.71: - name: '3.71' - dir: acs/3.71 - rhacs-docs-3.72: - name: '3.72' - dir: acs/3.72 - rhacs-docs-3.73: - name: '3.73' - dir: acs/3.73 - rhacs-docs-3.74: - name: '3.74' - dir: acs/3.74 - rhacs-docs-4.0: - name: '4.0' - dir: acs/4.0 - rhacs-docs-4.1: - name: '4.1' - dir: acs/4.1 - rhacs-docs-4.2: - name: '4.2' - dir: acs/4.2 - rhacs-docs-4.3: - name: '4.3' - dir: acs/4.3 - rhacs-docs-4.4: - name: '4.4' - dir: acs/4.4 - rhacs-docs-4.5: - name: '4.5' - dir: acs/4.5 - rhacs-docs-4.6: - name: '4.6' - dir: acs/4.6 - rhacs-docs-4.7: - name: '4.7' - dir: acs/4.7 -microshift: - name: Red Hat build of MicroShift - author: OpenShift 
Documentation Project - site: commercial - site_name: Documentation - site_url: https://docs.openshift.com/ - branches: - enterprise-4.12: - name: '4.12' - dir: microshift/4.12 - enterprise-4.13: - name: '4.13' - dir: microshift/4.13 -openshift-serverless: - name: Red Hat OpenShift Serverless - author: OpenShift documentation team - site: commercial - site_name: Documentation - site_url: https://docs.openshift.com/ - branches: - serverless-docs-1.28: - name: '1.28' - dir: serverless/1.28 - serverless-docs-1.29: - name: '1.29' - dir: serverless/1.29 - serverless-docs-1.30: - name: '1.30' - dir: serverless/1.30 - serverless-docs-1.31: - name: '1.31' - dir: serverless/1.31 - serverless-docs-1.32: - name: '1.32' - dir: serverless/1.32 - serverless-docs-1.33: - name: '1.33' - dir: serverless/1.33 - serverless-docs-1.34: - name: '1.34' - dir: serverless/1.34 - serverless-docs-1.35: - name: '1.35' - dir: serverless/1.35 - serverless-docs-1.36: - name: '1.36' - dir: serverless/1.36 -openshift-gitops: - name: Red Hat OpenShift GitOps +openshift-coo: + name: Red Hat OpenShift Cluster Observability Operator author: OpenShift documentation team site: commercial site_name: Documentation site_url: https://docs.openshift.com/ branches: - gitops-docs-1.8: - name: '1.8' - dir: gitops/1.8 - gitops-docs-1.9: - name: '1.9' - dir: gitops/1.9 - gitops-docs-1.10: - name: '1.10' - dir: gitops/1.10 - gitops-docs-1.11: - name: '1.11' - dir: gitops/1.11 - gitops-docs-1.12: - name: '1.12' - dir: gitops/1.12 - gitops-docs-1.13: - name: '1.13' - dir: gitops/1.13 - gitops-docs-1.14: - name: '1.14' - dir: gitops/1.14 - gitops-docs-1.15: - name: '1.15' - dir: gitops/1.15 -openshift-pipelines: - name: Red Hat OpenShift Pipelines - author: OpenShift documentation team - site: commercial - site_name: Documentation - site_url: https://docs.openshift.com/ - branches: - pipelines-docs-1.10: - name: '1.10' - dir: pipelines/1.10 - pipelines-docs-1.11: - name: '1.11' - dir: pipelines/1.11 - pipelines-docs-1.12: - name: '1.12' - dir: pipelines/1.12 - pipelines-docs-1.13: - name: '1.13' - dir: pipelines/1.13 - pipelines-docs-1.14: - name: '1.14' - dir: pipelines/1.14 - pipelines-docs-1.15: - name: '1.15' - dir: pipelines/1.15 - pipelines-docs-1.16: - name: '1.16' - dir: pipelines/1.16 - pipelines-docs-1.17: - name: '1.17' - dir: pipelines/1.17 - pipelines-docs-1.18: - name: '1.18' - dir: pipelines/1.18 -openshift-builds: - name: builds for Red Hat OpenShift - author: OpenShift documentation team - site: commercial - site_name: Documentation - site_url: https://docs.openshift.com/ - branches: - build-docs-1.0: - name: '1.0' - dir: builds/1.0 - build-docs-1.1: - name: '1.1' - dir: builds/1.1 - build-docs-1.2: - name: '1.2' - dir: builds/1.2 - build-docs-1.3: - name: '1.3' - dir: builds/1.3 - build-docs-1.4: - name: '1.4' - dir: builds/1.4 -openshift-lightspeed: - name: Red Hat OpenShift Lightspeed - author: OpenShift documentation team - site: commercial - site_name: Documentation - site_url: https://docs.openshift.com/ - branches: - lightspeed-docs-1.0tp1: - name: '1.0tp1' - dir: lightspeed/1.0tp1 - -openshift-service-mesh: - name: Red Hat OpenShift Service Mesh - author: OpenShift documentation team - site: commercial - site_name: Documentation - site_url: https://docs.openshift.com/ - branches: - service-mesh-docs-3.0.0tp1: - name: '3.0.0tp1' - dir: service-mesh/3.0.0tp1 - service-mesh-docs-3.0: - name: '3.0' - dir: service-mesh/3.0 + standalone-coo-docs-main: + name: '' + dir: coo \ No newline at end of file diff --git 
a/_topic_maps/_topic_map.yml b/_topic_maps/_topic_map.yml index de5f181d6cc0..ff6da868f339 100644 --- a/_topic_maps/_topic_map.yml +++ b/_topic_maps/_topic_map.yml @@ -1,4850 +1,51 @@ -# trunk-ignore-all(prettier) -# This configuration file dictates the organization of the topic groups and -# topics on the main page of the doc site for this branch. Each record -# consists of the following: -# -# --- <= Record delimiter -# Name: Origin of the Species <= Display name of topic group -# Dir: origin_of_the_species <= Directory name of topic group -# Topics: -# - Name: The Majestic Marmoset <= Topic name -# File: the_majestic_marmoset <= Topic file under group dir +/- -# - Name: The Curious Crocodile <= Topic 2 name -# File: the_curious_crocodile <= Topic 2 file -# - Name: The Numerous Nematodes <= Sub-topic group name -# Dir: the_numerous_nematodes <= Sub-topic group dir -# Topics: -# - Name: The Wily Worm <= Sub-topic name -# File: the_wily_worm <= Sub-topic file under / -# - Name: The Acrobatic Ascarid <= Sub-topic 2 name -# File: the_acrobatic_ascarid <= Sub-topic 2 file under / -# -# The ordering of the records in this document determines the ordering of the -# topic groups and topics on the main page. - --- -Name: About -Dir: welcome -Distros: openshift-enterprise,openshift-webscale,openshift-origin,openshift-online,openshift-dpu,openshift-telco +Name: About Red Hat OpenShift Cluster Observability Operator +Dir: about +Distros: openshift-coo Topics: -- Name: Welcome - File: index -- Name: Learn more about OpenShift Container Platform - File: learn_more_about_openshift -- Name: Providing documentation feedback - File: providing-feedback-on-red-hat-documentation - Distros: openshift-enterprise -- Name: About OpenShift Kubernetes Engine - File: oke_about - Distros: openshift-enterprise -- Name: Legal notice - File: legal-notice - Distros: openshift-enterprise,openshift-online +- Name: Cluster Observability Operator overview + File: cluster-observability-operator-overview --- -Name: What's new? 
-Dir: whats_new -Distros: openshift-origin +Name: Red Hat OpenShift Cluster Observability Operator release notes +Dir: release-notes +Distros: openshift-coo Topics: -- Name: New features and enhancements - File: new-features -- Name: Deprecated features - File: deprecated-features +- Name: Cluster Observability Operator release notes + File: cluster-observability-operator-release-notes --- -Name: Release notes -Dir: release_notes -Distros: openshift-enterprise -Topics: -- Name: OpenShift Container Platform release notes - File: ocp-release-notes -- Name: Additional release notes - File: addtl-release-notes ---- -Name: Tutorials -Dir: tutorials -Distros: openshift-enterprise -Topics: -# - Name: Tutorials overview -# File: index -- Name: Deploying an application by using the web console - File: dev-app-web-console -- Name: Deploying an application by using the CLI - File: dev-app-cli ---- -Name: Architecture -Dir: architecture -Distros: openshift-enterprise,openshift-origin,openshift-online -Topics: -- Name: Architecture overview - File: index -- Name: Product architecture - File: architecture -- Name: Installation and update - Distros: openshift-enterprise,openshift-origin - File: architecture-installation -- Name: Red Hat OpenShift Cluster Manager - Distros: openshift-enterprise - File: ocm-overview-ocp -- Name: About the multicluster engine for Kubernetes Operator - Distros: openshift-enterprise - File: mce-overview-ocp -- Name: Control plane architecture - File: control-plane - Distros: openshift-enterprise,openshift-origin,openshift-online -- Name: Understanding OpenShift development - File: understanding-development - Distros: openshift-enterprise -- Name: Understanding OKD development - File: understanding-development - Distros: openshift-origin -- Name: Fedora CoreOS - File: architecture-rhcos - Distros: openshift-origin -- Name: Red Hat Enterprise Linux CoreOS - File: architecture-rhcos - Distros: openshift-enterprise -- Name: Admission plugins - File: admission-plug-ins - Distros: openshift-enterprise,openshift-aro,openshift-origin ---- -Name: Disconnected environments -Dir: disconnected -Distros: openshift-enterprise,openshift-origin -Topics: -- Name: About disconnected environments - File: about -- Name: Converting a connected cluster to a disconnected cluster - File: connected-to-disconnected -- Name: Mirroring in disconnected environments - Dir: mirroring - Distros: openshift-origin,openshift-enterprise - Topics: - - Name: About disconnected installation mirroring - File: index - - Name: Creating a mirror registry with mirror registry for Red Hat OpenShift - File: installing-mirroring-creating-registry - - Name: Mirroring images for a disconnected installation using oc-mirror plugin v2 - File: about-installing-oc-mirror-v2 - - Name: Migrating from oc-mirror plugin v1 to v2 - File: oc-mirror-migration-v1-to-v2 - - Name: Mirroring images for a disconnected installation using the oc-mirror plugin v1 - File: installing-mirroring-disconnected - - Name: Mirroring images for a disconnected installation by using the oc adm command - File: installing-mirroring-installation-images -- Name: Installing a cluster in a disconnected environment - File: installing -- Name: Using OLM in disconnected environments - File: using-olm - Distros: openshift-origin,openshift-enterprise -- Name: Updating a cluster in a disconnected environment - Dir: updating - Topics: - - Name: About cluster updates in a disconnected environment - File: index - - Name: Mirroring OpenShift Container Platform images - 
File: mirroring-image-repository - - Name: Updating a cluster in a disconnected environment using OSUS - File: disconnected-update-osus - Distros: openshift-enterprise - - Name: Updating a cluster in a disconnected environment without OSUS - File: disconnected-update - Distros: openshift-enterprise - - Name: Updating a cluster in a disconnected environment by using the CLI - File: disconnected-update - Distros: openshift-origin - - Name: Uninstalling OSUS from a cluster - File: uninstalling-osus - Distros: openshift-enterprise ---- -Name: Installing +Name: Installing Red Hat OpenShift Cluster Observability Operator Dir: installing -Distros: openshift-origin,openshift-enterprise,openshift-webscale -Topics: -- Name: Installation overview - Dir: overview - Topics: - - Name: Installation overview - File: index - Distros: openshift-origin,openshift-enterprise - - Name: Selecting an installation method and preparing a cluster - File: installing-preparing - Distros: openshift-origin,openshift-enterprise - - Name: Cluster capabilities - File: cluster-capabilities - Distros: openshift-origin,openshift-enterprise - - Name: Support for FIPS cryptography - File: installing-fips - Distros: openshift-enterprise,openshift-online -- Name: Installing on Alibaba Cloud - Distros: openshift-origin,openshift-enterprise - Dir: installing_alibaba - Topics: - - Name: Installing a cluster on Alibaba Cloud using the Assisted Installer - File: installing-alibaba-assisted-installer -- Name: Installing on AWS - Dir: installing_aws - Distros: openshift-origin,openshift-enterprise - Topics: - - Name: Installation methods - File: preparing-to-install-on-aws - - Name: Configuring an AWS account - File: installing-aws-account - - Name: Installer-provisioned infrastructure - Dir: ipi - Distros: openshift-origin,openshift-enterprise - Topics: - - Name: Preparing to install a cluster - File: ipi-aws-preparing-to-install - - Name: Installing a cluster - File: installing-aws-default - - Name: Installing a cluster with customizations - File: installing-aws-customizations - - Name: Installing a cluster with network customizations - File: installing-aws-network-customizations - - Name: Installing a cluster in a disconnected environment - File: installing-restricted-networks-aws-installer-provisioned - - Name: Installing a cluster into an existing VPC - File: installing-aws-vpc - - Name: Installing a private cluster - File: installing-aws-private - - Name: Installing a cluster into a government region - File: installing-aws-government-region - - Name: Installing a cluster into a Secret or Top Secret Region - File: installing-aws-secret-region - - Name: Installing a cluster into a China region - File: installing-aws-china - - Name: Installing a cluster with compute nodes on Local Zones - File: installing-aws-localzone - - Name: Installing a cluster with compute nodes on Wavelength Zones - File: installing-aws-wavelength-zone - - Name: Extending an AWS VPC cluster into an AWS Outpost - File: installing-aws-outposts - - Name: Installing an AWS cluster with the support for configuring multi-architecture compute machines - File: installing-aws-multiarch-support - - Name: User-provisioned infrastructure - Dir: upi - Distros: openshift-origin,openshift-enterprise - Topics: - - Name: Preparing to install a cluster - File: upi-aws-preparing-to-install - - Name: Installation requirements - File: upi-aws-installation-reqs - - Name: Installing a cluster using CloudFormation templates - File: installing-aws-user-infra - - Name: Installing a 
cluster in a disconnected environment with user-provisioned infrastructure - File: installing-restricted-networks-aws - - Name: Installing an AWS cluster with the support for configuring multi-architecture compute machines - File: installing-aws-multiarch-support-upi - - Name: Installing a three-node cluster - File: installing-aws-three-node - - Name: Uninstalling a cluster - File: uninstalling-cluster-aws - - Name: Installation configuration parameters - File: installation-config-parameters-aws - - Name: AWS Local Zone or Wavelength Zone tasks - File: aws-compute-edge-zone-tasks - Distros: openshift-enterprise -- Name: Installing on Azure - Dir: installing_azure - Distros: openshift-origin,openshift-enterprise - Topics: - - Name: Installation methods - File: preparing-to-install-on-azure - - Name: Configuring an Azure account - File: installing-azure-account - - Name: Installer-provisioned infrastructure - Dir: ipi - Distros: openshift-origin,openshift-enterprise - Topics: - - Name: Preparing to install a cluster - File: installing-azure-preparing-ipi - - Name: Installing a cluster - File: installing-azure-default - - Name: Installing a cluster with customizations - File: installing-azure-customizations - - Name: Installing a cluster with network customizations - File: installing-azure-network-customizations - - Name: Installing a cluster in a disconnected environment - File: installing-restricted-networks-azure-installer-provisioned - - Name: Installing a cluster into an existing VNet - File: installing-azure-vnet - - Name: Installing a private cluster - File: installing-azure-private - - Name: Installing a cluster into a government region - File: installing-azure-government-region - - Name: User-provisioned infrastructure - Dir: upi - Distros: openshift-origin,openshift-enterprise - Topics: - - Name: Preparing to install a cluster - File: installing-azure-preparing-upi - - Name: Installing a cluster in a disconnected environment with user-provisioned infrastructure - File: installing-restricted-networks-azure-user-provisioned - - Name: Installing a cluster using ARM templates - File: installing-azure-user-infra - - Name: Installing a three-node cluster - File: installing-azure-three-node - - Name: Uninstalling a cluster - File: uninstalling-cluster-azure - - Name: Installation configuration parameters for Azure - File: installation-config-parameters-azure -- Name: Installing on Azure Stack Hub - Dir: installing_azure_stack_hub - Distros: openshift-origin,openshift-enterprise - Topics: - - Name: Installation methods - File: preparing-to-install-on-azure-stack-hub - - Name: Configuring an Azure Stack Hub account - File: installing-azure-stack-hub-account - - Name: Installer-provisioned infrastructure - Dir: ipi - Distros: openshift-origin,openshift-enterprise - Topics: - - Name: Preparing to install a cluster - File: ipi-ash-preparing-to-install - - Name: Installing a cluster - File: installing-azure-stack-hub-default - - Name: Installing a cluster with network customizations - File: installing-azure-stack-hub-network-customizations - - Name: User-provisioned infrastructure - Dir: upi - Distros: openshift-origin,openshift-enterprise - Topics: - - Name: Preparing to install a cluster - File: upi-ash-preparing-to-install - - Name: Installing a cluster using ARM templates - File: installing-azure-stack-hub-user-infra - - Name: Installation configuration parameters - File: installation-config-parameters-ash - - Name: Uninstalling a cluster - File: uninstalling-cluster-azure-stack-hub -- Name: 
Installing on GCP - Dir: installing_gcp - Distros: openshift-origin,openshift-enterprise - Topics: - - Name: Preparing to install on GCP - File: preparing-to-install-on-gcp - - Name: Configuring a GCP project - File: installing-gcp-account - - Name: Installing a cluster quickly on GCP - File: installing-gcp-default - - Name: Installing a cluster on GCP with customizations - File: installing-gcp-customizations - - Name: Installing a cluster on GCP with network customizations - File: installing-gcp-network-customizations - - Name: Installing a cluster on GCP in a disconnected environment - File: installing-restricted-networks-gcp-installer-provisioned - - Name: Installing a cluster on GCP into an existing VPC - File: installing-gcp-vpc - - Name: Installing a cluster on GCP into a shared VPC - File: installing-gcp-shared-vpc - - Name: Installing a private cluster on GCP - File: installing-gcp-private - - Name: Installing a cluster on GCP using Deployment Manager templates - File: installing-gcp-user-infra - - Name: Installing a cluster into a shared VPC on GCP using Deployment Manager templates - File: installing-gcp-user-infra-vpc - - Name: Installing a cluster on GCP in a disconnected environment with user-provisioned infrastructure - File: installing-restricted-networks-gcp - - Name: Installing a three-node cluster on GCP - File: installing-gcp-three-node - - Name: Installation configuration parameters for GCP - File: installation-config-parameters-gcp - - Name: Uninstalling a cluster on GCP - File: uninstalling-cluster-gcp - - Name: Installing a GCP cluster with the support for configuring multi-architecture compute machines - File: installing-gcp-multiarch-support -- Name: Installing on IBM Cloud - Dir: installing_ibm_cloud - Distros: openshift-origin,openshift-enterprise - Topics: - - Name: Preparing to install on IBM Cloud - File: preparing-to-install-on-ibm-cloud - - Name: Configuring an IBM Cloud account - File: installing-ibm-cloud-account - - Name: Configuring IAM for IBM Cloud - File: configuring-iam-ibm-cloud - - Name: User-managed encryption - File: user-managed-encryption-ibm-cloud - - Name: Installing a cluster on IBM Cloud with customizations - File: installing-ibm-cloud-customizations - - Name: Installing a cluster on IBM Cloud with network customizations - File: installing-ibm-cloud-network-customizations - - Name: Installing a cluster on IBM Cloud into an existing VPC - File: installing-ibm-cloud-vpc - - Name: Installing a private cluster on IBM Cloud - File: installing-ibm-cloud-private - - Name: Installing a cluster on IBM Cloud in a disconnected environment - File: installing-ibm-cloud-restricted - - Name: Installation configuration parameters for IBM Cloud - File: installation-config-parameters-ibm-cloud-vpc - - Name: Uninstalling a cluster on IBM Cloud - File: uninstalling-cluster-ibm-cloud -- Name: Installing on Nutanix - Dir: installing_nutanix - Distros: openshift-origin,openshift-enterprise - Topics: - - Name: Preparing to install on Nutanix - File: preparing-to-install-on-nutanix - - Name: Fault tolerant deployments - File: nutanix-failure-domains - - Name: Installing a cluster on Nutanix - File: installing-nutanix-installer-provisioned - - Name: Installing a cluster on Nutanix in a disconnected environment - File: installing-restricted-networks-nutanix-installer-provisioned - - Name: Installing a three-node cluster on Nutanix - File: installing-nutanix-three-node - - Name: Uninstalling a cluster on Nutanix - File: uninstalling-cluster-nutanix - - Name: 
Installation configuration parameters for Nutanix - File: installation-config-parameters-nutanix -- Name: Installing on-premise with Assisted Installer - Dir: installing_on_prem_assisted - Distros: openshift-enterprise - Topics: - - Name: Installing an on-premise cluster using the Assisted Installer - File: installing-on-prem-assisted -- Name: Installing an on-premise cluster with the Agent-based Installer - Dir: installing_with_agent_based_installer - Distros: openshift-enterprise - Topics: - - Name: Preparing to install with Agent-based Installer - File: preparing-to-install-with-agent-based-installer - - Name: Understanding disconnected installation mirroring - File: understanding-disconnected-installation-mirroring - - Name: Installing a cluster - File: installing-with-agent-basic - - Name: Installing a cluster with customizations - File: installing-with-agent-based-installer - - Name: Preparing PXE assets for OCP - File: prepare-pxe-assets-agent - - Name: Preparing installation assets for iSCSI booting - File: installing-using-iscsi - - Name: Preparing an Agent-based installed cluster for the multicluster engine for Kubernetes - File: preparing-an-agent-based-installed-cluster-for-mce - - Name: Installation configuration parameters for the Agent-based Installer - File: installation-config-parameters-agent -- Name: Installing on a single node - Dir: installing_sno - Distros: openshift-enterprise,openshift-origin - Topics: - - Name: Preparing to install OpenShift on a single node - File: install-sno-preparing-to-install-sno - - Name: Installing OpenShift on a single node - File: install-sno-installing-sno -- Name: Installing on bare metal - Dir: installing_bare_metal - Distros: openshift-origin,openshift-enterprise - Topics: - - Name: Preparing to install on bare metal - File: preparing-to-install-on-bare-metal - - Name: User-provisioned infrastructure - Dir: upi - Distros: openshift-origin,openshift-enterprise - Topics: - - Name: Installing a user-provisioned cluster on bare metal - File: installing-bare-metal - - Name: Installing a user-provisioned bare metal cluster with network customizations - File: installing-bare-metal-network-customizations - - Name: Installing a user-provisioned bare metal cluster on a disconnected environment - File: installing-restricted-networks-bare-metal - - Name: Scaling a user-provisioned installation with the bare metal operator - File: scaling-a-user-provisioned-cluster-with-the-bare-metal-operator - - Name: Installation configuration parameters for bare metal - File: installation-config-parameters-bare-metal - - Name: Installer-provisioned infrastructure - Dir: ipi - Distros: openshift-origin,openshift-enterprise - Topics: - - Name: Overview - File: ipi-install-overview - - Name: Prerequisites - File: ipi-install-prerequisites - - Name: Setting up the environment for an OpenShift installation - File: ipi-install-installation-workflow - - Name: Installing a cluster - File: ipi-install-installing-a-cluster - - Name: Troubleshooting the installation - File: ipi-install-troubleshooting - - Name: Postinstallation configuration - File: ipi-install-post-installation-configuration - - Name: Expanding the cluster - File: ipi-install-expanding-the-cluster -- Name: Installing IBM Cloud Bare Metal (Classic) - Dir: installing_ibm_cloud_classic - Distros: openshift-origin,openshift-enterprise - Topics: - - Name: Prerequisites for IBM Cloud (Classic) - File: install-ibm-cloud-prerequisites - - Name: Installation workflow for IBM Cloud Bare Metal (Classic) - File: 
-- Name: Installing on IBM Z and IBM LinuxONE
-  Dir: installing_ibm_z
-  Distros: openshift-enterprise
-  Topics:
-  - Name: Installation methods
-    File: preparing-to-install-on-ibm-z
-  - Name: User-provisioned infrastructure
-    Dir: upi
-    Distros: openshift-enterprise
-    Topics:
-    - Name: Installation requirements
-      File: installing-ibm-z-reqs
-    - Name: Preparing to install a cluster
-      File: upi-ibm-z-preparing-to-install
-    - Name: Installing a cluster with z/VM
-      File: installing-ibm-z
-    - Name: Installing a cluster with z/VM in a disconnected environment
-      File: installing-restricted-networks-ibm-z
-    - Name: Installing a cluster with RHEL KVM
-      File: installing-ibm-z-kvm
-    - Name: Installing a cluster with RHEL KVM in a disconnected environment
-      File: installing-restricted-networks-ibm-z-kvm
-    - Name: Installing a cluster in an LPAR
-      File: installing-ibm-z-lpar
-    - Name: Installing a cluster in an LPAR in a disconnected environment
-      File: installing-restricted-networks-ibm-z-lpar
-    - Name: Installation configuration parameters
-      File: installation-config-parameters-ibm-z
-  - Name: Configuring additional devices
-    File: ibmz-post-install
-- Name: Installing on IBM Power
-  Dir: installing_ibm_power
-  Distros: openshift-enterprise
-  Topics:
-  - Name: Preparing to install on IBM Power
-    File: preparing-to-install-on-ibm-power
-  - Name: Installing a cluster on IBM Power
-    File: installing-ibm-power
-  - Name: Installing a cluster on IBM Power in a disconnected environment
-    File: installing-restricted-networks-ibm-power
-  - Name: Installation configuration parameters for IBM Power
-    File: installation-config-parameters-ibm-power
-- Name: Installing on IBM Power Virtual Server
-  Dir: installing_ibm_powervs
-  Distros: openshift-enterprise
-  Topics:
-  - Name: Preparing to install on IBM Power Virtual Server
-    File: preparing-to-install-on-ibm-power-vs
-  - Name: Configuring an IBM Cloud account
-    File: installing-ibm-cloud-account-power-vs
-  - Name: Creating an IBM Power Virtual Server workspace
-    File: creating-ibm-power-vs-workspace
-  - Name: Installing a cluster on IBM Power Virtual Server with customizations
-    File: installing-ibm-power-vs-customizations
-  - Name: Installing a cluster on IBM Power Virtual Server into an existing VPC
-    File: installing-ibm-powervs-vpc
-  - Name: Installing a private cluster on IBM Power Virtual Server
-    File: installing-ibm-power-vs-private-cluster
-  - Name: Installing a cluster on IBM Power Virtual Server in a disconnected environment
-    File: installing-restricted-networks-ibm-power-vs
-  - Name: Uninstalling a cluster on IBM Power Virtual Server
-    File: uninstalling-cluster-ibm-power-vs
-  - Name: Installation configuration parameters for IBM Power Virtual Server
-    File: installation-config-parameters-ibm-power-vs
-- Name: Installing on OpenStack
-  Dir: installing_openstack
-  Distros: openshift-origin,openshift-enterprise
-  Topics:
-  - Name: Preparing to install on OpenStack
-    File: preparing-to-install-on-openstack
-  - Name: Preparing to install a cluster that uses SR-IOV or OVS-DPDK on OpenStack
-    File: installing-openstack-nfv-preparing
-# - Name: Installing a cluster on OpenStack
-#   File: installing-openstack-installer
-  - Name: Installing a cluster on OpenStack with customizations
-    File: installing-openstack-installer-custom
-  - Name: Installing a cluster on OpenStack on your own infrastructure
-    File: installing-openstack-user
-  - Name: Installing a cluster on OpenStack in a disconnected environment
-    File: installing-openstack-installer-restricted
-  - Name: Installing a three-node cluster on OpenStack
-    File: installing-openstack-three-node
-  - Name: Configuring network settings after installing OpenStack
-    File: installing-openstack-network-config
-  - Name: OpenStack Cloud Controller Manager reference guide
-    File: installing-openstack-cloud-config-reference
-  - Name: Deploying on OpenStack with rootVolume and etcd on local disk
-    File: deploying-openstack-with-rootVolume-etcd-on-local-disk
-  # - Name: Load balancing deployments on OpenStack
-  #   File: installing-openstack-load-balancing
-  - Name: Uninstalling a cluster on OpenStack
-    File: uninstalling-cluster-openstack
-  - Name: Uninstalling a cluster on OpenStack from your own infrastructure
-    File: uninstalling-openstack-user
-  - Name: Installation configuration parameters for OpenStack
-    File: installation-config-parameters-openstack
-- Name: Installing on OCI
-  Dir: installing_oci
-  Distros: openshift-origin,openshift-enterprise
-  Topics:
-  - Name: Installing a cluster on Oracle Cloud Infrastructure by using the Assisted Installer
-    File: installing-oci-assisted-installer
-  - Name: Installing a cluster on Oracle Cloud Infrastructure by using the Agent-based Installer
-    File: installing-oci-agent-based-installer
-  - Name: Installing a cluster on Oracle Compute Cloud@Customer by using the Agent-based Installer
-    File: installing-c3-agent-based-installer
-  - Name: Installing a cluster on Oracle Private Cloud Appliance by using the Agent-based Installer
-    File: installing-pca-agent-based-installer
-  - Name: Installing a cluster on Oracle Compute Cloud@Customer by using the Assisted Installer
-    File: installing-c3-assisted-installer
-- Name: Installing on VMware vSphere
-  Dir: installing_vsphere
-  Distros: openshift-origin,openshift-enterprise
-  Topics:
-  - Name: Installation methods
-    File: preparing-to-install-on-vsphere
-  - Name: Installer-provisioned infrastructure
-    Dir: ipi
-    Distros: openshift-origin,openshift-enterprise
-    Topics:
-    - Name: vSphere installation requirements
-      File: ipi-vsphere-installation-reqs
-    - Name: Preparing to install a cluster
-      File: ipi-vsphere-preparing-to-install
-    - Name: Installing a cluster
-      File: installing-vsphere-installer-provisioned
-    - Name: Installing a cluster with customizations
-      File: installing-vsphere-installer-provisioned-customizations
-    - Name: Installing a cluster with network customizations
-      File: installing-vsphere-installer-provisioned-network-customizations
-    - Name: Installing a cluster in a disconnected environment
-      File: installing-restricted-networks-installer-provisioned-vsphere
-  - Name: User-provisioned infrastructure
-    Dir: upi
-    Distros: openshift-origin,openshift-enterprise
-    Topics:
-    - Name: vSphere installation requirements
-      File: upi-vsphere-installation-reqs
-    - Name: Preparing to install a cluster
-      File: upi-vsphere-preparing-to-install
-    - Name: Installing a cluster
-      File: installing-vsphere
-    - Name: Installing a cluster with network customizations
-      File: installing-vsphere-network-customizations
-    - Name: Installing a cluster in a disconnected environment
-      File: installing-restricted-networks-vsphere
-  - Name: Assisted Installer
-    Distros: openshift-enterprise
-    File: installing-vsphere-assisted-installer
-  - Name: Agent-based Installer
-    Distros: openshift-enterprise
-    File: installing-vsphere-agent-based-installer
-  - Name: Installing a three-node cluster
-    File: installing-vsphere-three-node
-  - Name: Uninstalling a cluster
-    File: uninstalling-cluster-vsphere-installer-provisioned
-  - Name: Using the vSphere Problem Detector Operator
-    File: using-vsphere-problem-detector-operator
-  - Name: Installation configuration parameters
-    File: installation-config-parameters-vsphere
-  - Name: Regions and zones for a VMware vCenter
-    File: post-install-vsphere-zones-regions-configuration
-  - Name: Enabling encryption on a vSphere cluster
-    File: vsphere-post-installation-encryption
-  - Name: Configuring the vSphere connection settings after an installation
-    File: installing-vsphere-post-installation-configuration
-- Name: Installing on any platform
-  Dir: installing_platform_agnostic
-  Distros: openshift-origin,openshift-enterprise
-  Topics:
-  - Name: Installing a cluster on any platform
-    File: installing-platform-agnostic
-- Name: Installation configuration
-  Dir: install_config
-  Distros: openshift-origin,openshift-enterprise
-  Topics:
-  - Name: Customizing nodes
-    File: installing-customizing
-  - Name: Configuring your firewall
-    File: configuring-firewall
-  - Name: Enabling Linux control group version 1 (cgroup v1)
-    File: enabling-cgroup-v1
-    Distros: openshift-enterprise
-- Name: Validation and troubleshooting
-  Dir: validation_and_troubleshooting
-  Topics:
-  - Name: Validating an installation
-    File: validating-an-installation
-    Distros: openshift-origin,openshift-enterprise
-  - Name: Troubleshooting installation issues
-    File: installing-troubleshooting
-    Distros: openshift-origin,openshift-enterprise
----
-Name: Postinstallation configuration
-Dir: post_installation_configuration
-Distros: openshift-origin,openshift-enterprise
-Topics:
-- Name: Postinstallation configuration overview
-  Distros: openshift-enterprise
-  File: index
-- Name: Configuring a private cluster
-  Distros: openshift-enterprise,openshift-origin
-  File: configuring-private-cluster
-- Name: Configuring multi-architecture compute machines on an OpenShift cluster
-  Dir: configuring-multi-arch-compute-machines
-  Distros: openshift-enterprise
-  Topics:
-  - Name: About clusters with multi-architecture compute machines
-    File: multi-architecture-configuration
-  - Name: Creating a cluster with multi-architecture compute machines on Azure
-    File: creating-multi-arch-compute-nodes-azure
-  - Name: Creating a cluster with multi-architecture compute machines on AWS
-    File: creating-multi-arch-compute-nodes-aws
-  - Name: Creating a cluster with multi-architecture compute machines on GCP
-    File: creating-multi-arch-compute-nodes-gcp
-  - Name: Creating a cluster with multi-architecture compute machines on bare metal, IBM Power, or IBM Z
-    File: creating-multi-arch-compute-nodes-bare-metal
-  - Name: Creating a cluster with multi-architecture compute machines on IBM Z and IBM LinuxONE with z/VM
-    File: creating-multi-arch-compute-nodes-ibm-z
-  - Name: Creating a cluster with multi-architecture compute machines on IBM Z and IBM LinuxONE in an LPAR
-    File: creating-multi-arch-compute-nodes-ibm-z-lpar
-  - Name: Creating a cluster with multi-architecture compute machines on IBM Z and IBM LinuxONE with RHEL KVM
-    File: creating-multi-arch-compute-nodes-ibm-z-kvm
-  - Name: Creating a cluster with multi-architecture compute machines on IBM Power
-    File: creating-multi-arch-compute-nodes-ibm-power
-  - Name: Managing your cluster with multi-architecture compute machines
-    File: multi-architecture-compute-managing
-  - Name: Managing workloads on multi-architecture clusters by using the Multiarch Tuning Operator
-    File: multiarch-tuning-operator
-  - Name: Multiarch Tuning Operator release notes
-    File: multi-arch-tuning-operator-release-notes
-- Name: Cluster tasks
-  File: cluster-tasks
-- Name: Node tasks
-  File: node-tasks
-- Name: Postinstallation network configuration
-  File: post-install-network-configuration
-- Name: Configuring image streams and image registries
-  File: post-install-image-config
-- Name: Storage configuration
-  File: post-install-storage-configuration
-- Name: Preparing for users
-  File: preparing-for-users
-- Name: Changing the cloud provider credentials configuration
-  File: changing-cloud-credentials-configuration
-- Name: Configuring alert notifications
-  File: configuring-alert-notifications
-- Name: Converting a connected cluster to a disconnected cluster
-  File: converting-to-disconnected
----
-Name: Updating clusters
-Dir: updating
-Distros: openshift-origin,openshift-enterprise
-Topics:
-- Name: Updating clusters overview
-  File: index
-  Distros: openshift-origin
-- Name: Understanding OpenShift updates
-  Dir: understanding_updates
-  Topics:
-  - Name: Introduction to OpenShift updates
-    File: intro-to-updates
-  - Name: How cluster updates work
-    File: how-updates-work
-    Distros: openshift-enterprise
-  - Name: Understanding update channels and releases
-    File: understanding-update-channels-release
-    Distros: openshift-enterprise
-  - Name: Understanding OpenShift update duration
-    File: understanding-openshift-update-duration
-- Name: Preparing to update a cluster
-  Dir: preparing_for_updates
-  Topics:
-  - Name: Preparing to update to OpenShift Container Platform 4.19
-    File: updating-cluster-prepare
-    Distros: openshift-enterprise
-  - Name: Preparing to update to OKD 4.19
-    File: updating-cluster-prepare
-    Distros: openshift-origin
-  - Name: Preparing to update a cluster with manually maintained credentials
-    File: preparing-manual-creds-update
-  - Name: Preflight validation for Kernel Module Management (KMM) Modules
-    File: kmm-preflight-validation
-- Name: Performing a cluster update
-  Dir: updating_a_cluster
-  Topics:
-  - Name: Updating a cluster using the CLI
-    File: updating-cluster-cli
-  - Name: Updating a cluster using the web console
-    File: updating-cluster-web-console
-  - Name: Performing a Control Plane Only update
-    File: control-plane-only-update
-    Distros: openshift-enterprise
-  - Name: Performing a canary rollout update
-    File: update-using-custom-machine-config-pools
-  - Name: Updating a cluster that includes RHEL compute machines
-    File: updating-cluster-rhel-compute
-    Distros: openshift-enterprise
-  - Name: Updating a cluster in a disconnected environment
-    File: disconnected-update
-  - Name: Updating hardware on nodes running on vSphere
-    File: updating-hardware-on-nodes-running-on-vsphere
-  - Name: Migrating to a cluster with multi-architecture compute machines
-    File: migrating-to-multi-payload
-  - Name: Updating the boot loader on Red Hat Enterprise Linux CoreOS nodes using bootupd
-    File: updating-bootloader-rhcos
-    Distros: openshift-enterprise
-  - Name: Updating the boot loader on Fedora CoreOS nodes using bootupd
-    File: updating-bootloader-rhcos
-    Distros: openshift-origin
-- Name: Troubleshooting a cluster update
-  Dir: troubleshooting_updates
-  Distros: openshift-enterprise,openshift-origin
-  Topics:
-  #- Name: Recovering when an update fails before it is applied
-  # File: recovering-update-before-applied
-  - Name: Gathering data about your cluster update
-    File: gathering-data-cluster-update
-  #- Name: Restoring your cluster to a previous state
-  #File: restoring-cluster-previous-state
----
-Name: Support
-Dir: support
-Distros: openshift-enterprise,openshift-online,openshift-origin
-Topics:
-- Name: Support overview
-  File: index
-- Name: Managing your cluster resources
-  File: managing-cluster-resources
-- Name: Getting support
-  File: getting-support
-  Distros: openshift-enterprise
-- Name: Remote health monitoring with connected clusters
-  Dir: remote_health_monitoring
-  Distros: openshift-enterprise,openshift-origin
-  Topics:
-  - Name: About remote health monitoring
-    File: about-remote-health-monitoring
-  - Name: Showing data collected by remote health monitoring
-    File: showing-data-collected-by-remote-health-monitoring
-  - Name: Opting out of remote health reporting
-    File: opting-out-of-remote-health-reporting
-  - Name: Enabling remote health reporting
-    File: enabling-remote-health-reporting
-  - Name: Using Insights to identify issues with your cluster
-    File: using-insights-to-identify-issues-with-your-cluster
-  - Name: Using the Insights Operator
-    File: using-insights-operator
-  - Name: Using remote health reporting in a disconnected environment
-    File: remote-health-reporting-from-restricted-network
-  - Name: Importing simple content access entitlements with Insights Operator
-    File: insights-operator-simple-access
-- Name: Gathering data about your cluster
-  File: gathering-cluster-data
-  Distros: openshift-enterprise,openshift-origin
-- Name: Summarizing cluster specifications
-  File: summarizing-cluster-specifications
-  Distros: openshift-enterprise,openshift-origin
-- Name: Troubleshooting
-  Dir: troubleshooting
-  Distros: openshift-enterprise,openshift-origin
-  Topics:
-  - Name: Troubleshooting installations
-    File: troubleshooting-installations
-  - Name: Verifying node health
-    File: verifying-node-health
-  - Name: Troubleshooting CRI-O container runtime issues
-    File: troubleshooting-crio-issues
-  - Name: Troubleshooting operating system issues
-    File: troubleshooting-operating-system-issues
-    Distros: openshift-enterprise,openshift-origin
-  - Name: Troubleshooting network issues
-    File: troubleshooting-network-issues
-    Distros: openshift-enterprise,openshift-origin
-  - Name: Troubleshooting Operator issues
-    File: troubleshooting-operator-issues
-  - Name: Investigating pod issues
-    File: investigating-pod-issues
-  - Name: Troubleshooting the Source-to-Image process
-    File: troubleshooting-s2i
-  - Name: Troubleshooting storage issues
-    File: troubleshooting-storage-issues
-  - Name: Troubleshooting Windows container workload issues
-    File: troubleshooting-windows-container-workload-issues
-  - Name: Investigating monitoring issues
-    File: investigating-monitoring-issues
-  - Name: Diagnosing OpenShift CLI (oc) issues
-    File: diagnosing-oc-issues
----
-Name: Web console
-Dir: web_console
-Distros: openshift-enterprise,openshift-origin,openshift-online
-Topics:
-- Name: Web console overview
-  File: web-console-overview
-- Name: Accessing the web console
-  File: web-console
-- Name: Using the OpenShift Container Platform dashboard to get cluster information
-  File: using-dashboard-to-get-cluster-information
-- Name: Adding user preferences
-  File: adding-user-preferences
-  Distros: openshift-enterprise,openshift-origin
-- Name: Configuring the web console
-  File: configuring-web-console
-  Distros: openshift-enterprise,openshift-origin
-- Name: Customizing the web console
-  File: customizing-the-web-console
-  Distros: openshift-enterprise,openshift-origin
-- Name: Dynamic plugins
-  Dir: dynamic-plugin
-  Distros: openshift-enterprise,openshift-origin
-  Topics:
-  - Name: Overview of dynamic plugins
-    File: overview-dynamic-plugin
-  - Name: Getting started with dynamic plugins
-    File: dynamic-plugins-get-started
-  - Name: Deploy your plugin on a cluster
-    File: deploy-plugin-cluster
-  - Name: Content Security Policy
-    File: content-security-policy
-  - Name: Dynamic plugin example
-    File: dynamic-plugin-example
-  - Name: Dynamic plugin reference
-    File: dynamic-plugins-reference
-- Name: Web terminal
-  Dir: web_terminal
-  Distros: openshift-enterprise,openshift-online
-  Topics:
-  - Name: Installing the web terminal
-    File: installing-web-terminal
-  - Name: Configuring the web terminal
-    File: configuring-web-terminal
-  - Name: Using the web terminal
-    File: odc-using-web-terminal
-  - Name: Troubleshooting the web terminal
-    File: troubleshooting-web-terminal
-  - Name: Uninstalling the web terminal
-    File: uninstalling-web-terminal
-- Name: Disabling the web console
-  File: disabling-web-console
-  Distros: openshift-enterprise,openshift-origin
-- Name: Creating quick start tutorials
-  File: creating-quick-start-tutorials
-  Distros: openshift-enterprise,openshift-origin
-- Name: Optional capabilities and products
-  File: capabilities_products-web-console
-  Distros: openshift-enterprise,openshift-origin
----
-Name: CLI tools
-Dir: cli_reference
-Distros: openshift-enterprise,openshift-origin,openshift-online
-Topics:
-- Name: CLI tools overview
-  File: index
-- Name: OpenShift CLI (oc)
-  Dir: openshift_cli
-  Topics:
-  - Name: Getting started with the OpenShift CLI
-    File: getting-started-cli
-  - Name: Configuring the OpenShift CLI
-    File: configuring-cli
-  - Name: Usage of oc and kubectl commands
-    File: usage-oc-kubectl
-  - Name: Managing CLI profiles
-    File: managing-cli-profiles
-  - Name: Extending the OpenShift CLI with plugins
-    File: extending-cli-plugins
-  - Name: OpenShift CLI developer command reference
-    File: developer-cli-commands
-  - Name: OpenShift CLI administrator command reference
-    File: administrator-cli-commands
-    Distros: openshift-enterprise,openshift-origin
-- Name: OpenShift CLI Manager
-  Dir: cli_manager
-  Topics:
-  - Name: OpenShift CLI Manager overview
-    File: index
-  - Name: OpenShift CLI Manager release notes
-    File: cli-manager-release-notes
-  - Name: Installing the OpenShift CLI Manager
-    File: cli-manager-install
-  - Name: Using the OpenShift CLI Manager
-    File: cli-manager-using
-  - Name: Uninstalling the OpenShift CLI Manager
-    File: cli-manager-uninstall
-- Name: Developer CLI (odo)
-  File: odo-important-update
-  # Dir: developer_cli_odo
-  Distros: openshift-enterprise,openshift-origin,openshift-online
-  # Topics:
-  # - Name: odo release notes
-  #   File: odo-release-notes
-  # - Name: Understanding odo
-  #   File: understanding-odo
-  # - Name: Installing odo
-  #   File: installing-odo
-  # - Name: Configuring the odo CLI
-  #   File: configuring-the-odo-cli
-  # - Name: odo CLI reference
-  #   File: odo-cli-reference
-- Name: Knative CLI (kn) for use with OpenShift Serverless
-  File: kn-cli-tools
-  Distros: openshift-enterprise,openshift-origin
-- Name: Pipelines CLI (tkn)
-  Dir: tkn_cli
-  Distros: openshift-enterprise
-  Topics:
-  - Name: Installing tkn
-    File: installing-tkn
-  - Name: Configuring tkn
-    File: op-configuring-tkn
-  - Name: Basic tkn commands
-    File: op-tkn-reference
-- Name: GitOps CLI (argocd) for use with OpenShift GitOps
-  File: gitops-argocd-cli-tools
-  Distros: openshift-enterprise
-- Name: opm CLI
-  Dir: opm
-  Distros: openshift-enterprise,openshift-origin
-  Topics:
-  - Name: Installing the opm CLI
-    File: cli-opm-install
-  - Name: opm CLI reference
-    File: cli-opm-ref
-- Name: Operator SDK
-  Dir: osdk
-  Distros: openshift-enterprise,openshift-origin
-  Topics:
-  - Name: Installing the Operator SDK CLI
-    File: cli-osdk-install
-  - Name: Operator SDK CLI reference
-    File: cli-osdk-ref
----
-Name: Security and compliance
-Dir: security
-Distros: openshift-enterprise,openshift-origin,openshift-aro
-Topics:
-- Name: Security and compliance overview
-  File: index
-- Name: Container security
-  Dir: container_security
-  Topics:
-  - Name: Understanding container security
-    File: security-understanding
-  - Name: Understanding host and VM security
-    File: security-hosts-vms
-  - Name: Hardening Red Hat Enterprise Linux CoreOS
-    File: security-hardening
-    Distros: openshift-enterprise,openshift-aro
-  - Name: Container image signatures
-    File: security-container-signature
-  - Name: Hardening Fedora CoreOS
-    File: security-hardening
-    Distros: openshift-origin
-  - Name: Understanding compliance
-    File: security-compliance
-  - Name: Securing container content
-    File: security-container-content
-  - Name: Using container registries securely
-    File: security-registries
-  - Name: Securing the build process
-    File: security-build
-  - Name: Deploying containers
-    File: security-deploy
-  - Name: Securing the container platform
-    File: security-platform
-  - Name: Securing networks
-    File: security-network
-  - Name: Securing attached storage
-    File: security-storage
-  - Name: Monitoring cluster events and logs
-    File: security-monitoring
-- Name: Configuring certificates
-  Dir: certificates
-  Distros: openshift-enterprise,openshift-origin
-  Topics:
-  - Name: Replacing the default ingress certificate
-    File: replacing-default-ingress-certificate
-  - Name: Adding API server certificates
-    File: api-server
-  - Name: Securing service traffic using service serving certificates
-    File: service-serving-certificate
-  - Name: Updating the CA bundle
-    File: updating-ca-bundle
-- Name: Certificate types and descriptions
-  Dir: certificate_types_descriptions
-  Distros: openshift-enterprise,openshift-origin
-  Topics:
-  - Name: User-provided certificates for the API server
-    File: user-provided-certificates-for-api-server
-  - Name: Proxy certificates
-    File: proxy-certificates
-  - Name: Service CA certificates
-    File: service-ca-certificates
-  - Name: Node certificates
-    File: node-certificates
-  - Name: Bootstrap certificates
-    File: bootstrap-certificates
-  - Name: etcd certificates
-    File: etcd-certificates
-  - Name: OLM certificates
-    File: olm-certificates
-  - Name: Aggregated API client certificates
-    File: aggregated-api-client-certificates
-  - Name: Machine Config Operator certificates
-    File: machine-config-operator-certificates
-  - Name: User-provided certificates for default ingress
-    File: user-provided-certificates-for-default-ingress
-  - Name: Ingress certificates
-    File: ingress-certificates
-  - Name: Monitoring and cluster logging Operator component certificates
-    File: monitoring-and-cluster-logging-operator-component-certificates
-  - Name: Control plane certificates
-    File: control-plane-certificates
-- Name: Compliance Operator
-  Dir: compliance_operator
-  Topics:
-  - Name: Compliance Operator overview
-    File: co-overview
-  - Name: Compliance Operator release notes
-    File: compliance-operator-release-notes
-  - Name: Compliance Operator support
-    File: co-support
-  - Name: Compliance Operator concepts
-    Dir: co-concepts
-    Topics:
-    - Name: Understanding the Compliance Operator
-      File: compliance-operator-understanding
-    - Name: Understanding the Custom Resource Definitions
-      File: compliance-operator-crd
-  - Name: Compliance Operator management
-    Dir: co-management
-    Topics:
-    - Name: Installing the Compliance Operator
-      File: compliance-operator-installation
-    - Name: Updating the Compliance Operator
-      File: compliance-operator-updating
-    - Name: Managing the Compliance Operator
-      File: compliance-operator-manage
-    - Name: Uninstalling the Compliance Operator
-      File: compliance-operator-uninstallation
-  - Name: Compliance Operator scan management
-    Dir: co-scans
-    Topics:
-    - Name: Supported compliance profiles
-      File: compliance-operator-supported-profiles
-    - Name: Compliance Operator scans
-      File: compliance-scans
-    - Name: Tailoring the Compliance Operator
-      File: compliance-operator-tailor
-    - Name: Retrieving Compliance Operator raw results
-      File: compliance-operator-raw-results
-    - Name: Managing Compliance Operator remediation
-      File: compliance-operator-remediation
-    - Name: Performing advanced Compliance Operator tasks
-      File: compliance-operator-advanced
-    - Name: Troubleshooting Compliance Operator scans
-      File: compliance-operator-troubleshooting
-    - Name: Using the oc-compliance plugin
-      File: oc-compliance-plug-in-using
-- Name: File Integrity Operator
-  Dir: file_integrity_operator
-  Topics:
-  - Name: File Integrity Operator overview
-    File: fio-overview
-  - Name: File Integrity Operator release notes
-    File: file-integrity-operator-release-notes
-  - Name: File Integrity Operator support
-    File: fio-support
-  - Name: Installing the File Integrity Operator
-    File: file-integrity-operator-installation
-  - Name: Updating the File Integrity Operator
-    File: file-integrity-operator-updating
-  - Name: Understanding the File Integrity Operator
-    File: file-integrity-operator-understanding
-  - Name: Configuring the File Integrity Operator
-    File: file-integrity-operator-configuring
-  - Name: Performing advanced File Integrity Operator tasks
-    File: file-integrity-operator-advanced-usage
-  - Name: Troubleshooting the File Integrity Operator
-    File: file-integrity-operator-troubleshooting
-  - Name: Uninstalling the File Integrity Operator
-    File: fio-uninstalling
-- Name: Security Profiles Operator
-  Dir: security_profiles_operator
-  Topics:
-  - Name: Security Profiles Operator overview
-    File: spo-overview
-  - Name: Security Profiles Operator release notes
-    File: spo-release-notes
-  - Name: Security Profiles Operator support
-    File: spo-support
-  - Name: Understanding the Security Profiles Operator
-    File: spo-understanding
-  - Name: Enabling the Security Profiles Operator
-    File: spo-enabling
-  - Name: Managing seccomp profiles
-    File: spo-seccomp
-  - Name: Managing SELinux profiles
-    File: spo-selinux
-  - Name: Advanced Security Profiles Operator tasks
-    File: spo-advanced
-  - Name: Troubleshooting the Security Profiles Operator
-    File: spo-troubleshooting
-  - Name: Uninstalling the Security Profiles Operator
-    File: spo-uninstalling
-- Name: NBDE Tang Server Operator
-  Dir: nbde_tang_server_operator
-  Distros: openshift-enterprise
-  Topics:
-  - Name: NBDE Tang Server Operator overview
-    File: nbde-tang-server-operator-overview
-  - Name: NBDE Tang Server Operator release notes
-    File: nbde-tang-server-operator-release-notes
-  - Name: Understanding the NBDE Tang Server Operator
-    File: nbde-tang-server-operator-understanding
-  - Name: Installing the NBDE Tang Server Operator
-    File: nbde-tang-server-operator-installing
-  - Name: Configuring and managing Tang servers using the NBDE Tang Server Operator
-    File: nbde-tang-server-operator-configuring-managing
-  - Name: Identifying URL of a Tang server deployed with the NBDE Tang Server Operator
-    File: nbde-tang-server-operator-identifying-url
-- Name: cert-manager Operator for Red Hat OpenShift
-  Dir: cert_manager_operator
-  Distros: openshift-enterprise
-  Topics:
-  - Name: cert-manager Operator for Red Hat OpenShift overview
-    File: index
-  - Name: cert-manager Operator for Red Hat OpenShift release notes
-    File: cert-manager-operator-release-notes
-  - Name: Installing the cert-manager Operator for Red Hat OpenShift
-    File: cert-manager-operator-install
-  - Name: Configuring the egress proxy
-    File: cert-manager-operator-proxy
-  - Name: Customizing cert-manager by using the cert-manager Operator API fields
-    File: cert-manager-customizing-api-fields
-  - Name: Authenticating the cert-manager Operator for Red Hat OpenShift
-    File: cert-manager-authenticate
-  - Name: Configuring an ACME issuer
-    File: cert-manager-operator-issuer-acme
-  - Name: Configuring certificates with an issuer
-    File: cert-manager-creating-certificate
-  - Name: Securing routes with the cert-manager Operator for Red Hat OpenShift
-    File: cert-manager-securing-routes
-  - Name: Integrating the cert-manager Operator with Istio-CSR
-    File: cert-manager-operator-integrating-istio
-  - Name: Monitoring the cert-manager Operator for Red Hat OpenShift
-    File: cert-manager-monitoring
-  - Name: Configuring log levels for cert-manager and the cert-manager Operator for Red Hat OpenShift
-    File: cert-manager-log-levels
-  - Name: Uninstalling the cert-manager Operator for Red Hat OpenShift
-    File: cert-manager-operator-uninstall
-- Name: Viewing audit logs
-  File: audit-log-view
-- Name: Configuring the audit log policy
-  File: audit-log-policy-config
-- Name: Configuring TLS security profiles
-  File: tls-security-profiles
-- Name: Configuring seccomp profiles
-  File: seccomp-profiles
-- Name: Allowing JavaScript-based access to the API server from additional hosts
-  File: allowing-javascript-access-api-server
-  Distros: openshift-enterprise,openshift-origin
-- Name: Encrypting etcd data
-  File: encrypting-etcd
-  Distros: openshift-enterprise,openshift-origin
-- Name: Scanning pods for vulnerabilities
-  File: pod-vulnerability-scan
-  Distros: openshift-enterprise,openshift-origin
-- Name: Network-Bound Disk Encryption (NBDE)
-  Dir: network_bound_disk_encryption
-  Topics:
-  - Name: About disk encryption technology
-    File: nbde-about-disk-encryption-technology
-  - Name: Tang server installation considerations
-    File: nbde-tang-server-installation-considerations
-  - Name: Tang server encryption key management
-    File: nbde-managing-encryption-keys
-  - Name: Disaster recovery considerations
-    File: nbde-disaster-recovery-considerations
-  Distros: openshift-enterprise,openshift-origin
----
-Name: Authentication and authorization
-Dir: authentication
-Distros: openshift-enterprise,openshift-origin
-Topics:
-- Name: Authentication and authorization overview
-  File: index
-- Name: Understanding authentication
-  File: understanding-authentication
-  Distros: openshift-enterprise,openshift-origin,openshift-online
-- Name: Configuring the internal OAuth server
-  File: configuring-internal-oauth
-- Name: Configuring OAuth clients
-  File: configuring-oauth-clients
-- Name: Managing user-owned OAuth access tokens
-  File: managing-oauth-access-tokens
-  Distros: openshift-enterprise,openshift-origin
-- Name: Understanding identity provider configuration
-  File: understanding-identity-provider
-  Distros: openshift-enterprise,openshift-origin
-- Name: Configuring identity providers
-  Dir: identity_providers
-  Topics:
-  - Name: Configuring an htpasswd identity provider
-    File: configuring-htpasswd-identity-provider
-    Distros: openshift-enterprise,openshift-origin
-  - Name: Configuring a Keystone identity provider
-    File: configuring-keystone-identity-provider
-    Distros: openshift-enterprise,openshift-origin
-  - Name: Configuring an LDAP identity provider
-    File: configuring-ldap-identity-provider
-  - Name: Configuring a basic authentication identity provider
-    File: configuring-basic-authentication-identity-provider
-    Distros: openshift-enterprise,openshift-origin
-  - Name: Configuring a request header identity provider
-    File: configuring-request-header-identity-provider
-    Distros: openshift-enterprise,openshift-origin
-  - Name: Configuring a GitHub or GitHub Enterprise identity provider
-    File: configuring-github-identity-provider
-  - Name: Configuring a GitLab identity provider
-    File: configuring-gitlab-identity-provider
-    Distros: openshift-enterprise,openshift-origin
-  - Name: Configuring a Google identity provider
-    File: configuring-google-identity-provider
-  - Name: Configuring an OpenID Connect identity provider
-    File: configuring-oidc-identity-provider
-- Name: Using RBAC to define and apply permissions
-  File: using-rbac
-- Name: Removing the kubeadmin user
-  File: remove-kubeadmin
-  Distros: openshift-enterprise,openshift-origin
-#- Name: Configuring LDAP failover
-#  File: configuring-ldap-failover
-- Name: Understanding and creating service accounts
-  File: understanding-and-creating-service-accounts
-- Name: Using service accounts in applications
-  File: using-service-accounts-in-applications
-- Name: Using a service account as an OAuth client
-  File: using-service-accounts-as-oauth-client
-- Name: Scoping tokens
-  File: tokens-scoping
-- Name: Using bound service account tokens
-  File: bound-service-account-tokens
-- Name: Managing security context constraints
-  File: managing-security-context-constraints
-  Distros: openshift-enterprise,openshift-origin
-- Name: Understanding and managing pod security admission
-  File: understanding-and-managing-pod-security-admission
-  Distros: openshift-enterprise,openshift-origin
-- Name: Impersonating the system:admin user
-  File: impersonating-system-admin
-  Distros: openshift-enterprise,openshift-origin
-- Name: Syncing LDAP groups
-  File: ldap-syncing
-  Distros: openshift-enterprise,openshift-origin
-- Name: Managing cloud provider credentials
-  Dir: managing_cloud_provider_credentials
-  Topics:
-  - Name: About the Cloud Credential Operator
-    File: about-cloud-credential-operator
-  - Name: Mint mode
-    File: cco-mode-mint
-  - Name: Passthrough mode
-    File: cco-mode-passthrough
-  - Name: Manual mode with long-term credentials for components
-    File: cco-mode-manual
-  - Name: Manual mode with short-term credentials for components
-    File: cco-short-term-creds
----
-Name: Networking
-Dir: networking
-Distros: openshift-enterprise,openshift-origin
-Topics:
-- Name: Understanding networking
-  File: understanding-networking
-- Name: Accessing hosts
-  File: accessing-hosts
-- Name: Networking dashboards
-  File: networking-dashboards
-- Name: Networking Operators
-  Dir: networking_operators
-  Distros: openshift-enterprise,openshift-origin
-  Topics:
-  - Name: Kubernetes NMState Operator
-    File: k8s-nmstate-about-the-k8s-nmstate-operator
-  - Name: AWS Load Balancer Operator
-    Dir: aws_load_balancer_operator
-    Distros: openshift-enterprise,openshift-origin
-    Topics:
-    - Name: AWS Load Balancer Operator release notes
-      File: aws-load-balancer-operator-release-notes
-    - Name: AWS Load Balancer Operator in OpenShift Container Platform
-      File: understanding-aws-load-balancer-operator
-    - Name: Preparing an AWS STS cluster for the AWS Load Balancer Operator
-      File: preparing-sts-cluster-for-albo
-    - Name: Installing the AWS Load Balancer Operator
-      File: install-aws-load-balancer-operator
-    - Name: Configuring the AWS Load Balancer Operator
-      File: configuring-aws-load-balancer-operator
-  - Name: eBPF Manager Operator
-    Dir: ebpf_manager
-    Topics:
-    - Name: About the eBPF Manager Operator
-      File: ebpf-manager-operator-about
-    - Name: Installing the eBPF Manager Operator
-      File: ebpf-manager-operator-install
-    - Name: Deploying an eBPF program
-      File: ebpf-manager-operator-deploy
-  - Name: External DNS Operator
-    Dir: external_dns_operator
-    Topics:
-    - Name: External DNS Operator release notes
-      File: external-dns-operator-release-notes
-    - Name: Understanding the External DNS Operator
-      File: understanding-external-dns-operator
-    - Name: Installing the External DNS Operator
-      File: nw-installing-external-dns-operator-on-cloud-providers
-    - Name: External DNS Operator configuration parameters
-      File: nw-configuration-parameters
-    - Name: Creating DNS records on AWS
-      File: nw-creating-dns-records-on-aws
-    - Name: Creating DNS records on Azure
-      File: nw-creating-dns-records-on-azure
-    - Name: Creating DNS records on GCP
-      File: nw-creating-dns-records-on-gcp
-    - Name: Creating DNS records on Infoblox
-      File: nw-creating-dns-records-on-infoblox
-    - Name: Configuring the cluster-wide proxy on the External DNS Operator
-      File: nw-configuring-cluster-wide-egress-proxy
-  - Name: MetalLB Operator
-    Dir: metallb-operator
-    Distros: openshift-enterprise,openshift-origin
-    Topics:
-    - Name: About MetalLB and the MetalLB Operator
-      File: about-metallb
-    - Name: Installing the MetalLB Operator
-      File: metallb-operator-install
-    - Name: Upgrading the MetalLB Operator
-      File: metallb-upgrading-operator
-  - Name: Cluster Network Operator in OpenShift Container Platform
-    File: cluster-network-operator
-    Distros: openshift-enterprise,openshift-origin
-  - Name: DNS Operator in OpenShift Container Platform
-    File: dns-operator
-    Distros: openshift-enterprise,openshift-origin
-  - Name: Ingress Operator in OpenShift Container Platform
-    File: ingress-operator
-    Distros: openshift-enterprise,openshift-origin
-  - Name: Ingress Node Firewall Operator in OpenShift Container Platform
-    File: ingress-node-firewall-operator
-    Distros: openshift-enterprise,openshift-origin
-  - Name: SR-IOV Operator
-    Dir: sr-iov-operator
-    Distros: openshift-enterprise,openshift-origin
-    Topics:
-    - Name: Installing the SR-IOV Operator
-      File: installing-sriov-operator
-    - Name: Configuring the SR-IOV Operator
-      File: configuring-sriov-operator
-    - Name: Uninstalling the SR-IOV Operator
-      File: uninstalling-sriov-operator
-- Name: Network security
-  Dir: network_security
-  Distros: openshift-enterprise,openshift-origin
-  Topics:
-  - Name: Understanding network policy APIs
-    File: network-policy-apis
-  - Name: Admin network policy
-    Dir: AdminNetworkPolicy
-    Distros: openshift-enterprise,openshift-origin
-    Topics:
-    - Name: About AdminNetworkPolicy
-      File: ovn-k-anp
-    - Name: About BaselineAdminNetworkPolicy
-      File: ovn-k-banp
-    - Name: Metrics for AdminNetworkPolicy
-      File: ovn-k-anp-banp-metrics
-    - Name: Northbound Traffic Controls for AdminNetworkPolicy
-      File: ovn-k-egress-nodes-networks-peer
-    - Name: Troubleshooting
-      File: ovn-k-anp-troubleshooting
-    - Name: Best practices
-      File: ovn-k-anp-recommended-practices
-  - Name: Network policy
-    Dir: network_policy
-    Distros: openshift-enterprise,openshift-origin
-    Topics:
-    - Name: About network policy
-      File: about-network-policy
-    - Name: Creating a network policy
-      File: creating-network-policy
-    - Name: Viewing a network policy
-      File: viewing-network-policy
-    - Name: Editing a network policy
-      File: editing-network-policy
-    - Name: Deleting a network policy
-      File: deleting-network-policy
-    - Name: Defining a default network policy for projects
-      File: default-network-policy
-    - Name: Configuring multitenant isolation with network policy
-      File: multitenant-network-policy
-  - Name: Audit logging for network security
-    File: logging-network-security
-  - Name: Egress Firewall
-    Dir: egress_firewall
-    Distros: openshift-enterprise,openshift-origin
-    Topics:
-    - Name: Viewing an egress firewall for a project
-      File: viewing-egress-firewall-ovn
-    - Name: Editing an egress firewall for a project
-      File: editing-egress-firewall-ovn
-    - Name: Removing an egress firewall from a project
-      File: removing-egress-firewall-ovn
-    - Name: Configuring an egress firewall for a project
-      File: configuring-egress-firewall-ovn
-  - Name: Configuring IPsec encryption
-    File: configuring-ipsec-ovn
-  - Name: Zero trust networking
-    File: zero-trust-networking
-- Name: Configuring the Ingress Controller for manual DNS management
-  File: ingress-controller-dnsmgt
-  Distros: openshift-enterprise,openshift-origin
-- Name: Verifying connectivity to an endpoint
-  File: verifying-connectivity-endpoint
-- Name: Changing the cluster network MTU
-  File: changing-cluster-network-mtu
-- Name: Configuring the node port service range
-  File: configuring-node-port-service-range
-- Name: Configuring the cluster network IP address range
-  File: configuring-cluster-network-range
-- Name: Configuring IP failover
-  File: configuring-ipfailover
-- Name: Configuring system controls and interface attributes using the tuning plugin
-  File: configure-syscontrols-interface-tuning-cni
-- Name: Using Stream Control Transmission Protocol
-  File: using-sctp
-  Distros: openshift-enterprise,openshift-origin
-- Name: Using Precision Time Protocol hardware
-  Dir: ptp
-  Topics:
-  - Name: About Precision Time Protocol in OpenShift cluster nodes
-    File: about-ptp
-  - Name: Configuring PTP devices
-    File: configuring-ptp
-  - Name: Developing PTP events consumer applications with the REST API v2
-    File: ptp-cloud-events-consumer-dev-reference-v2
-  - Name: PTP events REST API v2 reference
-    File: ptp-events-rest-api-reference-v2
-- Name: CIDR range definitions
-  File: cidr-range-definitions
-- Name: Multiple networks
-  Dir: multiple_networks
-  Distros: openshift-enterprise,openshift-origin
-  Topics:
-  - Name: Understanding multiple networks
-    File: understanding-multiple-networks
-  - Name: Primary networks
-    Dir: primary_networks
-    Distros: openshift-enterprise,openshift-origin
-    Topics:
-    - Name: UserDefinedNetwork CR
-      File: about-user-defined-networks
-    - Name: NetworkAttachmentDefinition CR
-      File: about-primary-nwt-nad
-  - Name: Secondary networks
-    Dir: secondary_networks
-    Distros: openshift-enterprise,openshift-origin
-    Topics:
-    - Name: Creating secondary networks on OVN-Kubernetes
-      File: creating-secondary-nwt-ovnk
-    - Name: Creating secondary networks with other CNI plugins
-      File: creating-secondary-nwt-other-cni
-    - Name: Attaching a pod to an additional network
-      File: attaching-pod
-    - Name: Configuring multi-network policies
-      File: configuring-multi-network-policy
-    - Name: Removing a pod from an additional network
-      File: removing-pod
-    - Name: Editing an additional network
-      File: editing-additional-network
-    - Name: Configuring IP address assignment for secondary networks
-      File: configuring-ip-secondary-nwt
-    - Name: Configuring the master interface in the container network namespace
-      File: configuring-master-interface
-    - Name: Removing an additional network
-      File: removing-additional-network
-    - Name: About virtual routing and forwarding
-      File: about-virtual-routing-and-forwarding
-    - Name: Assigning a secondary network to a VRF
-      File: assigning-a-secondary-network-to-a-vrf
-- Name: Hardware networks
-  Dir: hardware_networks
-  Distros: openshift-enterprise,openshift-origin
-  Topics:
-  - Name: About Single Root I/O Virtualization (SR-IOV) hardware networks
-    File: about-sriov
-  - Name: Configuring an SR-IOV network device
-    File: configuring-sriov-device
-  - Name: Configuring an SR-IOV Ethernet network attachment
-    File: configuring-sriov-net-attach
-  - Name: Configuring an SR-IOV InfiniBand network attachment
-    File: configuring-sriov-ib-attach
-  - Name: Configuring an RDMA subsystem for SR-IOV
-    File: configuring-sriov-rdma-cni
-  - Name: Configuring interface-level network sysctl settings and all-multicast mode for SR-IOV networks
-    File: configuring-interface-sysctl-sriov-device
-  - Name: Configuring QinQ support for SR-IOV networks
-    File: configuring-sriov-qinq-support
-  - Name: Using high performance multicast
-    File: using-sriov-multicast
-  - Name: Using DPDK and RDMA
-    File: using-dpdk-and-rdma
-  - Name: Using pod-level bonding for secondary networks
-    File: using-pod-level-bonding
-  - Name: Configuring hardware offloading
-    File: configuring-hardware-offloading
-  - Name: Switching Bluefield-2 from NIC to DPU mode
-    File: switching-bf2-nic-dpu
-- Name: OVN-Kubernetes network plugin
-  Dir: ovn_kubernetes_network_provider
-  Topics:
-  - Name: About the OVN-Kubernetes network plugin
-    File: about-ovn-kubernetes
-  - Name: OVN-Kubernetes architecture
-    File: ovn-kubernetes-architecture-assembly
-  - Name: OVN-Kubernetes troubleshooting
-    File: ovn-kubernetes-troubleshooting-sources
-  - Name: OVN-Kubernetes traffic tracing
-    File: ovn-kubernetes-tracing-using-ovntrace
-  - Name: Converting to IPv4/IPv6 dual stack networking
-    File: converting-to-dual-stack
-  - Name: Configuring internal subnets
-    File: configure-ovn-kubernetes-subnets
-  - Name: Configuring gateway mode
-    File: configuring-gateway-mode
-  - Name: Configuring an external gateway on the default network
-    File: configuring-secondary-external-gateway
-  - Name: Configuring an egress IP address
-    File: configuring-egress-ips-ovn
-  - Name: Assigning an egress IP address
-    File: assigning-egress-ips-ovn
-  - Name: Configuring an egress service
-    File: configuring-egress-traffic-for-vrf-loadbalancer-services
-  - Name: Considerations for the use of an egress router pod
-    File: using-an-egress-router-ovn
-  - Name: Deploying an egress router pod in redirect mode
-    File: deploying-egress-router-ovn-redirection
-  - Name: Enabling multicast for a project
-    File: enabling-multicast
-  - Name: Disabling multicast for a project
-    File: disabling-multicast
-  - Name: Tracking network flows
-    File: tracking-network-flows
-  - Name: Configuring hybrid networking
-    File: configuring-hybrid-networking
-- Name: Configuring Routes
-  Dir: routes
-  Topics:
-  - Name: Route configuration
-    File: route-configuration
-  - Name: Secured routes
-    File: secured-routes
-- Name: Configuring ingress cluster traffic
-  Dir: configuring_ingress_cluster_traffic
-  Topics:
-  - Name: Overview
-    File: overview-traffic
-    Distros: openshift-enterprise,openshift-origin
-  - Name: Configuring ExternalIPs for services
-    File: configuring-externalip
-    Distros: openshift-enterprise,openshift-origin
-  - Name: Configuring ingress cluster traffic using an Ingress Controller
-    File: configuring-ingress-cluster-traffic-ingress-controller
-  - Name: Configuring the Ingress Controller endpoint publishing strategy
-    File: nw-configuring-ingress-controller-endpoint-publishing-strategy
-    Distros: openshift-enterprise,openshift-origin
-  - Name: Configuring ingress cluster traffic using a load balancer
-    File: configuring-ingress-cluster-traffic-load-balancer
-    Distros: openshift-enterprise,openshift-origin
-  - Name: Configuring ingress cluster traffic on AWS
-    File: configuring-ingress-cluster-traffic-aws
-    Distros: openshift-enterprise,openshift-origin
-  - Name: Configuring ingress cluster traffic using a service external IP
-    File: configuring-ingress-cluster-traffic-service-external-ip
-    Distros: openshift-enterprise,openshift-origin
-  - Name: Configuring ingress cluster traffic using a NodePort
-    File: configuring-ingress-cluster-traffic-nodeport
-    Distros: openshift-enterprise,openshift-origin
-  - Name: Configuring ingress cluster traffic using load balancer allowed source ranges
-    File: configuring-ingress-cluster-traffic-load-balancer-allowed-source-ranges
-    Distros: openshift-enterprise,openshift-origin
-  - Name: Patching existing ingress objects
-    File: configuring-ingress-cluster-patch-fields
-    Distros: openshift-enterprise,openshift-origin
-  # Kubernetes NMState (TECHNOLOGY PREVIEW)
-- Name: Kubernetes NMState
-  Dir: k8s_nmstate
-  Topics:
-  - Name: Observing and updating node network state and configuration
-    File: k8s-nmstate-updating-node-network-config
-  - Name: Troubleshooting node network configuration
-    File: k8s-nmstate-troubleshooting-node-network
-- Name: Configuring the cluster-wide proxy
-  File: enable-cluster-wide-proxy
-  Distros: openshift-enterprise,openshift-origin
-- Name: Configuring a custom PKI
-  File: configuring-a-custom-pki
-  Distros: openshift-enterprise,openshift-origin
-- Name: Load balancing on OpenStack
-  File: load-balancing-openstack
-- Name: Load balancing with MetalLB
-  Dir: metallb
-  Topics:
-  - Name: Configuring MetalLB address pools
-    File: metallb-configure-address-pools
-  - Name: Advertising the IP address pools
-    File: about-advertising-ipaddresspool
-  - Name: Configuring MetalLB BGP peers
-    File: metallb-configure-bgp-peers
-  - Name: Advertising an IP address pool using the community alias
-    File: metallb-configure-community-alias
-  - Name: Configuring MetalLB BFD profiles
-    File: metallb-configure-bfd-profiles
-  - Name: Configuring services to use MetalLB
-    File: metallb-configure-services
-  - Name: Managing symmetric routing with MetalLB
-    File: metallb-configure-return-traffic
-  - Name: Configuring the integration of MetalLB and FRR-K8s
-    File: metallb-frr-k8s
-  - Name: MetalLB logging, troubleshooting, and support
-    File: metallb-troubleshoot-support
-- Name: Associating secondary interfaces metrics to network attachments
-  File: associating-secondary-interfaces-metrics-to-network-attachments
----
-Name: Storage
-Dir: storage
-Distros: openshift-enterprise,openshift-origin,openshift-online
-Topics:
-- Name: Storage overview
-  File: index
-  Distros: openshift-enterprise,openshift-origin,openshift-online
-- Name: Understanding ephemeral storage
-  File: understanding-ephemeral-storage
-  Distros: openshift-enterprise,openshift-origin,openshift-online
-- Name: Understanding persistent storage
-  File: understanding-persistent-storage
-  Distros: openshift-enterprise,openshift-origin,openshift-online
-- Name: Configuring persistent storage
-  Dir: persistent_storage
-  Distros: openshift-enterprise,openshift-origin
-  Topics:
-  - Name: Persistent storage using AWS Elastic Block Store
-    File: persistent-storage-aws
-  - Name: Persistent storage using Azure Disk
-    File: persistent-storage-azure
-  - Name: Persistent storage using Azure File
-    File: persistent-storage-azure-file
-  - Name: Persistent storage using Cinder
-    File: persistent-storage-cinder
-  - Name: Persistent storage using Fibre Channel
-    File: persistent-storage-fibre
-  - Name: Persistent storage using FlexVolume
-    File: persistent-storage-flexvolume
-  - Name: Persistent storage using GCE Persistent Disk
-    File: persistent-storage-gce
-  - Name: Persistent storage using iSCSI
-    File: persistent-storage-iscsi
-  - Name: Persistent storage using NFS
-    File: persistent-storage-nfs
-  - Name: Persistent storage using Red Hat OpenShift Data Foundation
-    File: persistent-storage-ocs
-  - Name: Persistent storage using VMware vSphere
-    File: persistent-storage-vsphere
-  - Name: Persistent storage using local storage
-    Dir: persistent_storage_local
-    Distros: openshift-enterprise,openshift-origin
-    Topics:
-    - Name: Local storage overview
-      File: ways-to-provision-local-storage
-    - Name: Persistent storage using local volumes
-      File: persistent-storage-local
-    - Name: Persistent storage using hostPath
-      File: persistent-storage-hostpath
-    - Name: Persistent storage using LVM Storage
-      File: persistent-storage-using-lvms
-- Name: Using Container Storage Interface (CSI)
-  Dir: container_storage_interface
-  Distros: openshift-enterprise,openshift-origin
-  Topics:
-  - Name: Configuring CSI volumes
-    File: persistent-storage-csi
-  - Name: CSI inline ephemeral volumes
-    File: ephemeral-storage-csi-inline
-  - Name: CSI volume snapshots
-    File: persistent-storage-csi-snapshots
-  - Name: CSI volume group snapshots
-    File: persistent-storage-csi-group-snapshots
-  - Name: CSI volume cloning
-    File: persistent-storage-csi-cloning
-  - Name: Managing the default storage class
-    File: persistent-storage-csi-sc-manage
-  - Name: CSI automatic migration
-    File: persistent-storage-csi-migration
-  - Name: AWS Elastic Block Store CSI Driver Operator
-    File: persistent-storage-csi-ebs
-  - Name: AWS Elastic File Service CSI Driver Operator
-    File: persistent-storage-csi-aws-efs
-  - Name: Azure Disk CSI Driver Operator
-    File: persistent-storage-csi-azure
-  - Name: Azure File CSI Driver Operator
-    File: persistent-storage-csi-azure-file
-  - Name: Azure Stack Hub CSI Driver Operator
-    File: persistent-storage-csi-azure-stack-hub
-  - Name: GCP PD CSI Driver Operator
-    File: persistent-storage-csi-gcp-pd
-  - Name: GCP Filestore CSI Driver Operator
-    File: persistent-storage-csi-google-cloud-file
-  - Name: IBM Cloud Block Storage (VPC) CSI Driver Operator
-    File: persistent-storage-csi-ibm-cloud-vpc-block
-  - Name: IBM Power Virtual Server Block Storage CSI Driver Operator
-    File: persistent-storage-csi-ibm-powervs-block
-  - Name: OpenStack Cinder CSI Driver Operator
-    File: persistent-storage-csi-cinder
-  - Name: OpenStack Manila CSI Driver Operator
-    File: persistent-storage-csi-manila
-  - Name: Secrets Store CSI Driver Operator
-    File: persistent-storage-csi-secrets-store
-  - Name: CIFS/SMB CSI Driver Operator
-    File: persistent-storage-csi-smb-cifs
-  - Name: VMware vSphere CSI Driver Operator
-    File: persistent-storage-csi-vsphere
-- Name: Generic ephemeral volumes
-  File: generic-ephemeral-vols
-  Distros: openshift-enterprise,openshift-origin,openshift-online
-- Name: Expanding persistent volumes
-  File: expanding-persistent-volumes
-  Distros: openshift-enterprise,openshift-origin
-- Name: Dynamic provisioning
-  File: dynamic-provisioning
-  Distros: openshift-enterprise,openshift-origin
-- Name: Detach volumes after non-graceful node shutdown
-  File: persistent-storage-csi-vol-detach-non-graceful-shutdown
-  Distros: openshift-enterprise,openshift-origin,openshift-online
----
-Name: Registry
-Dir: registry
-Distros: openshift-enterprise,openshift-origin
-Topics:
-- Name: Registry overview
-  File: index
-- Name: Image Registry Operator in OpenShift Container Platform
-  File: configuring-registry-operator
-  Distros: openshift-enterprise
-- Name: Image Registry Operator in OKD
-  File: configuring-registry-operator
-  Distros: openshift-origin
-- Name: Setting up and configuring the registry
-  Dir: configuring_registry_storage
-  Distros: openshift-enterprise,openshift-origin
-  Topics:
-  - Name: Configuring the registry for AWS user-provisioned infrastructure
-    File: configuring-registry-storage-aws-user-infrastructure
-  - Name: Configuring the registry for GCP user-provisioned infrastructure
-    File: configuring-registry-storage-gcp-user-infrastructure
-  - Name: Configuring the registry for OpenStack user-provisioned infrastructure
-    File: configuring-registry-storage-openstack-user-infrastructure
-  - Name: Configuring the registry for Azure user-provisioned infrastructure
-    File: configuring-registry-storage-azure-user-infrastructure
-  - Name: Configuring the registry for OpenStack
-    File: configuring-registry-storage-osp
-  - Name: Configuring the registry for bare metal
-    File: configuring-registry-storage-baremetal
-  - Name: Configuring the registry for vSphere
-    File: configuring-registry-storage-vsphere
-  - Name: Configuring the registry for OpenShift Data Foundation
-    File: configuring-registry-storage-rhodf
-    Distros: openshift-enterprise,openshift-origin
-  - Name: Configuring the registry for Nutanix
-    File: configuring-registry-storage-nutanix
-    Distros: openshift-enterprise,openshift-origin
-- Name: Accessing the registry
-  File: accessing-the-registry
-- Name: Exposing the registry
-  File: securing-exposing-registry
-  Distros: openshift-enterprise,openshift-origin
----
-Name: Operators
-Dir: operators
-Distros: openshift-enterprise,openshift-origin
-Topics:
-- Name: Operators overview
-  File: index
-- Name: Understanding Operators
-  Dir: understanding
-  Topics:
-  - Name: What are Operators?
-    File: olm-what-operators-are
-  - Name: Packaging format
-    File: olm-packaging-format
-  - Name: Common terms
-    File: olm-common-terms
-  - Name: Operator Lifecycle Manager (OLM)
-    Dir: olm
-    Distros: openshift-enterprise,openshift-origin
-    Topics:
-    - Name: Concepts and resources
-      File: olm-understanding-olm
-    - Name: Architecture
-      File: olm-arch
-    - Name: Workflow
-      File: olm-workflow
-    - Name: Dependency resolution
-      File: olm-understanding-dependency-resolution
-    - Name: Operator groups
-      File: olm-understanding-operatorgroups
-    - Name: Multitenancy and Operator colocation
-      File: olm-colocation
-    - Name: Operator conditions
-      File: olm-operatorconditions
-    - Name: Metrics
-      File: olm-understanding-metrics
-    - Name: Webhooks
-      File: olm-webhooks
-    - Name: OperatorHub
-      Distros: openshift-enterprise,openshift-origin
-      File: olm-understanding-operatorhub
-    - Name: Red Hat-provided Operator catalogs
-      Distros: openshift-enterprise
-      File: olm-rh-catalogs
-    - Name: Operators in multitenant clusters
-      Distros: openshift-enterprise,openshift-origin
-      File: olm-multitenancy
-  - Name: CRDs
-    Dir: crds
-    Topics:
-    - Name: Extending the Kubernetes API with CRDs
-      File: crd-extending-api-with-crds
-      Distros: openshift-origin,openshift-enterprise
-    - Name: Managing resources from CRDs
-      File: crd-managing-resources-from-crds
-      Distros: openshift-origin,openshift-enterprise
-- Name: User tasks
-  Dir: user
-  Topics:
-  - Name: Creating applications from installed Operators
-    File: olm-creating-apps-from-installed-operators
-    Distros: openshift-enterprise,openshift-origin
-  - Name: Installing Operators in your namespace
-    File: olm-installing-operators-in-namespace
-    Distros: openshift-enterprise,openshift-origin
-- Name: Administrator tasks
-  Dir: admin
-  Topics:
-  - Name: Adding Operators to a cluster
-    File: olm-adding-operators-to-cluster
-    Distros: openshift-enterprise,openshift-origin
-  - Name: Updating installed Operators
-    File: olm-upgrading-operators
-    Distros: openshift-enterprise,openshift-origin
-  - Name: Deleting Operators from a cluster
-    File: olm-deleting-operators-from-cluster
-    Distros: openshift-enterprise,openshift-origin
-  - Name: Configuring OLM features
-    File: olm-config
-    Distros: openshift-enterprise,openshift-origin
-  - Name: Configuring proxy support
-    File: olm-configuring-proxy-support
-    Distros: openshift-enterprise,openshift-origin
-  - Name: Viewing Operator status
-    File: olm-status
-    Distros: openshift-enterprise,openshift-origin
-  - Name: Managing Operator conditions
-    File: olm-managing-operatorconditions
-    Distros: openshift-origin,openshift-enterprise
-  - Name: Allowing non-cluster administrators to install Operators
-    File: olm-creating-policy
-    Distros: openshift-origin,openshift-enterprise
-  - Name: Managing custom catalogs
-    File: olm-managing-custom-catalogs
-    Distros: openshift-origin,openshift-enterprise
-  - Name: Using OLM in disconnected environments
-    File: olm-restricted-networks
-    Distros: openshift-origin,openshift-enterprise
-  - Name: Catalog source pod scheduling
-    File: olm-cs-podsched
-    Distros: openshift-origin,openshift-enterprise
-  - Name: Troubleshooting Operator issues
-    File: olm-troubleshooting-operator-issues
-    Distros: openshift-enterprise,openshift-origin
-- Name: Developing Operators
-  Dir: operator_sdk
-  Distros: openshift-origin,openshift-enterprise
-  Topics:
-  - Name: About the Operator SDK
-    File: osdk-about
-  - Name: Installing the Operator SDK CLI
-    File: osdk-installing-cli
-  - Name: Go-based Operators
-    Dir: golang
-    Topics:
- Name: Getting started - File: osdk-golang-quickstart - - Name: Tutorial - File: osdk-golang-tutorial - - Name: Project layout - File: osdk-golang-project-layout - - Name: Updating Go-based projects - File: osdk-golang-updating-projects - - Name: Ansible-based Operators - Dir: ansible - Topics: - - Name: Getting started - File: osdk-ansible-quickstart - - Name: Tutorial - File: osdk-ansible-tutorial - - Name: Project layout - File: osdk-ansible-project-layout - - Name: Updating Ansible-based projects - File: osdk-ansible-updating-projects - - Name: Ansible support - File: osdk-ansible-support - - Name: Kubernetes Collection for Ansible - File: osdk-ansible-k8s-collection - - Name: Using Ansible inside an Operator - File: osdk-ansible-inside-operator - - Name: Custom resource status management - File: osdk-ansible-cr-status - - Name: Helm-based Operators - Dir: helm - Topics: - - Name: Getting started - File: osdk-helm-quickstart - - Name: Tutorial - File: osdk-helm-tutorial - - Name: Project layout - File: osdk-helm-project-layout - - Name: Updating Helm-based projects - File: osdk-helm-updating-projects - - Name: Helm support - File: osdk-helm-support - - Name: Defining cluster service versions (CSVs) - File: osdk-generating-csvs - - Name: Working with bundle images - File: osdk-working-bundle-images - - Name: Complying with pod security admission - File: osdk-complying-with-psa - - Name: Token authentication - Dir: token_auth - Topics: - - Name: Token authentication for Operators on cloud providers - File: osdk-token-auth - - Name: CCO-based workflow for OLM-managed Operators with AWS STS - File: osdk-cco-aws-sts - - Name: CCO-based workflow for OLM-managed Operators with Microsoft Entra Workload ID - File: osdk-cco-azure - - Name: CCO-based workflow for OLM-managed Operators with GCP Workload Identity - File: osdk-cco-gcp - - Name: Validating Operators using the scorecard - File: osdk-scorecard - - Name: Validating Operator bundles - File: osdk-bundle-validate - - Name: High-availability or single-node cluster detection and support - File: osdk-ha-sno - - Name: Configuring built-in monitoring with Prometheus - File: osdk-monitoring-prometheus - - Name: Configuring leader election - File: osdk-leader-election - - Name: Configuring support for multiple platforms - File: osdk-multi-arch-support - - Name: Object pruning utility - File: osdk-pruning-utility - - Name: Migrating package manifest projects to bundle format - File: osdk-pkgman-to-bundle - - Name: Operator SDK CLI reference - File: osdk-cli-ref - - Name: Migrating to Operator SDK v0.1.0 - File: osdk-migrating-to-v0-1-0 - Distros: openshift-origin -- Name: Cluster Operators reference - File: operator-reference -- Name: OLM v1 - Dir: olm_v1 - Distros: openshift-origin,openshift-enterprise - Topics: - - Name: About OLM v1 - File: index ---- -Name: Extensions -Dir: extensions -Distros: openshift-enterprise,openshift-origin -Topics: -- Name: Extensions overview - File: index -- Name: Architecture - Dir: arch - Topics: - - Name: Components overview - File: components - - Name: Operator Controller - File: operator-controller - - Name: Catalogd - File: catalogd -- Name: Common terms - File: of-terms -- Name: Catalogs - Dir: catalogs - Topics: - - Name: File-based catalogs - File: fbc - - Name: Red Hat-provided catalogs - File: rh-catalogs - - Name: Managing catalogs - File: managing-catalogs - - Name: Catalog content resolution - File: catalog-content-resolution - - Name: Creating catalogs - File: creating-catalogs - - Name: Disconnected 
environment support in OLM v1 - File: disconnected-catalogs -- Name: Cluster extensions - Dir: ce - Topics: - - Name: Managing extensions - File: managing-ce - - Name: User access to extension resources - File: user-access-resources - - Name: Update paths - File: update-paths - - Name: CRD upgrade safety - File: crd-upgrade-safety ---- -Name: CI/CD -Dir: cicd -Distros: openshift-enterprise,openshift-origin,openshift-online -Topics: -- Name: CI/CD overview - Dir: overview - Distros: openshift-enterprise,openshift-origin,openshift-online - Topics: - - Name: About CI/CD - File: index -- Name: Builds using Shipwright - Dir: builds_using_shipwright - Distros: openshift-enterprise - Topics: - - Name: Overview of Builds - File: overview-openshift-builds -- Name: Builds using BuildConfig - Dir: builds - Distros: openshift-enterprise,openshift-origin,openshift-online - Topics: - - Name: Understanding image builds - File: understanding-image-builds - - Name: Understanding build configurations - File: understanding-buildconfigs - - Name: Creating build inputs - File: creating-build-inputs - - Name: Managing build output - File: managing-build-output - - Name: Using build strategies - File: build-strategies - - Name: Custom image builds with Buildah - File: custom-builds-buildah - Distros: openshift-enterprise,openshift-origin - - Name: Performing and configuring basic builds - File: basic-build-operations - Distros: openshift-enterprise,openshift-origin,openshift-online - - Name: Triggering and modifying builds - File: triggering-builds-build-hooks - Distros: openshift-enterprise,openshift-origin,openshift-online - - Name: Performing advanced builds - File: advanced-build-operations - Distros: openshift-enterprise,openshift-origin - - Name: Using Red Hat subscriptions in builds - File: running-entitled-builds - Distros: openshift-enterprise,openshift-origin - - Name: Securing builds by strategy - File: securing-builds-by-strategy - Distros: openshift-enterprise,openshift-origin - - Name: Build configuration resources - File: build-configuration - Distros: openshift-enterprise,openshift-origin - - Name: Troubleshooting builds - File: troubleshooting-builds - Distros: openshift-enterprise,openshift-origin - - Name: Setting up additional trusted certificate authorities for builds - File: setting-up-trusted-ca - Distros: openshift-enterprise,openshift-origin -- Name: Pipelines - Dir: pipelines - Distros: openshift-enterprise - Topics: - - Name: About OpenShift Pipelines - File: about-pipelines -- Name: GitOps - Dir: gitops - Distros: openshift-enterprise - Topics: - - Name: About OpenShift GitOps - File: about-redhat-openshift-gitops -- Name: Jenkins - Dir: jenkins - Distros: openshift-enterprise - Topics: - - Name: Configuring Jenkins images - File: images-other-jenkins - - Name: Jenkins agent - File: images-other-jenkins-agent - - Name: Migrating from Jenkins to OpenShift Pipelines - File: migrating-from-jenkins-to-openshift-pipelines - - Name: Important changes to OpenShift Jenkins images - File: important-changes-to-openshift-jenkins-images ---- -Name: Images -Dir: openshift_images -Distros: openshift-enterprise,openshift-origin,openshift-online -Topics: -- Name: Overview of images - File: index -- Name: Configuring the Cluster Samples Operator - File: configuring-samples-operator - Distros: openshift-enterprise,openshift-origin -- Name: Using the Cluster Samples Operator with an alternate registry - File: samples-operator-alt-registry - Distros: openshift-enterprise,openshift-origin -- Name: 
Creating images - File: create-images -- Name: Managing images - Dir: managing_images - Topics: - - Name: Managing images overview - File: managing-images-overview - - Name: Tagging images - File: tagging-images - - Name: Image pull policy - File: image-pull-policy - - Name: Using image pull secrets - File: using-image-pull-secrets -- Name: Managing image streams - File: image-streams-manage - Distros: openshift-enterprise,openshift-origin -- Name: Using image streams with Kubernetes resources - File: using-imagestreams-with-kube-resources - Distros: openshift-enterprise,openshift-origin -- Name: Triggering updates on image stream changes - File: triggering-updates-on-imagestream-changes - Distros: openshift-enterprise,openshift-origin -- Name: Image configuration resources - File: image-configuration - Distros: openshift-enterprise,openshift-origin -- Name: Using images - Dir: using_images - Distros: openshift-enterprise,openshift-origin - Topics: - - Name: Using images overview - File: using-images-overview - - Name: Source-to-image - File: using-s2i-images - - Name: Customizing source-to-image images - File: customizing-s2i-images ---- -Name: Building applications -Dir: applications -Distros: openshift-enterprise,openshift-origin -Topics: -- Name: Building applications overview - File: index -- Name: Projects - Dir: projects - Topics: - - Name: Working with projects - File: working-with-projects - - Name: Creating a project as another user - File: creating-project-other-user - Distros: openshift-enterprise,openshift-origin - - Name: Configuring project creation - File: configuring-project-creation - Distros: openshift-enterprise,openshift-origin -- Name: Creating applications - Dir: creating_applications - Topics: - - Name: Using templates - File: using-templates - - Name: Creating applications using the Developer perspective - File: odc-creating-applications-using-developer-perspective - - Name: Creating applications from installed Operators - File: creating-apps-from-installed-operators - - Name: Creating applications by using the CLI - File: creating-applications-using-cli - - Name: Creating applications using Ruby on Rails - File: templates-using-ruby-on-rails -- Name: Viewing application composition by using the Topology view - File: odc-viewing-application-composition-using-topology-view -- Name: Exporting applications - File: odc-exporting-applications -- Name: Working with Helm charts - Dir: working_with_helm_charts - Topics: - - Name: Understanding Helm - File: understanding-helm - - Name: Installing Helm - File: installing-helm - - Name: Configuring custom Helm chart repositories - File: configuring-custom-helm-chart-repositories - - Name: Working with Helm releases - File: odc-working-with-helm-releases -- Name: Deployments - Dir: deployments - Topics: - - Name: Understanding deployments - File: what-deployments-are - - Name: Managing deployment processes - File: managing-deployment-processes - - Name: Using deployment strategies - File: deployment-strategies - - Name: Using route-based deployment strategies - File: route-based-deployment-strategies -- Name: Quotas - Dir: quotas - Distros: openshift-origin,openshift-enterprise - Topics: - - Name: Resource quotas per project - File: quotas-setting-per-project - - Name: Resource quotas across multiple projects - File: quotas-setting-across-multiple-projects - Distros: openshift-enterprise,openshift-origin -- Name: Using config maps with applications - File: config-maps -- Name: Monitoring project and application metrics using
the Developer perspective - File: odc-monitoring-project-and-application-metrics-using-developer-perspective -- Name: Monitoring application health - File: application-health -- Name: Editing applications - File: odc-editing-applications -- Name: Working with quotas - File: working-with-quotas - Distros: openshift-online -- Name: Pruning objects to reclaim resources - File: pruning-objects - Distros: openshift-origin,openshift-enterprise -- Name: Idling applications - File: idling-applications - Distros: openshift-origin,openshift-enterprise -- Name: Deleting applications - File: odc-deleting-applications -- Name: Using the Red Hat Marketplace - File: red-hat-marketplace - Distros: openshift-origin,openshift-enterprise ---- -Name: Serverless -Dir: serverless -Distros: openshift-enterprise -Topics: -- Name: About Serverless - Dir: about - Topics: - - Name: Serverless overview - File: about-serverless ---- -Name: Machine configuration -Dir: machine_configuration -Distros: openshift-enterprise, openshift-origin -Topics: -- Name: Machine configuration overview - File: index -- Name: Using machine config objects to configure nodes - File: machine-configs-configure -- Name: Using node disruption policies to minimize disruption from machine config changes - File: machine-config-node-disruption -- Name: Configuring MCO-related custom resources - File: machine-configs-custom -- Name: Updated boot images - File: mco-update-boot-images -- Name: Managing unused rendered machine configs - File: machine-configs-garbage-collection -- Name: Red Hat Enterprise Linux (RHEL) CoreOS image layering - File: mco-coreos-layering - Distros: openshift-enterprise -- Name: Fedora CoreOS image layering - File: mco-coreos-layering - Distros: openshift-origin -- Name: Machine Config Daemon metrics - File: machine-config-daemon-metrics ---- -Name: Machine management -Dir: machine_management -Distros: openshift-origin,openshift-enterprise -Topics: -- Name: Overview of machine management - File: index -- Name: Managing compute machines with the Machine API - Dir: creating_machinesets - Distros: openshift-origin,openshift-enterprise - Topics: - - Name: Creating a compute machine set on AWS - File: creating-machineset-aws - - Name: Creating a compute machine set on Azure - File: creating-machineset-azure - - Name: Creating a compute machine set on Azure Stack Hub - File: creating-machineset-azure-stack-hub - - Name: Creating a compute machine set on GCP - File: creating-machineset-gcp - - Name: Creating a compute machine set on IBM Cloud - File: creating-machineset-ibm-cloud - - Name: Creating a compute machine set on IBM Power Virtual Server - File: creating-machineset-ibm-power-vs - - Name: Creating a compute machine set on Nutanix - File: creating-machineset-nutanix - - Name: Creating a compute machine set on OpenStack - File: creating-machineset-osp - - Name: Creating a compute machine set on vSphere - File: creating-machineset-vsphere - - Name: Creating a compute machine set on bare metal - File: creating-machineset-bare-metal -- Name: Manually scaling a compute machine set - File: manually-scaling-machineset -- Name: Modifying a compute machine set - File: modifying-machineset -- Name: Machine phases and lifecycle - File: machine-phases-lifecycle -- Name: Deleting a machine - File: deleting-machine -- Name: Applying autoscaling to a cluster - File: applying-autoscaling -- Name: Creating infrastructure machine sets - File: creating-infrastructure-machinesets -- Name: Adding a RHEL compute machine - File: 
adding-rhel-compute - Distros: openshift-enterprise -- Name: Adding more RHEL compute machines - File: more-rhel-compute - Distros: openshift-enterprise -- Name: Managing user-provisioned infrastructure manually - Dir: user_infra - Topics: - - Name: Adding compute machines to clusters with user-provisioned infrastructure manually - File: adding-compute-user-infra-general - - Name: Adding compute machines to AWS using CloudFormation templates - File: adding-aws-compute-user-infra - - Name: Adding compute machines to vSphere manually - File: adding-vsphere-compute-user-infra - - Name: Adding compute machines to bare metal - File: adding-bare-metal-compute-user-infra -- Name: Managing control plane machines - Dir: control_plane_machine_management - Topics: - - Name: About control plane machine sets - File: cpmso-about - - Name: Getting started with control plane machine sets - File: cpmso-getting-started - - Name: Managing control plane machines with control plane machine sets - File: cpmso-managing-machines - - Name: Control plane machine set configuration - File: cpmso-configuration - - Name: Configuration options for control plane machines - Dir: cpmso_provider_configurations - Topics: - - Name: Control plane configuration options for Amazon Web Services - File: cpmso-config-options-aws - - Name: Control plane configuration options for Microsoft Azure - File: cpmso-config-options-azure - - Name: Control plane configuration options for Google Cloud Platform - File: cpmso-config-options-gcp - - Name: Control plane configuration options for Nutanix - File: cpmso-config-options-nutanix - - Name: Control plane configuration options for Red Hat OpenStack Platform - File: cpmso-config-options-openstack - - Name: Control plane configuration options for VMware vSphere - File: cpmso-config-options-vsphere - - Name: Control plane resiliency and recovery - File: cpmso-resiliency - - Name: Troubleshooting the control plane machine set - File: cpmso-troubleshooting - - Name: Disabling the control plane machine set - File: cpmso-disabling -- Name: Managing machines with the Cluster API - Dir: cluster_api_machine_management - Topics: - - Name: About the Cluster API - File: cluster-api-about - - Name: Getting started with the Cluster API - File: cluster-api-getting-started - - Name: Managing machines with the Cluster API - File: cluster-api-managing-machines - - Name: Cluster API configuration - File: cluster-api-configuration - - Name: Configuration options for Cluster API machines - Dir: cluster_api_provider_configurations - Topics: - - Name: Cluster API configuration options for Amazon Web Services - File: cluster-api-config-options-aws - - Name: Cluster API configuration options for Google Cloud Platform - File: cluster-api-config-options-gcp - - Name: Cluster API configuration options for Microsoft Azure - File: cluster-api-config-options-azure - - Name: Cluster API configuration options for Red Hat OpenStack Platform - File: cluster-api-config-options-rhosp - - Name: Cluster API configuration options for VMware vSphere - File: cluster-api-config-options-vsphere -# - Name: Cluster API resiliency and recovery -# File: cluster-api-resiliency - - Name: Troubleshooting Cluster API clusters - File: cluster-api-troubleshooting -# - Name: Disabling Cluster API machine sets -# File: cluster-api-disabling -- Name: Deploying machine health checks - File: deploying-machine-health-checks ---- -Name: Hosted control planes -Dir: hosted_control_planes -Distros: openshift-enterprise, openshift-origin -Topics: -- 
Name: Hosted control planes release notes - File: hosted-control-planes-release-notes -- Name: Hosted control planes overview - File: index -- Name: Preparing to deploy hosted control planes - Dir: hcp-prepare - Topics: - - Name: Requirements for hosted control planes - File: hcp-requirements - - Name: Sizing guidance for hosted control planes - File: hcp-sizing-guidance - - Name: Overriding resource utilization measurements - File: hcp-override-resource-util - - Name: Installing the hosted control plane command-line interface - File: hcp-cli - - Name: Distributing hosted cluster workloads - File: hcp-distribute-workloads - - Name: Enabling or disabling the hosted control planes feature - File: hcp-enable-disable -- Name: Deploying hosted control planes - Dir: hcp-deploy - Topics: - - Name: Deploying hosted control planes on AWS - File: hcp-deploy-aws - - Name: Deploying hosted control planes on bare metal - File: hcp-deploy-bm - - Name: Deploying hosted control planes on OpenShift Virtualization - File: hcp-deploy-virt - - Name: Deploying hosted control planes on non-bare-metal agent machines - File: hcp-deploy-non-bm - - Name: Deploying hosted control planes on IBM Z - File: hcp-deploy-ibmz - - Name: Deploying hosted control planes on IBM Power - File: hcp-deploy-ibm-power -- Name: Managing hosted control planes - Dir: hcp-manage - Topics: - - Name: Managing hosted control planes on AWS - File: hcp-manage-aws - - Name: Managing hosted control planes on bare metal - File: hcp-manage-bm - - Name: Managing hosted control planes on OpenShift Virtualization - File: hcp-manage-virt - - Name: Managing hosted control planes on non-bare-metal agent machines - File: hcp-manage-non-bm - - Name: Managing hosted control planes on IBM Power - File: hcp-manage-ibm-power -- Name: Deploying hosted control planes in a disconnected environment - Dir: hcp-disconnected - Topics: - - Name: Introduction to hosted control planes in a disconnected environment - File: hcp-deploy-dc - - Name: Deploying hosted control planes on OpenShift Virtualization in a disconnected environment - File: hcp-deploy-dc-virt - - Name: Deploying hosted control planes on bare metal in a disconnected environment - File: hcp-deploy-dc-bm - - Name: Deploying hosted control planes on IBM Z in a disconnected environment - File: disconnected-install-ibmz-hcp - - Name: Monitoring user workload in a disconnected environment - File: hcp-dc-monitor -- Name: Updating hosted control planes - File: hcp-updating -- Name: High availability for hosted control planes - Dir: hcp_high_availability - Topics: - - Name: About high availability for hosted control planes - File: about-hcp-ha - - Name: Recovering a failing etcd cluster - File: hcp-recovering-etcd-cluster - - Name: Backing up and restoring etcd in an on-premise environment - File: hcp-backup-restore-on-premise - - Name: Backing up and restoring etcd on AWS - File: hcp-backup-restore-aws - - Name: Backing up and restoring a hosted cluster on OpenShift Virtualization - File: hcp-backup-restore-virt - - Name: Disaster recovery for a hosted cluster in AWS - File: hcp-disaster-recovery-aws - - Name: Disaster recovery for a hosted cluster by using OADP - File: hcp-disaster-recovery-oadp -- Name: Authentication and authorization for hosted control planes - File: hcp-authentication-authorization -- Name: Handling machine configuration for hosted control planes - File: hcp-machine-config -- Name: Using feature gates in a hosted cluster - File: hcp-using-feature-gates -- Name: Observability for hosted
control planes - File: hcp-observability -- Name: Networking for hosted control planes - File: hcp-networking -- Name: Troubleshooting hosted control planes - File: hcp-troubleshooting -- Name: Destroying a hosted cluster - Dir: hcp-destroy - Topics: - - Name: Destroying a hosted cluster on AWS - File: hcp-destroy-aws - - Name: Destroying a hosted cluster on bare metal - File: hcp-destroy-bm - - Name: Destroying a hosted cluster on OpenShift Virtualization - File: hcp-destroy-virt - - Name: Destroying a hosted cluster on IBM Z - File: hcp-destroy-ibmz - - Name: Destroying a hosted cluster on IBM Power - File: hcp-destroy-ibm-power - - Name: Destroying a hosted cluster on non-bare-metal agent machines - File: hcp-destroy-non-bm -- Name: Manually importing a hosted cluster - File: hcp-import ---- -Name: Nodes -Dir: nodes -Distros: openshift-enterprise,openshift-origin -Topics: -- Name: Overview of nodes - File: index -- Name: Working with pods - Dir: pods - Topics: - - Name: About pods - File: nodes-pods-using - - Name: Viewing pods - File: nodes-pods-viewing - - Name: Configuring a cluster for pods - File: nodes-pods-configuring - Distros: openshift-enterprise,openshift-origin - - Name: Automatically scaling pods with the horizontal pod autoscaler - File: nodes-pods-autoscaling - - Name: Automatically adjusting pod resource levels with the vertical pod autoscaler - File: nodes-pods-vertical-autoscaler - - Name: Providing sensitive data to pods by using secrets - File: nodes-pods-secrets - - Name: Providing sensitive data to pods by using an external secrets store - File: nodes-pods-secrets-store - - Name: Authenticating pods with short-term credentials - File: nodes-pods-short-term-auth - - Name: Creating and using config maps - File: nodes-pods-configmaps - - Name: Using Device Manager to make devices available to nodes - File: nodes-pods-plugins - Distros: openshift-enterprise,openshift-origin - - Name: Including pod priority in pod scheduling decisions - File: nodes-pods-priority - Distros: openshift-enterprise,openshift-origin - - Name: Placing pods on specific nodes using node selectors - File: nodes-pods-node-selectors - Distros: openshift-enterprise,openshift-origin - - Name: Run Once Duration Override Operator - Dir: run_once_duration_override - Distros: openshift-enterprise - Topics: - - Name: Run Once Duration Override Operator overview - File: index - - Name: Run Once Duration Override Operator release notes - File: run-once-duration-override-release-notes - - Name: Overriding the active deadline for run-once pods - File: run-once-duration-override-install - - Name: Uninstalling the Run Once Duration Override Operator - File: run-once-duration-override-uninstall - - Name: Running pods in Linux user namespaces - File: nodes-pods-user-namespaces -- Name: Automatically scaling pods with the Custom Metrics Autoscaler Operator - Dir: cma - Distros: openshift-enterprise,openshift-origin - Topics: - - Name: Release notes - Dir: nodes-cma-rn - Topics: - - Name: Custom Metrics Autoscaler Operator release notes - File: nodes-cma-autoscaling-custom-rn - - Name: Past releases - File: nodes-cma-autoscaling-custom-rn-past - - Name: Custom Metrics Autoscaler Operator overview - File: nodes-cma-autoscaling-custom - - Name: Installing the custom metrics autoscaler - File: nodes-cma-autoscaling-custom-install - - Name: Understanding the custom metrics autoscaler triggers - File: nodes-cma-autoscaling-custom-trigger - - Name: Understanding custom metrics autoscaler trigger authentications - File:
nodes-cma-autoscaling-custom-trigger-auth - - Name: Pausing the custom metrics autoscaler - File: nodes-cma-autoscaling-custom-pausing - - Name: Gathering audit logs - File: nodes-cma-autoscaling-custom-audit-log - - Name: Gathering debugging data - File: nodes-cma-autoscaling-custom-debugging - - Name: Viewing Operator metrics - File: nodes-cma-autoscaling-custom-metrics - - Name: Understanding how to add custom metrics autoscalers - File: nodes-cma-autoscaling-custom-adding - - Name: Removing the Custom Metrics Autoscaler Operator - File: nodes-cma-autoscaling-custom-removing -- Name: Controlling pod placement onto nodes (scheduling) - Dir: scheduling - Distros: openshift-enterprise,openshift-origin - Topics: - - Name: About pod placement using the scheduler - File: nodes-scheduler-about - - Name: Scheduling pods using a scheduler profile - File: nodes-scheduler-profiles - - Name: Placing pods relative to other pods using pod affinity and anti-affinity rules - File: nodes-scheduler-pod-affinity - - Name: Controlling pod placement on nodes using node affinity rules - File: nodes-scheduler-node-affinity - - Name: Placing pods onto overcommitted nodes - File: nodes-scheduler-overcommit - - Name: Controlling pod placement using node taints - File: nodes-scheduler-taints-tolerations - - Name: Placing pods on specific nodes using node selectors - File: nodes-scheduler-node-selectors - - Name: Controlling pod placement using pod topology spread constraints - File: nodes-scheduler-pod-topology-spread-constraints -# - Name: Placing a pod on a specific node by name -# File: nodes-scheduler-node-names -# - Name: Placing a pod in a specific project -# File: nodes-scheduler-node-projects -# - Name: Keeping your cluster balanced using the descheduler -# File: nodes-scheduler-descheduler - - Name: Descheduler - Dir: descheduler - Distros: openshift-enterprise - Topics: - - Name: Descheduler overview - File: index - - Name: Descheduler release notes - File: nodes-descheduler-release-notes - - Name: Evicting pods using the descheduler - File: nodes-descheduler-configuring - - Name: Uninstalling the descheduler - File: nodes-descheduler-uninstalling - - Name: Secondary scheduler - Dir: secondary_scheduler - Distros: openshift-enterprise - Topics: - - Name: Secondary scheduler overview - File: index - - Name: Secondary Scheduler Operator release notes - File: nodes-secondary-scheduler-release-notes - - Name: Scheduling pods using a secondary scheduler - File: nodes-secondary-scheduler-configuring - - Name: Uninstalling the Secondary Scheduler Operator - File: nodes-secondary-scheduler-uninstalling -- Name: Using jobs and daemon sets - Dir: jobs - Topics: - - Name: Running background tasks on nodes automatically with daemon sets - File: nodes-pods-daemonsets - Distros: openshift-enterprise,openshift-origin - - Name: Running tasks in pods using jobs - File: nodes-nodes-jobs -- Name: Working with nodes - Dir: nodes - Distros: openshift-enterprise,openshift-origin - Topics: - - Name: Viewing and listing the nodes in your cluster - File: nodes-nodes-viewing - - Name: Working with nodes - File: nodes-nodes-working - - Name: Managing nodes - File: nodes-nodes-managing - - Name: Adding worker nodes to an on-premise cluster - File: nodes-nodes-adding-node-iso -# Hiding this assembly per @rphillips: "We are trying to enable the feature, but there are cases we are running into where networking does not get enabled at boot."
-# - Name: Managing graceful node shutdown -# File: nodes-nodes-graceful-shutdown - - Name: Managing the maximum number of pods per node - File: nodes-nodes-managing-max-pods - - Name: Using the Node Tuning Operator - File: nodes-node-tuning-operator - - Name: Remediating, fencing, and maintaining nodes - File: nodes-remediating-fencing-maintaining-rhwa - - Name: Understanding node rebooting - File: nodes-nodes-rebooting - - Name: Freeing node resources using garbage collection - File: nodes-nodes-garbage-collection - - Name: Allocating resources for nodes - File: nodes-nodes-resources-configuring - - Name: Allocating specific CPUs for nodes in a cluster - File: nodes-nodes-resources-cpus - - Name: Enabling TLS security profiles for the kubelet - File: nodes-nodes-tls - Distros: openshift-enterprise,openshift-origin -# - Name: Monitoring for problems in your nodes -# File: nodes-nodes-problem-detector - - Name: Creating infrastructure nodes - File: nodes-nodes-creating-infrastructure-nodes -- Name: Working with containers - Dir: containers - Topics: - - Name: Understanding containers - File: nodes-containers-using - - Name: Using Init Containers to perform tasks before a pod is deployed - File: nodes-containers-init - Distros: openshift-enterprise,openshift-origin - - Name: Using volumes to persist container data - File: nodes-containers-volumes - - Name: Mapping volumes using projected volumes - File: nodes-containers-projected-volumes - - Name: Allowing containers to consume API objects - File: nodes-containers-downward-api - - Name: Copying files to or from a container - File: nodes-containers-copying-files - - Name: Executing remote commands in a container - File: nodes-containers-remote-commands - - Name: Using port forwarding to access applications in a container - File: nodes-containers-port-forwarding - - Name: Using sysctls in containers - File: nodes-containers-sysctls - - Name: Accessing faster builds with /dev/fuse - File: nodes-containers-dev-fuse -- Name: Working with clusters - Dir: clusters - Topics: - - Name: Viewing system event information in a cluster - File: nodes-containers-events - - Name: Analyzing cluster resource levels - File: nodes-cluster-resource-levels - Distros: openshift-enterprise,openshift-origin - - Name: Setting limit ranges - File: nodes-cluster-limit-ranges - - Name: Configuring cluster memory to meet container memory and risk requirements - File: nodes-cluster-resource-configure - Distros: openshift-enterprise,openshift-origin - - Name: Configuring your cluster to place pods on overcommitted nodes - File: nodes-cluster-overcommit - Distros: openshift-enterprise - - Name: Configuring the Linux cgroup version on your nodes - File: nodes-cluster-cgroups-2 - - Name: Enabling features using FeatureGates - File: nodes-cluster-enabling-features - Distros: openshift-enterprise,openshift-origin - - Name: Improving cluster stability in high latency environments using worker latency profiles - File: nodes-cluster-worker-latency-profiles - Distros: openshift-enterprise,openshift-origin -- Name: Remote worker nodes on the network edge - Dir: edge - Distros: openshift-enterprise - Topics: - - Name: Using remote worker nodes at the network edge - File: nodes-edge-remote-workers -- Name: Worker nodes for single-node OpenShift clusters - Dir: nodes - Distros: openshift-enterprise - Topics: - - Name: Adding worker nodes to single-node OpenShift clusters - File: nodes-sno-worker-nodes -- Name: Node metrics dashboard - File: nodes-dashboard-using -- Name: Manage secure
signatures with sigstore - File: nodes-sigstore-using ---- -Name: Windows Container Support for OpenShift -Dir: windows_containers -Distros: openshift-origin,openshift-enterprise -Topics: -- Name: Red Hat OpenShift support for Windows Containers overview - File: index -- Name: Release notes - Dir: wmco_rn - Topics: - - Name: Red Hat OpenShift support for Windows Containers release notes - File: windows-containers-release-notes-10-17-x - - Name: Past releases - File: windows-containers-release-notes-10-17-x-past - - Name: Windows Machine Config Operator prerequisites - File: windows-containers-release-notes-10-17-x-prereqs - - Name: Windows Machine Config Operator known limitations - File: windows-containers-release-notes-10-17-x-limitations -- Name: Getting support - File: windows-containers-support - Distros: openshift-enterprise -- Name: Understanding Windows container workloads - File: understanding-windows-container-workloads -- Name: Enabling Windows container workloads - File: enabling-windows-container-workloads -- Name: Creating Windows machine sets - Dir: creating_windows_machinesets - Topics: - - Name: Creating a Windows machine set on AWS - File: creating-windows-machineset-aws - - Name: Creating a Windows machine set on Azure - File: creating-windows-machineset-azure - - Name: Creating a Windows machine set on GCP - File: creating-windows-machineset-gcp - - Name: Creating a Windows machine set on Nutanix - File: creating-windows-machineset-nutanix - - Name: Creating a Windows machine set on vSphere - File: creating-windows-machineset-vsphere -- Name: Scheduling Windows container workloads - File: scheduling-windows-workloads -- Name: Windows node updates - File: windows-node-upgrades -- Name: Using Bring-Your-Own-Host Windows instances as nodes - File: byoh-windows-instance -- Name: Removing Windows nodes - File: removing-windows-nodes -- Name: Disabling Windows container workloads - File: disabling-windows-container-workloads ---- -Name: OpenShift sandboxed containers -Dir: sandboxed_containers -Distros: openshift-enterprise -Topics: -- Name: Documentation moved - File: sandboxed-containers-moved ---- -Name: Observability -Dir: observability -Distros: openshift-enterprise,openshift-origin -Topics: -- Name: Observability overview - Dir: overview - Distros: openshift-enterprise,openshift-origin - Topics: - - Name: About Observability - File: index -- Name: Cluster Observability Operator - Dir: cluster_observability_operator - Distros: openshift-enterprise,openshift-origin - Topics: - - Name: Cluster Observability Operator release notes - File: cluster-observability-operator-release-notes - - Name: Cluster Observability Operator overview - File: cluster-observability-operator-overview - - Name: Installing the Cluster Observability Operator - File: installing-the-cluster-observability-operator - - Name: Configuring the Cluster Observability Operator to monitor a service - File: configuring-the-cluster-observability-operator-to-monitor-a-service - - Name: Observability UI plugins - Dir: ui_plugins - Distros: openshift-enterprise,openshift-origin - Topics: - - Name: Observability UI plugins overview - File: observability-ui-plugins-overview - - Name: Monitoring UI plugin - File: monitoring-ui-plugin - - Name: Logging UI plugin - File: logging-ui-plugin - - Name: Distributed tracing UI plugin - File: distributed-tracing-ui-plugin - - Name: Troubleshooting UI plugin - File: troubleshooting-ui-plugin -# - Name: Dashboard UI plugin -# File: dashboard-ui-plugin -- Name: Monitoring - Dir: 
monitoring - Distros: openshift-enterprise,openshift-origin - Topics: - - Name: About OpenShift Container Platform monitoring - Dir: about-ocp-monitoring - Topics: - - Name: About OpenShift Container Platform monitoring - File: about-ocp-monitoring - - Name: Monitoring stack architecture - File: monitoring-stack-architecture - - Name: Key concepts - File: key-concepts - - Name: Getting started - Dir: getting-started - Topics: - - Name: Maintenance and support for monitoring - File: maintenance-and-support-for-monitoring - - Name: Core platform monitoring first steps - File: core-platform-monitoring-first-steps - - Name: User workload monitoring first steps - File: user-workload-monitoring-first-steps - - Name: Developer and non-administrator steps - File: developer-and-non-administrator-steps - - Name: Configuring core platform monitoring - Dir: configuring-core-platform-monitoring - Topics: - - Name: Preparing to configure the monitoring stack - File: preparing-to-configure-the-monitoring-stack - - Name: Configuring performance and scalability - File: configuring-performance-and-scalability - - Name: Storing and recording data - File: storing-and-recording-data - - Name: Configuring metrics - File: configuring-metrics - - Name: Configuring alerts and notifications - File: configuring-alerts-and-notifications - - Name: Configuring user workload monitoring - Dir: configuring-user-workload-monitoring - Topics: - - Name: Preparing to configure the monitoring stack - File: preparing-to-configure-the-monitoring-stack-uwm - - Name: Configuring performance and scalability - File: configuring-performance-and-scalability-uwm - - Name: Storing and recording data - File: storing-and-recording-data-uwm - - Name: Configuring metrics - File: configuring-metrics-uwm - - Name: Configuring alerts and notifications - File: configuring-alerts-and-notifications-uwm - - Name: Accessing metrics - Dir: accessing-metrics - Topics: - - Name: Accessing metrics as an administrator - File: accessing-metrics-as-an-administrator - - Name: Accessing metrics as a developer - File: accessing-metrics-as-a-developer - - Name: Accessing monitoring APIs by using the CLI - File: accessing-monitoring-apis-by-using-the-cli - - Name: Managing alerts - Dir: managing-alerts - Topics: - - Name: Managing alerts as an administrator - File: managing-alerts-as-an-administrator - - Name: Managing alerts as a developer - File: managing-alerts-as-a-developer - - Name: Troubleshooting monitoring issues - File: troubleshooting-monitoring-issues - - Name: Config map reference for the Cluster Monitoring Operator - File: config-map-reference-for-the-cluster-monitoring-operator - - Name: Monitoring clusters that run on RHOSO - File: shiftstack-prometheus-configuration -- Name: Logging - Dir: logging - Distros: openshift-enterprise,openshift-origin - Topics: -# - Name: Release notes -# Dir: logging_release_notes -# Topics: -# - Name: Logging 5.9 -# File: logging-5-9-release-notes -# - Name: Logging 5.8 -# File: logging-5-8-release-notes -# - Name: Logging 5.7 -# File: logging-5-7-release-notes - - Name: Logging 6.2 - Dir: logging-6.2 - Topics: - - Name: Support - File: log62-cluster-logging-support - - Name: Release notes - File: log6x-release-notes-6.2 - - Name: About logging 6.2 - File: log6x-about-6.2 - - Name: Configuring log forwarding - File: log6x-clf-6.2 - - Name: Configuring the logging collector - File: 6x-cluster-logging-collector-6.2 - - Name: Configuring LokiStack storage - File: log6x-loki-6.2 - - Name: Configuring LokiStack for 
OTLP - File: log6x-configuring-lokistack-otlp-6.2 - - Name: Visualization for logging - File: log6x-visual-6.2 - - Name: Logging 6.1 - Dir: logging-6.1 - Topics: - - Name: Support - File: log61-cluster-logging-support - - Name: Release notes - File: log6x-release-notes-6.1 - - Name: About logging 6.1 - File: log6x-about-6.1 - - Name: Configuring log forwarding - File: log6x-clf-6.1 - - Name: Configuring the logging collector - File: 6x-cluster-logging-collector-6.1 - - Name: Configuring LokiStack storage - File: log6x-loki-6.1 - - Name: Configuring LokiStack for OTLP - File: log6x-configuring-lokistack-otlp-6.1 - - Name: OpenTelemetry data model - File: log6x-opentelemetry-data-model-6.1 - - Name: Visualization for logging - File: log6x-visual-6.1 -# - Name: Support -# File: cluster-logging-support -# - Name: Troubleshooting logging -# Dir: troubleshooting -# Topics: -# - Name: Viewing Logging status -# File: cluster-logging-cluster-status -# - Name: Troubleshooting log forwarding -# File: log-forwarding-troubleshooting -# - Name: Troubleshooting logging alerts -# File: troubleshooting-logging-alerts -# File: cluster-logging-log-store-status -# - Name: About Logging -# File: cluster-logging -# - Name: Installing Logging -# File: cluster-logging-deploying -# - Name: Updating Logging -# File: cluster-logging-upgrading -# Distros: openshift-enterprise,openshift-origin -# - Name: Visualizing logs -# Topics: -# - Name: About log visualization -# File: log-visualization -# - Name: Log visualization with the web console -# File: log-visualization-ocp-console -# - Name: Viewing cluster dashboards -# File: cluster-logging-dashboards -# - Name: Log visualization with Kibana -# File: logging-kibana -# - Name: Configuring your Logging deployment -# Dir: config -# Distros: openshift-enterprise,openshift-origin -# Topics: -# - Name: Configuring CPU and memory limits for Logging components -# File: cluster-logging-memory -# - Name: Configuring systemd-journald for Logging -# File: cluster-logging-systemd -# - Name: Log collection and forwarding -# Dir: log_collection_forwarding -# Topics: -# - Name: About log collection and forwarding -# File: log-forwarding -# - Name: Log output types -# File: logging-output-types -# - Name: Enabling JSON log forwarding -# File: cluster-logging-enabling-json-logging -# - Name: Configuring log forwarding -# File: configuring-log-forwarding -# - Name: Configuring the logging collector -# File: cluster-logging-collector -# - Name: Collecting and storing Kubernetes events -# File: cluster-logging-eventrouter -# - Name: Log storage -# Dir: log_storage -# Topics: -# - Name: About log storage -# File: about-log-storage -# File: installing-log-storage -# - Name: Configuring the LokiStack log store -# File: cluster-logging-loki -# - Name: Configuring the Elasticsearch log store -# File: logging-config-es-store -# - Name: Logging alerts -# Dir: logging_alerts -# Topics: -# - Name: Default logging alerts -# File: default-logging-alerts -# - Name: Custom logging alerts -# File: custom-logging-alerts -# - Name: Performance and reliability tuning -# Dir: performance_reliability -# Topics: -# - Name: Flow control mechanisms -# File: logging-flow-control-mechanisms -# - Name: Filtering logs by content -# File: logging-content-filtering -# - Name: Filtering logs by metadata -# File: logging-input-spec-filtering -# - Name: Scheduling resources -# Dir: scheduling_resources -# Topics: -# - Name: Using node selectors to move logging resources -# File: logging-node-selectors -# - Name: Using 
tolerations to control logging pod placement -# File: logging-taints-tolerations -# - Name: Uninstalling Logging -# File: cluster-logging-uninstall -# - Name: Exported fields -# File: cluster-logging-exported-fields -# Distros: openshift-enterprise,openshift-origin -# - Name: API reference -# Dir: api_reference -# Topics: - # - Name: 5.8 Logging API reference - # File: logging-5-8-reference - # - Name: 5.7 Logging API reference - # File: logging-5-7-reference -# - Name: 5.6 Logging API reference -# File: logging-5-6-reference -# - Name: Glossary -# File: logging-common-terms -- Name: Distributed tracing - Dir: distr_tracing - Distros: openshift-enterprise - Topics: - - Name: Release notes - File: distr-tracing-rn - - Name: Distributed tracing architecture - Dir: distr_tracing_arch - Topics: - - Name: Distributed tracing architecture - File: distr-tracing-architecture - - Name: Distributed tracing platform (Tempo) - Dir: distr_tracing_tempo - Topics: - - Name: Installing - File: distr-tracing-tempo-installing - - Name: Configuring - File: distr-tracing-tempo-configuring - - Name: Troubleshooting - File: distr-tracing-tempo-troubleshooting - - Name: Upgrading - File: distr-tracing-tempo-updating - - Name: Removing - File: distr-tracing-tempo-removing - - Name: Distributed tracing platform (Jaeger) - Dir: distr_tracing_jaeger - Topics: - - Name: Installing - File: distr-tracing-jaeger-installing - - Name: Configuring - File: distr-tracing-jaeger-configuring - - Name: Upgrading - File: distr-tracing-jaeger-updating - - Name: Removing - File: distr-tracing-jaeger-removing -- Name: Red Hat build of OpenTelemetry - Dir: otel - Distros: openshift-enterprise - Topics: - - Name: Release notes - File: otel-rn - - Name: Installing - File: otel-installing - - Name: Configuring the Collector - Dir: otel-collector - Topics: - - Name: Collector options - File: otel-collector-configuration-intro - - Name: Receivers - File: otel-collector-receivers - - Name: Processors - File: otel-collector-processors - - Name: Exporters - File: otel-collector-exporters - - Name: Connectors - File: otel-collector-connectors - - Name: Extensions - File: otel-collector-extensions - - Name: Target Allocator - File: otel-collector-target-allocator - - Name: Configuring the instrumentation - File: otel-configuration-of-instrumentation - - Name: Sending traces and metrics to the Collector - File: otel-sending-traces-and-metrics-to-otel-collector - - Name: Configuring metrics for the monitoring stack - File: otel-configuring-metrics-for-monitoring-stack - - Name: Forwarding telemetry data - File: otel-forwarding-telemetry-data - - Name: Configuring the Collector metrics - File: otel-configuring-otelcol-metrics - - Name: Gathering the observability data from multiple clusters - File: otel-config-multicluster - - Name: Troubleshooting - File: otel-troubleshooting - - Name: Migrating - File: otel-migrating - - Name: Upgrading - File: otel-updating - - Name: Removing - File: otel-removing -- Name: Network Observability - Dir: network_observability - Distros: openshift-enterprise,openshift-origin - Topics: - - Name: Network Observability release notes - File: network-observability-operator-release-notes - - Name: Network Observability overview - File: network-observability-overview - - Name: Installing the Network Observability Operator - File: installing-operators - - Name: Understanding Network Observability Operator - File: understanding-network-observability-operator - - Name: Configuring the Network Observability Operator - File: 
configuring-operator - - Name: Network Policy - File: network-observability-network-policy - - Name: Observing the network traffic - File: observing-network-traffic - - Name: Using metrics with dashboards and alerts - File: metrics-alerts-dashboards - - Name: Monitoring the Network Observability Operator - File: network-observability-operator-monitoring - - Name: Scheduling resources - File: network-observability-scheduling-resources - - Name: Secondary networks - File: network-observability-secondary-networks - - Name: Network Observability CLI - Dir: netobserv_cli - Topics: - - Name: Installing the Network Observability CLI - File: netobserv-cli-install - - Name: Using the Network Observability CLI - File: netobserv-cli-using - - Name: Network Observability CLI reference - File: netobserv-cli-reference - - Name: FlowCollector API reference - File: flowcollector-api - - Name: FlowMetric API reference - File: flowmetric-api - - Name: Flows format reference - File: json-flows-format-reference - - Name: Troubleshooting Network Observability - File: troubleshooting-network-observability -- Name: Power Monitoring - Dir: power_monitoring - Distros: openshift-enterprise,openshift-origin - Topics: - - Name: Power monitoring release notes - File: power-monitoring-release-notes - - Name: Power monitoring overview - File: power-monitoring-overview - - Name: Installing power monitoring - File: installing-power-monitoring - - Name: Configuring power monitoring - File: configuring-power-monitoring - - Name: Visualizing power monitoring metrics - File: visualizing-power-monitoring-metrics - - Name: Uninstalling power monitoring - File: uninstalling-power-monitoring ---- -Name: Scalability and performance -Dir: scalability_and_performance -Distros: openshift-origin,openshift-enterprise,openshift-webscale,openshift-dpu -Topics: -- Name: Scalability and performance overview - File: index -- Name: Recommended performance and scalability practices - Dir: recommended-performance-scale-practices - Distros: openshift-origin,openshift-enterprise - Topics: - - Name: Recommended control plane practices - File: recommended-control-plane-practices - - Name: Recommended infrastructure practices - File: recommended-infrastructure-practices - - Name: Recommended etcd practices - File: recommended-etcd-practices -- Name: Telco core reference design specifications - File: telco-core-rds -- Name: Telco RAN DU reference design specifications - File: telco-ran-du-rds -- Name: Comparing cluster configurations - Dir: cluster-compare - Distros: openshift-origin,openshift-enterprise - Topics: - - Name: Understanding the cluster-compare plugin - File: understanding-the-cluster-compare-plugin - - Name: Installing the cluster-compare plugin - File: installing-cluster-compare-plugin - - Name: Using the cluster-compare plugin - File: using-the-cluster-compare-plugin - - Name: Creating a reference configuration - File: creating-a-reference-configuration - - Name: Performing advanced reference configuration customization - File: advanced-ref-config-customization - - Name: Troubleshooting cluster comparisons - File: troubleshooting-cluster-comparisons -- Name: Planning your environment according to object maximums - File: planning-your-environment-according-to-object-maximums - Distros: openshift-origin,openshift-enterprise -- Name: Compute Resource Quotas - File: compute-resource-quotas -- Name: Recommended host practices for IBM Z & IBM LinuxONE environments - File: ibm-z-recommended-host-practices - Distros: openshift-enterprise -- 
Name: Using the Node Tuning Operator - File: using-node-tuning-operator - Distros: openshift-origin,openshift-enterprise -- Name: Using CPU Manager and Topology Manager - File: using-cpu-manager - Distros: openshift-origin,openshift-enterprise -- Name: Scheduling NUMA-aware workloads - File: cnf-numa-aware-scheduling - Distros: openshift-origin,openshift-enterprise -- Name: Scalability and performance optimization - Dir: optimization - Distros: openshift-origin,openshift-enterprise - Topics: - - Name: Optimizing storage - File: optimizing-storage - - Name: Optimizing routing - File: routing-optimization - - Name: Optimizing networking - File: optimizing-networking - - Name: Optimizing CPU usage - File: optimizing-cpu-usage -- Name: Managing bare metal hosts - File: managing-bare-metal-hosts - Distros: openshift-origin,openshift-enterprise -- Name: What huge pages do and how they are consumed by apps - File: what-huge-pages-do-and-how-they-are-consumed-by-apps - Distros: openshift-origin,openshift-enterprise -- Name: Understanding low latency - File: cnf-understanding-low-latency -- Name: Tuning nodes for low latency with the performance profile - File: cnf-tuning-low-latency-nodes-with-perf-profile -- Name: Provisioning real-time and low latency workloads - File: cnf-provisioning-low-latency-workloads -- Name: Debugging low latency tuning - File: cnf-debugging-low-latency-tuning-status -- Name: Performing latency tests for platform verification - File: cnf-performing-platform-verification-latency-tests -- Name: Improving cluster stability in high latency environments using worker latency profiles - File: scaling-worker-latency-profiles - Distros: openshift-origin,openshift-enterprise -- Name: Workload partitioning - File: enabling-workload-partitioning - Distros: openshift-origin,openshift-enterprise -- Name: Using the Node Observability Operator - File: node-observability-operator - Distros: openshift-origin,openshift-enterprise ---- -Name: Edge computing -Dir: edge_computing -Distros: openshift-origin,openshift-enterprise -Topics: -- Name: Challenges of the network far edge - File: ztp-deploying-far-edge-clusters-at-scale -- Name: Preparing the hub cluster for ZTP - File: ztp-preparing-the-hub-cluster -- Name: Updating GitOps ZTP - File: ztp-updating-gitops -- Name: Installing managed clusters with RHACM and SiteConfig resources - File: ztp-deploying-far-edge-sites -- Name: Manually installing a single-node OpenShift cluster with GitOps ZTP - File: ztp-manual-install -- Name: Recommended single-node OpenShift cluster configuration for vDU application workloads - File: ztp-reference-cluster-configuration-for-vdu -- Name: Validating cluster tuning for vDU application workloads - File: ztp-vdu-validating-cluster-tuning -- Name: Advanced managed cluster configuration with SiteConfig resources - File: ztp-advanced-install-ztp -- Name: Managing cluster policies with PolicyGenerator resources - Dir: policygenerator_for_ztp - Distros: openshift-origin,openshift-enterprise - Topics: - - Name: Configuring managed cluster policies by using PolicyGenerator resources - File: ztp-configuring-managed-clusters-policygenerator - - Name: Advanced managed cluster configuration with PolicyGenerator resources - File: ztp-advanced-policygenerator-config - - Name: Updating managed clusters in a disconnected environment with PolicyGenerator resources and TALM - File: ztp-talm-updating-managed-policies-pg -- Name: Managing cluster policies with PolicyGenTemplate resources - Dir: policygentemplate_for_ztp - 
Distros: openshift-origin,openshift-enterprise
-  Topics:
-  - Name: Configuring managed cluster policies by using PolicyGenTemplate resources
-    File: ztp-configuring-managed-clusters-policies
-  - Name: Advanced managed cluster configuration with PolicyGenTemplate resources
-    File: ztp-advanced-policy-config
-  - Name: Updating managed clusters in a disconnected environment with PolicyGenTemplate resources and TALM
-    File: ztp-talm-updating-managed-policies
-- Name: Using hub templates in PolicyGenerator or PolicyGenTemplate CRs
-  File: ztp-using-hub-cluster-templates
-- Name: Updating managed clusters with the Topology Aware Lifecycle Manager
-  File: cnf-talm-for-cluster-upgrades
-- Name: Expanding single-node OpenShift clusters with GitOps ZTP
-  File: ztp-sno-additional-worker-node
-- Name: Pre-caching images for single-node OpenShift deployments
-  File: ztp-precaching-tool
-- Name: Image-based upgrade for single-node OpenShift clusters
-  Dir: image_based_upgrade
-  Distros: openshift-origin,openshift-enterprise
-  Topics:
-  - Name: Understanding the image-based upgrade for single-node OpenShift clusters
-    File: cnf-understanding-image-based-upgrade
-  - Name: Preparing for an image-based upgrade for single-node OpenShift clusters
-    Dir: preparing_for_image_based_upgrade
-    Distros: openshift-origin,openshift-enterprise
-    Topics:
-    - Name: Configuring a shared container partition for the image-based upgrade
-      File: cnf-image-based-upgrade-shared-container-partition
-    - Name: Installing Operators for the image-based upgrade
-      File: cnf-image-based-upgrade-install-operators
-    - Name: Generating a seed image for the image-based upgrade with the Lifecycle Agent
-      File: cnf-image-based-upgrade-generate-seed
-    - Name: Creating ConfigMap objects for the image-based upgrade with the Lifecycle Agent
-      File: cnf-image-based-upgrade-prep-resources
-    - Name: Creating ConfigMap objects for the image-based upgrade with Lifecycle Agent using GitOps ZTP
-      File: ztp-image-based-upgrade-prep-resources
-    - Name: Configuring the automatic image cleanup of the container storage disk
-      File: cnf-image-based-upgrade-auto-image-cleanup
-  - Name: Performing an image-based upgrade for single-node OpenShift clusters with the Lifecycle Agent
-    File: cnf-image-based-upgrade-base
-  - Name: Performing an image-based upgrade for single-node OpenShift clusters using GitOps ZTP
-    File: ztp-image-based-upgrade
-- Name: Image-based installation for single-node OpenShift
-  Dir: image_base_install
-  Distros: openshift-origin,openshift-enterprise
-  Topics:
-  - Name: Understanding image-based installation and deployment for single-node OpenShift
-    File: ibi-understanding-image-based-install
-  - Name: Preparing for a single-node OpenShift image-based installation
-    File: ibi-preparing-for-image-based-install
-  - Name: Preinstalling single-node OpenShift using an image-based installation
-    File: ibi-factory-image-based-install
-  - Name: Deploying single-node OpenShift clusters
-    Dir: ibi_deploying_sno_clusters
-    Distros: openshift-origin,openshift-enterprise
-    Topics:
-    - Name: Deploying managed single-node OpenShift using the IBI Operator
-      File: ibi-edge-image-based-install
-    - Name: Deploying single-node OpenShift using the installation program
-      File: ibi-edge-image-based-install-standalone
-- Name: Day 2 operations for telco core CNF clusters
-  Dir: day_2_core_cnf_clusters
-  Distros: openshift-origin,openshift-enterprise
-  Topics:
-  - Name: Upgrading telco core CNF clusters
-    Dir: updating
-    Topics:
-    - Name: Upgrading telco core CNF clusters
-      File: telco-update-welcome
-    - Name: OpenShift Container Platform API compatibility
-      File: telco-update-api
-    - Name: Preparing for the cluster update
-      File: telco-update-ocp-update-prep
-    - Name: Managing live CNF pods during the cluster update
-      File: telco-update-cnf-update-prep
-    - Name: Before you update the cluster
-      File: telco-update-before-the-update
-    - Name: Completing the Control Plane Only update
-      File: telco-update-completing-the-control-plane-only-update
-    - Name: Completing the y-stream update
-      File: telco-update-completing-the-y-stream-update
-    - Name: Completing the z-stream update
-      File: telco-update-completing-the-z-stream-update
-  - Name: Troubleshooting and maintaining telco core CNF clusters
-    Dir: troubleshooting
-    Topics:
-    - Name: Troubleshooting and maintaining telco core CNF clusters
-      File: telco-troubleshooting-intro
-    - Name: General troubleshooting
-      File: telco-troubleshooting-general-troubleshooting
-    - Name: Cluster maintenance
-      File: telco-troubleshooting-cluster-maintenance
-    - Name: Security
-      File: telco-troubleshooting-security
-    - Name: Certificate maintenance
-      File: telco-troubleshooting-cert-maintenance
-    - Name: Machine Config Operator
-      File: telco-troubleshooting-mco
-    - Name: Bare-metal node maintenance
-      File: telco-troubleshooting-bmn-maintenance
-  - Name: Observability
-    Dir: observability
-    Topics:
-    - Name: Observability in OpenShift Container Platform
-      File: telco-observability
-  - Name: Security
-    Dir: security
-    Topics:
-    - Name: Security basics
-      File: telco-security-basics
-    - Name: Host security
-      File: telco-security-host-sec
-    - Name: Security context constraints
-      File: telco-security-sec-context-constraints
----
-Name: Specialized hardware and driver enablement
-Dir: hardware_enablement
-Distros: openshift-origin,openshift-enterprise
-Topics:
-- Name: About specialized hardware and driver enablement
-  File: about-hardware-enablement
-- Name: Driver Toolkit
-  File: psap-driver-toolkit
-- Name: Node Feature Discovery Operator
-  File: psap-node-feature-discovery-operator
-- Name: Kernel Module Management Operator
-  File: kmm-kernel-module-management
-- Name: Kernel Module Management Operator release notes
-  File: kmm-release-notes
----
-Name: Hardware accelerators
-Dir: hardware_accelerators
-Distros: openshift-origin,openshift-enterprise
-Topics:
-- Name: About hardware accelerators
-  File: about-hardware-accelerators
-- Name: NVIDIA GPU architecture
-  File: nvidia-gpu-architecture
-- Name: AMD GPU Operator
-  File: amd-gpu-operator
----
-Name: Backup and restore
-Dir: backup_and_restore
-Distros: openshift-origin,openshift-enterprise
-Topics:
-- Name: Overview of backup and restore operations
-  File: index
-- Name: Shutting down a cluster gracefully
-  File: graceful-cluster-shutdown
-- Name: Restarting a cluster gracefully
-  File: graceful-cluster-restart
-- Name: Hibernating a cluster
-  File: hibernating-cluster
-- Name: OADP Application backup and restore
-  Dir: application_backup_and_restore
-  Topics:
-  - Name: Introduction to OpenShift API for Data Protection
-    File: oadp-intro
-  - Name: OADP release notes
-    Dir: release-notes
-    Topics:
-    - Name: OADP 1.4 release notes
-      File: oadp-1-4-release-notes
-  - Name: OADP performance
-    Dir: oadp-performance
-    Topics:
-    - Name: OADP recommended network settings
-      File: oadp-recommended-network-settings
-  - Name: OADP features and plugins
-    File: oadp-features-plugins
-  - Name: OADP use cases
-    Dir: oadp-use-cases
-    Topics:
-    - Name: Backing up an application using OADP and ODF
-      File: oadp-usecase-backup-using-odf
-    - Name: Restoring a backup to a different namespace
-      File: oadp-usecase-restore-different-namespace
-    - Name: Including a self-signed CA certificate during backup
-      File: oadp-usecase-enable-ca-cert
-    - Name: Using the legacy-aws Velero plugin
-      File: oadp-usecase-legacy-aws-plugin
-  - Name: Installing and configuring OADP
-    Dir: installing
-    Topics:
-    - Name: About installing OADP
-      File: about-installing-oadp
-    - Name: Installing the OADP Operator
-      File: oadp-installing-operator
-    - Name: Configuring OADP with AWS S3 compatible storage
-      File: installing-oadp-aws
-    - Name: Configuring OADP with IBM Cloud
-      File: installing-oadp-ibm-cloud
-    - Name: Configuring OADP with Azure
-      File: installing-oadp-azure
-    - Name: Configuring OADP with GCP
-      File: installing-oadp-gcp
-    - Name: Configuring OADP with MCG
-      File: installing-oadp-mcg
-    - Name: Configuring OADP with ODF
-      File: installing-oadp-ocs
-    - Name: Configuring OADP with OpenShift Virtualization
-      File: installing-oadp-kubevirt
-    - Name: Configuring OADP with multiple backup storage locations
-      File: configuring-oadp-multiple-bsl
-    - Name: Configuring OADP with multiple Volume Snapshot Locations
-      File: configuring-oadp-multiple-vsl
-  - Name: Uninstalling OADP
-    Dir: installing
-    Topics:
-    - Name: Uninstalling OADP
-      File: uninstalling-oadp
-  - Name: OADP backing up
-    Dir: backing_up_and_restoring
-    Topics:
-    - Name: Backing up applications
-      File: backing-up-applications
-    - Name: Creating a Backup CR
-      File: oadp-creating-backup-cr
-    - Name: Backing up persistent volumes with CSI snapshots
-      File: oadp-backing-up-pvs-csi-doc
-    - Name: Backing up applications with File System Backup
-      File: oadp-backing-up-applications-restic-doc
-    - Name: Creating backup hooks
-      File: oadp-creating-backup-hooks-doc
-    - Name: Scheduling backups using Schedule CR
-      File: oadp-scheduling-backups-doc
-    - Name: Deleting backups
-      File: oadp-deleting-backups
-    - Name: About Kopia
-      File: oadp-about-kopia
-  - Name: OADP restoring
-    Dir: backing_up_and_restoring
-    Topics:
-    - Name: Restoring applications
-      File: restoring-applications
-  - Name: OADP and ROSA
-    Dir: oadp-rosa
-    Topics:
-    - Name: Backing up applications on ROSA STS using OADP
-      File: oadp-rosa-backing-up-applications
-  - Name: OADP and AWS STS
-    Dir: aws-sts
-    Topics:
-    - Name: Backing up applications on AWS STS using OADP
-      File: oadp-aws-sts
-  - Name: OADP and 3scale
-    Dir: oadp-3scale
-    Topics:
-    - Name: Backing up and restoring 3scale by using OADP
-      File: backing-up-and-restoring-3scale-by-using-oadp
-  - Name: OADP Data Mover
-    Dir: installing
-    Topics:
-    - Name: About the OADP Data Mover
-      File: about-oadp-data-mover
-    - Name: Backing up and restoring volumes by using CSI snapshots data movement
-      File: oadp-backup-restore-csi-snapshots
-    - Name: Overriding Kopia algorithms
-      File: overriding-kopia-algorithms
-  - Name: OADP API
-    File: oadp-api
-  - Name: Advanced OADP features and functionalities
-    File: oadp-advanced-topics
-  - Name: Troubleshooting OADP
-    File: troubleshooting
-  - Name: Velero CLI tool
-    File: velero-cli-tool
-  - Name: Pods crash or restart due to lack of memory or CPU
-    File: pods-crash-or-restart-due-to-lack-of-memory-or-cpu
-  - Name: Issues with Velero and admission webhooks
-    File: issues-with-velero-and-admission-webhooks
-  - Name: OADP installation issues
-    File: oadp-installation-issues
-  - Name: OADP Operator issues
-    File: oadp-operator-issues
-  - Name: OADP timeouts
-    File: oadp-timeouts
-  - Name: Backup and Restore CR issues
-    File: backup-and-restore-cr-issues
-  - Name: Restic issues
-    File: restic-issues
-  - Name: Using the must-gather tool
-    File: using-the-must-gather-tool
-  - Name: OADP monitoring
-    File: oadp-monitoring
-- Name: Control plane backup and restore
-  Dir: control_plane_backup_and_restore
-  Topics:
-  - Name: Backing up etcd data
-    File: backing-up-etcd
-  - Name: Replacing an unhealthy etcd member
-    File: replacing-unhealthy-etcd-member
-  - Name: Disaster recovery
-    Dir: disaster_recovery
-    Topics:
-    - Name: About disaster recovery
-      File: about-disaster-recovery
-    - Name: Quorum restoration
-      File: quorum-restoration
-    - Name: Restoring to a previous cluster state
-      File: scenario-2-restoring-cluster-state
-    - Name: Recovering from expired control plane certificates
-      File: scenario-3-expired-certs
----
-Name: Migrating from version 3 to 4
-Dir: migrating_from_ocp_3_to_4
-Distros: openshift-enterprise,openshift-origin
-Topics:
-- Name: Migrating from version 3 to 4 overview
-  File: index
-- Name: About migrating from OpenShift Container Platform 3 to 4
-  File: about-migrating-from-3-to-4
-  Distros: openshift-enterprise
-- Name: About migrating from OKD 3 to 4
-  File: about-migrating-from-3-to-4
-  Distros: openshift-origin
-- Name: Differences between OpenShift Container Platform 3 and 4
-  File: planning-migration-3-4
-  Distros: openshift-enterprise
-- Name: Differences between OKD 3 and 4
-  File: planning-migration-3-4
-  Distros: openshift-origin
-- Name: Network considerations
-  File: planning-considerations-3-4
-- Name: About MTC
-  File: about-mtc-3-4
-- Name: Installing MTC
-  File: installing-3-4
-- Name: Installing MTC in a disconnected environment
-  File: installing-restricted-3-4
-- Name: Upgrading MTC
-  File: upgrading-3-4
-- Name: Premigration checklists
-  File: premigration-checklists-3-4
-- Name: Migrating your applications
-  File: migrating-applications-3-4
-- Name: Advanced migration options
-  File: advanced-migration-options-3-4
-- Name: Troubleshooting
-  File: troubleshooting-3-4
----
-Name: Migration Toolkit for Containers
-Dir: migration_toolkit_for_containers
-Distros: openshift-enterprise,openshift-origin
-Topics:
-- Name: About MTC
-  File: about-mtc
-- Name: MTC release notes
-  Dir: release_notes
-  Topics:
-  - Name: MTC release notes 1.8
-    File: mtc-release-notes
-  - Name: MTC release notes 1.7
-    File: mtc-release-notes-1-7
-  - Name: MTC release notes 1.6
-    File: mtc-release-notes-1-6
-  - Name: MTC release notes 1.5
-    File: mtc-release-notes-1-5
-- Name: Installing MTC
-  File: installing-mtc
-- Name: Installing MTC in a disconnected environment
-  File: installing-mtc-restricted
-- Name: Upgrading MTC
-  File: upgrading-mtc
-- Name: Premigration checklists
-  File: premigration-checklists-mtc
-- Name: Network considerations
-  File: network-considerations-mtc
-- Name: Direct Migration Requirements
-  File: mtc-direct-migration-requirements
-- Name: Migrating your applications
-  File: migrating-applications-with-mtc
-- Name: Advanced migration options
-  File: advanced-migration-options-mtc
-- Name: Troubleshooting
-  File: troubleshooting-mtc
----
-Name: API reference
-Dir: rest_api
-Distros: openshift-enterprise,openshift-origin
-Topics:
-- Name: API overview
-  Dir: overview
-  Topics:
-  - Name: Understanding API tiers
-    File: understanding-api-support-tiers
-  - Name: API compatibility guidelines
-    File: understanding-compatibility-guidelines
-  - Name: Editing kubelet log level verbosity and gathering logs
-    File: editing-kubelet-log-level-verbosity
-  - Name: API list
-    File: index
-- Name: Common object reference
-  Dir: objects
-  Topics:
-  - Name: Index
-    File: index
-- Name: Authorization APIs
-  Dir: authorization_apis
-  Topics:
-  - Name: About Authorization APIs
-    File: authorization-apis-index
-  - Name: 'LocalResourceAccessReview [authorization.openshift.io/v1]'
-    File: localresourceaccessreview-authorization-openshift-io-v1
-  - Name: 'LocalSubjectAccessReview [authorization.openshift.io/v1]'
-    File: localsubjectaccessreview-authorization-openshift-io-v1
-  - Name: 'ResourceAccessReview [authorization.openshift.io/v1]'
-    File: resourceaccessreview-authorization-openshift-io-v1
-  - Name: 'SelfSubjectRulesReview [authorization.openshift.io/v1]'
-    File: selfsubjectrulesreview-authorization-openshift-io-v1
-  - Name: 'SubjectAccessReview [authorization.openshift.io/v1]'
-    File: subjectaccessreview-authorization-openshift-io-v1
-  - Name: 'SubjectRulesReview [authorization.openshift.io/v1]'
-    File: subjectrulesreview-authorization-openshift-io-v1
-  - Name: 'SelfSubjectReview [authentication.k8s.io/v1]'
-    File: selfsubjectreview-authentication-k8s-io-v1
-  - Name: 'TokenRequest [authentication.k8s.io/v1]'
-    File: tokenrequest-authentication-k8s-io-v1
-  - Name: 'TokenReview [authentication.k8s.io/v1]'
-    File: tokenreview-authentication-k8s-io-v1
-  - Name: 'LocalSubjectAccessReview [authorization.k8s.io/v1]'
-    File: localsubjectaccessreview-authorization-k8s-io-v1
-  - Name: 'SelfSubjectAccessReview [authorization.k8s.io/v1]'
-    File: selfsubjectaccessreview-authorization-k8s-io-v1
-  - Name: 'SelfSubjectRulesReview [authorization.k8s.io/v1]'
-    File: selfsubjectrulesreview-authorization-k8s-io-v1
-  - Name: 'SubjectAccessReview [authorization.k8s.io/v1]'
-    File: subjectaccessreview-authorization-k8s-io-v1
-- Name: Autoscale APIs
-  Dir: autoscale_apis
-  Topics:
-  - Name: About Autoscale APIs
-    File: autoscale-apis-index
-  - Name: 'ClusterAutoscaler [autoscaling.openshift.io/v1]'
-    File: clusterautoscaler-autoscaling-openshift-io-v1
-  - Name: 'MachineAutoscaler [autoscaling.openshift.io/v1beta1]'
-    File: machineautoscaler-autoscaling-openshift-io-v1beta1
-  - Name: 'HorizontalPodAutoscaler [autoscaling/v2]'
-    File: horizontalpodautoscaler-autoscaling-v2
-  - Name: 'Scale [autoscaling/v1]'
-    File: scale-autoscaling-v1
-- Name: Cluster APIs
-  Dir: cluster_apis
-  Topics:
-  - Name: About Cluster APIs
-    File: cluster-apis-index
-  - Name: 'IPAddress [ipam.cluster.x-k8s.io/v1beta1]'
-    File: ipaddress-ipam-cluster-x-k8s-io-v1beta1
-  - Name: 'IPAddressClaim [ipam.cluster.x-k8s.io/v1beta1]'
-    File: ipaddressclaim-ipam-cluster-x-k8s-io-v1beta1
-- Name: Config APIs
-  Dir: config_apis
-  Topics:
-  - Name: About Config APIs
-    File: config-apis-index
-  - Name: 'APIServer [config.openshift.io/v1]'
-    File: apiserver-config-openshift-io-v1
-  - Name: 'Authentication [config.openshift.io/v1]'
-    File: authentication-config-openshift-io-v1
-  - Name: 'Build [config.openshift.io/v1]'
-    File: build-config-openshift-io-v1
-  - Name: 'ClusterOperator [config.openshift.io/v1]'
-    File: clusteroperator-config-openshift-io-v1
-  - Name: 'ClusterVersion [config.openshift.io/v1]'
-    File: clusterversion-config-openshift-io-v1
-  - Name: 'Console [config.openshift.io/v1]'
-    File: console-config-openshift-io-v1
-  - Name: 'DNS [config.openshift.io/v1]'
-    File: dns-config-openshift-io-v1
-  - Name: 'FeatureGate [config.openshift.io/v1]'
-    File: featuregate-config-openshift-io-v1
-  - Name: 'HelmChartRepository [helm.openshift.io/v1beta1]'
-    File: helmchartrepository-helm-openshift-io-v1beta1
-  - Name: 'Image [config.openshift.io/v1]'
-    File: image-config-openshift-io-v1
-  - Name: 'ImageDigestMirrorSet [config.openshift.io/v1]'
-    File: imagedigestmirrorset-config-openshift-io-v1
-  - Name: 'ImageContentPolicy [config.openshift.io/v1]'
-    File: imagecontentpolicy-config-openshift-io-v1
-  - Name: 'ImageTagMirrorSet [config.openshift.io/v1]'
-    File: imagetagmirrorset-config-openshift-io-v1
-  - Name: 'Infrastructure [config.openshift.io/v1]'
-    File: infrastructure-config-openshift-io-v1
-  - Name: 'Ingress [config.openshift.io/v1]'
-    File: ingress-config-openshift-io-v1
-  - Name: 'Network [config.openshift.io/v1]'
-    File: network-config-openshift-io-v1
-  - Name: 'Node [config.openshift.io/v1]'
-    File: node-config-openshift-io-v1
-  - Name: 'OAuth [config.openshift.io/v1]'
-    File: oauth-config-openshift-io-v1
-  - Name: 'OperatorHub [config.openshift.io/v1]'
-    File: operatorhub-config-openshift-io-v1
-  - Name: 'Project [config.openshift.io/v1]'
-    File: project-config-openshift-io-v1
-  - Name: 'ProjectHelmChartRepository [helm.openshift.io/v1beta1]'
-    File: projecthelmchartrepository-helm-openshift-io-v1beta1
-  - Name: 'Proxy [config.openshift.io/v1]'
-    File: proxy-config-openshift-io-v1
-  - Name: 'Scheduler [config.openshift.io/v1]'
-    File: scheduler-config-openshift-io-v1
-- Name: Console APIs
-  Dir: console_apis
-  Topics:
-  - Name: About Console APIs
-    File: console-apis-index
-  - Name: 'ConsoleCLIDownload [console.openshift.io/v1]'
-    File: consoleclidownload-console-openshift-io-v1
-  - Name: 'ConsoleExternalLogLink [console.openshift.io/v1]'
-    File: consoleexternalloglink-console-openshift-io-v1
-  - Name: 'ConsoleLink [console.openshift.io/v1]'
-    File: consolelink-console-openshift-io-v1
-  - Name: 'ConsoleNotification [console.openshift.io/v1]'
-    File: consolenotification-console-openshift-io-v1
-  - Name: 'ConsolePlugin [console.openshift.io/v1]'
-    File: consoleplugin-console-openshift-io-v1
-  - Name: 'ConsoleQuickStart [console.openshift.io/v1]'
-    File: consolequickstart-console-openshift-io-v1
-  - Name: 'ConsoleSample [console.openshift.io/v1]'
-    File: consolesample-console-openshift-io-v1
-  - Name: 'ConsoleYAMLSample [console.openshift.io/v1]'
-    File: consoleyamlsample-console-openshift-io-v1
-- Name: Extension APIs
-  Dir: extension_apis
-  Topics:
-  - Name: About Extension APIs
-    File: extension-apis-index
-  - Name: 'APIService [apiregistration.k8s.io/v1]'
-    File: apiservice-apiregistration-k8s-io-v1
-  - Name: 'CustomResourceDefinition [apiextensions.k8s.io/v1]'
-    File: customresourcedefinition-apiextensions-k8s-io-v1
-  - Name: 'MutatingWebhookConfiguration [admissionregistration.k8s.io/v1]'
-    File: mutatingwebhookconfiguration-admissionregistration-k8s-io-v1
-  - Name: 'ValidatingAdmissionPolicy [admissionregistration.k8s.io/v1]'
-    File: validatingadmissionpolicy-admissionregistration-k8s-io-v1
-  - Name: 'ValidatingAdmissionPolicyBinding [admissionregistration.k8s.io/v1]'
-    File: validatingadmissionpolicybinding-admissionregistration-k8s-io-v1
-  - Name: 'ValidatingWebhookConfiguration [admissionregistration.k8s.io/v1]'
-    File: validatingwebhookconfiguration-admissionregistration-k8s-io-v1
-- Name: Image APIs
-  Dir: image_apis
-  Topics:
-  - Name: About Image APIs
-    File: image-apis-index
-  - Name: 'Image [image.openshift.io/v1]'
-    File: image-image-openshift-io-v1
-  - Name: 'ImageSignature [image.openshift.io/v1]'
-    File: imagesignature-image-openshift-io-v1
-  - Name: 'ImageStreamImage [image.openshift.io/v1]'
-    File: imagestreamimage-image-openshift-io-v1
-  - Name: 'ImageStreamImport [image.openshift.io/v1]'
-    File: imagestreamimport-image-openshift-io-v1
-  - Name: 'ImageStreamLayers [image.openshift.io/v1]'
-    File: imagestreamlayers-image-openshift-io-v1
-  - Name: 'ImageStreamMapping [image.openshift.io/v1]'
-    File: imagestreammapping-image-openshift-io-v1
-  - Name: 'ImageStream [image.openshift.io/v1]'
-    File: imagestream-image-openshift-io-v1
-  - Name: 'ImageStreamTag [image.openshift.io/v1]'
-    File: imagestreamtag-image-openshift-io-v1
-  - Name: 'ImageTag [image.openshift.io/v1]'
-    File: imagetag-image-openshift-io-v1
-  - Name: 'SecretList [image.openshift.io/v1]'
-    File: secretlist-image-openshift-io-v1
-- Name: Machine APIs
-  Dir: machine_apis
-  Topics:
-  - Name: About Machine APIs
-    File: machine-apis-index
-  - Name: 'ContainerRuntimeConfig [machineconfiguration.openshift.io/v1]'
-    File: containerruntimeconfig-machineconfiguration-openshift-io-v1
-  - Name: 'ControllerConfig [machineconfiguration.openshift.io/v1]'
-    File: controllerconfig-machineconfiguration-openshift-io-v1
-  - Name: 'ControlPlaneMachineSet [machine.openshift.io/v1]'
-    File: controlplanemachineset-machine-openshift-io-v1
-  - Name: 'KubeletConfig [machineconfiguration.openshift.io/v1]'
-    File: kubeletconfig-machineconfiguration-openshift-io-v1
-  - Name: 'MachineConfig [machineconfiguration.openshift.io/v1]'
-    File: machineconfig-machineconfiguration-openshift-io-v1
-  - Name: 'MachineConfigPool [machineconfiguration.openshift.io/v1]'
-    File: machineconfigpool-machineconfiguration-openshift-io-v1
-  - Name: 'MachineHealthCheck [machine.openshift.io/v1beta1]'
-    File: machinehealthcheck-machine-openshift-io-v1beta1
-  - Name: 'Machine [machine.openshift.io/v1beta1]'
-    File: machine-machine-openshift-io-v1beta1
-  - Name: 'MachineSet [machine.openshift.io/v1beta1]'
-    File: machineset-machine-openshift-io-v1beta1
-- Name: Metadata APIs
-  Dir: metadata_apis
-  Topics:
-  - Name: About Metadata APIs
-    File: metadata-apis-index
-  - Name: 'APIRequestCount [apiserver.openshift.io/v1]'
-    File: apirequestcount-apiserver-openshift-io-v1
-  - Name: 'Binding [undefined/v1]'
-    File: binding-v1
-  - Name: 'ComponentStatus [undefined/v1]'
-    File: componentstatus-v1
-  - Name: 'ConfigMap [undefined/v1]'
-    File: configmap-v1
-  - Name: 'ControllerRevision [apps/v1]'
-    File: controllerrevision-apps-v1
-  - Name: 'Event [events.k8s.io/v1]'
-    File: event-events-k8s-io-v1
-  - Name: 'Event [undefined/v1]'
-    File: event-v1
-  - Name: 'Lease [coordination.k8s.io/v1]'
-    File: lease-coordination-k8s-io-v1
-  - Name: 'Namespace [undefined/v1]'
-    File: namespace-v1
-- Name: Monitoring APIs
-  Dir: monitoring_apis
-  Topics:
-  - Name: About Monitoring APIs
-    File: monitoring-apis-index
-  - Name: 'Alertmanager [monitoring.coreos.com/v1]'
-    File: alertmanager-monitoring-coreos-com-v1
-  - Name: 'AlertmanagerConfig [monitoring.coreos.com/v1beta1]'
-    File: alertmanagerconfig-monitoring-coreos-com-v1beta1
-  - Name: 'AlertRelabelConfig [monitoring.openshift.io/v1]'
-    File: alertrelabelconfig-monitoring-openshift-io-v1
-  - Name: 'AlertingRule [monitoring.openshift.io/v1]'
-    File: alertingrule-monitoring-openshift-io-v1
-  - Name: 'PodMonitor [monitoring.coreos.com/v1]'
-    File: podmonitor-monitoring-coreos-com-v1
-  - Name: 'Probe [monitoring.coreos.com/v1]'
-    File: probe-monitoring-coreos-com-v1
-  - Name: 'Prometheus [monitoring.coreos.com/v1]'
-    File: prometheus-monitoring-coreos-com-v1
-  - Name: 'PrometheusRule [monitoring.coreos.com/v1]'
-    File: prometheusrule-monitoring-coreos-com-v1
-  - Name: 'ServiceMonitor [monitoring.coreos.com/v1]'
-    File: servicemonitor-monitoring-coreos-com-v1
-  - Name: 'ThanosRuler [monitoring.coreos.com/v1]'
-    File: thanosruler-monitoring-coreos-com-v1
-  - Name: 'NodeMetrics [metrics.k8s.io/v1beta1]'
-    File: nodemetrics-metrics-k8s-io-v1beta1
-  - Name: 'PodMetrics [metrics.k8s.io/v1beta1]'
-    File: podmetrics-metrics-k8s-io-v1beta1
-- Name: Network APIs
-  Dir: network_apis
-  Topics:
-  - Name: About Network APIs
-    File: network-apis-index
-  - Name: 'AdminNetworkPolicy [policy.networking.k8s.io/v1alpha1]'
-    File: adminnetworkpolicy-policy-networking-k8s-io-v1alpha1
-  - Name: 'AdminPolicyBasedExternalRoute [k8s.ovn.org/v1]'
-    File: adminpolicybasedexternalroute-k8s-ovn-org-v1
-  - Name: 'BaselineAdminNetworkPolicy [policy.networking.k8s.io/v1alpha1]'
-    File: baselineadminnetworkpolicy-policy-networking-k8s-io-v1alpha1
-  - Name: 'CloudPrivateIPConfig [cloud.network.openshift.io/v1]'
-    File: cloudprivateipconfig-cloud-network-openshift-io-v1
-  - Name: 'EgressFirewall [k8s.ovn.org/v1]'
-    File: egressfirewall-k8s-ovn-org-v1
-  - Name: 'EgressIP [k8s.ovn.org/v1]'
-    File: egressip-k8s-ovn-org-v1
-  - Name: 'EgressQoS [k8s.ovn.org/v1]'
-    File: egressqos-k8s-ovn-org-v1
-  - Name: 'EgressService [k8s.ovn.org/v1]'
-    File: egressservice-k8s-ovn-org-v1
-  - Name: 'Endpoints [undefined/v1]'
-    File: endpoints-v1
-  - Name: 'EndpointSlice [discovery.k8s.io/v1]'
-    File: endpointslice-discovery-k8s-io-v1
-  - Name: 'EgressRouter [network.operator.openshift.io/v1]'
-    File: egressrouter-network-operator-openshift-io-v1
-  - Name: 'Ingress [networking.k8s.io/v1]'
-    File: ingress-networking-k8s-io-v1
-  - Name: 'IngressClass [networking.k8s.io/v1]'
-    File: ingressclass-networking-k8s-io-v1
-  - Name: 'IPPool [whereabouts.cni.cncf.io/v1alpha1]'
-    File: ippool-whereabouts-cni-cncf-io-v1alpha1
-  - Name: 'MultiNetworkPolicy [k8s.cni.cncf.io/v1beta1]'
-    File: multinetworkpolicy-k8s-cni-cncf-io-v1beta1
-  - Name: 'NetworkAttachmentDefinition [k8s.cni.cncf.io/v1]'
-    File: networkattachmentdefinition-k8s-cni-cncf-io-v1
-  - Name: 'NetworkPolicy [networking.k8s.io/v1]'
-    File: networkpolicy-networking-k8s-io-v1
-  - Name: 'OverlappingRangeIPReservation [whereabouts.cni.cncf.io/v1alpha1]'
-    File: overlappingrangeipreservation-whereabouts-cni-cncf-io-v1alpha1
-  - Name: 'PodNetworkConnectivityCheck [controlplane.operator.openshift.io/v1alpha1]'
-    File: podnetworkconnectivitycheck-controlplane-operator-openshift-io-v1alpha1
-  - Name: 'Route [route.openshift.io/v1]'
-    File: route-route-openshift-io-v1
-  - Name: 'Service [undefined/v1]'
-    File: service-v1
-- Name: Node APIs
-  Dir: node_apis
-  Topics:
-  - Name: About Node APIs
-    File: node-apis-index
-  - Name: 'Node [undefined/v1]'
-    File: node-v1
-  - Name: 'PerformanceProfile [performance.openshift.io/v2]'
-    File: performanceprofile-performance-openshift-io-v2
-  - Name: 'Profile [tuned.openshift.io/v1]'
-    File: profile-tuned-openshift-io-v1
-  - Name: 'RuntimeClass [node.k8s.io/v1]'
-    File: runtimeclass-node-k8s-io-v1
-  - Name: 'Tuned [tuned.openshift.io/v1]'
-    File: tuned-tuned-openshift-io-v1
-- Name: OAuth APIs
-  Dir: oauth_apis
-  Topics:
-  - Name: About OAuth APIs
-    File: oauth-apis-index
-  - Name: 'OAuthAccessToken [oauth.openshift.io/v1]'
-    File: oauthaccesstoken-oauth-openshift-io-v1
-  - Name: 'OAuthAuthorizeToken [oauth.openshift.io/v1]'
-    File: oauthauthorizetoken-oauth-openshift-io-v1
-  - Name: 'OAuthClientAuthorization [oauth.openshift.io/v1]'
-    File: oauthclientauthorization-oauth-openshift-io-v1
-  - Name: 'OAuthClient [oauth.openshift.io/v1]'
-    File: oauthclient-oauth-openshift-io-v1
-  - Name: 'UserOAuthAccessToken [oauth.openshift.io/v1]'
-    File: useroauthaccesstoken-oauth-openshift-io-v1
-- Name: Operator APIs
-  Dir: operator_apis
-  Topics:
-  - Name: About Operator APIs
-    File: operator-apis-index
-  - Name: 'Authentication [operator.openshift.io/v1]'
-    File: authentication-operator-openshift-io-v1
-  - Name: 'CloudCredential [operator.openshift.io/v1]'
-    File: cloudcredential-operator-openshift-io-v1
-  - Name: 'ClusterCSIDriver [operator.openshift.io/v1]'
-    File: clustercsidriver-operator-openshift-io-v1
-  - Name: 'Console [operator.openshift.io/v1]'
-    File: console-operator-openshift-io-v1
-  - Name: 'Config [operator.openshift.io/v1]'
-    File: config-operator-openshift-io-v1
-  - Name: 'Config [imageregistry.operator.openshift.io/v1]'
-    File: config-imageregistry-operator-openshift-io-v1
-  - Name: 'Config [samples.operator.openshift.io/v1]'
-    File: config-samples-operator-openshift-io-v1
-  - Name: 'CSISnapshotController [operator.openshift.io/v1]'
-    File: csisnapshotcontroller-operator-openshift-io-v1
-  - Name: 'DNS [operator.openshift.io/v1]'
-    File: dns-operator-openshift-io-v1
-  - Name: 'DNSRecord [ingress.operator.openshift.io/v1]'
-    File: dnsrecord-ingress-operator-openshift-io-v1
-  - Name: 'Etcd [operator.openshift.io/v1]'
-    File: etcd-operator-openshift-io-v1
-  - Name: 'ImageContentSourcePolicy [operator.openshift.io/v1alpha1]'
-    File: imagecontentsourcepolicy-operator-openshift-io-v1alpha1
-  - Name: 'ImagePruner [imageregistry.operator.openshift.io/v1]'
-    File: imagepruner-imageregistry-operator-openshift-io-v1
-  - Name: 'IngressController [operator.openshift.io/v1]'
-    File: ingresscontroller-operator-openshift-io-v1
-  - Name: 'InsightsOperator [operator.openshift.io/v1]'
-    File: insightsoperator-operator-openshift-io-v1
-  - Name: 'KubeAPIServer [operator.openshift.io/v1]'
-    File: kubeapiserver-operator-openshift-io-v1
-  - Name: 'KubeControllerManager [operator.openshift.io/v1]'
-    File: kubecontrollermanager-operator-openshift-io-v1
-  - Name: 'KubeScheduler [operator.openshift.io/v1]'
-    File: kubescheduler-operator-openshift-io-v1
-  - Name: 'KubeStorageVersionMigrator [operator.openshift.io/v1]'
-    File: kubestorageversionmigrator-operator-openshift-io-v1
-  - Name: 'MachineConfiguration [operator.openshift.io/v1]'
-    File: machineconfiguration-operator-openshift-io-v1
-  - Name: 'Network [operator.openshift.io/v1]'
-    File: network-operator-openshift-io-v1
-  - Name: 'OpenShiftAPIServer [operator.openshift.io/v1]'
-    File: openshiftapiserver-operator-openshift-io-v1
-  - Name: 'OpenShiftControllerManager [operator.openshift.io/v1]'
-    File: openshiftcontrollermanager-operator-openshift-io-v1
-  - Name: 'OperatorPKI [network.operator.openshift.io/v1]'
-    File: operatorpki-network-operator-openshift-io-v1
-  - Name: 'ServiceCA [operator.openshift.io/v1]'
-    File: serviceca-operator-openshift-io-v1
-  - Name: 'Storage [operator.openshift.io/v1]'
-    File: storage-operator-openshift-io-v1
-- Name: OperatorHub APIs
-  Dir: operatorhub_apis
-  Topics:
-  - Name: About OperatorHub APIs
-    File: operatorhub-apis-index
-  - Name: 'CatalogSource [operators.coreos.com/v1alpha1]'
-    File: catalogsource-operators-coreos-com-v1alpha1
-  - Name: 'ClusterServiceVersion [operators.coreos.com/v1alpha1]'
-    File: clusterserviceversion-operators-coreos-com-v1alpha1
-  - Name: 'InstallPlan [operators.coreos.com/v1alpha1]'
-    File: installplan-operators-coreos-com-v1alpha1
-  - Name: 'OLMConfig [operators.coreos.com/v1]'
-    File: olmconfig-operators-coreos-com-v1
-  - Name: 'Operator [operators.coreos.com/v1]'
-    File: operator-operators-coreos-com-v1
-  - Name: 'OperatorCondition [operators.coreos.com/v2]'
-    File: operatorcondition-operators-coreos-com-v2
-  - Name: 'OperatorGroup [operators.coreos.com/v1]'
-    File: operatorgroup-operators-coreos-com-v1
-  - Name: 'PackageManifest [packages.operators.coreos.com/v1]'
-    File: packagemanifest-packages-operators-coreos-com-v1
-  - Name: 'Subscription [operators.coreos.com/v1alpha1]'
-    File: subscription-operators-coreos-com-v1alpha1
-- Name: Policy APIs
-  Dir: policy_apis
-  Topics:
-  - Name: About Policy APIs
-    File: policy-apis-index
-  - Name: 'Eviction [policy/v1]'
-    File: eviction-policy-v1
-  - Name: 'PodDisruptionBudget [policy/v1]'
-    File: poddisruptionbudget-policy-v1
-- Name: Project APIs
-  Dir: project_apis
-  Topics:
-  - Name: About Project APIs
-    File: project-apis-index
-  - Name: 'Project [project.openshift.io/v1]'
-    File: project-project-openshift-io-v1
-  - Name: 'ProjectRequest [project.openshift.io/v1]'
-    File: projectrequest-project-openshift-io-v1
-- Name: Provisioning APIs
-  Dir: provisioning_apis
-  Topics:
-  - Name: About Provisioning APIs
-    File: provisioning-apis-index
-  - Name: 'BMCEventSubscription [metal3.io/v1alpha1]'
-    File: bmceventsubscription-metal3-io-v1alpha1
-  - Name: 'BareMetalHost [metal3.io/v1alpha1]'
-    File: baremetalhost-metal3-io-v1alpha1
-  - Name: 'DataImage [metal3.io/v1alpha1]'
-    File: dataimage-metal3-io-v1alpha1
-  - Name: 'FirmwareSchema [metal3.io/v1alpha1]'
-    File: firmwareschema-metal3-io-v1alpha1
-  - Name: 'HardwareData [metal3.io/v1alpha1]'
-    File: hardwaredata-metal3-io-v1alpha1
-  - Name: 'HostFirmwareComponents [metal3.io/v1alpha1]'
-    File: hostfirmwarecomponents-metal3-io-v1alpha1
-  - Name: 'HostFirmwareSettings [metal3.io/v1alpha1]'
-    File: hostfirmwaresettings-metal3-io-v1alpha1
-  - Name: 'Metal3Remediation [infrastructure.cluster.x-k8s.io/v1beta1]'
-    File: metal3remediation-infrastructure-cluster-x-k8s-io-v1beta1
-  - Name: 'Metal3RemediationTemplate [infrastructure.cluster.x-k8s.io/v1beta1]'
-    File: metal3remediationtemplate-infrastructure-cluster-x-k8s-io-v1beta1
-  - Name: 'PreprovisioningImage [metal3.io/v1alpha1]'
-    File: preprovisioningimage-metal3-io-v1alpha1
-  - Name: 'Provisioning [metal3.io/v1alpha1]'
-    File: provisioning-metal3-io-v1alpha1
-- Name: RBAC APIs
-  Dir: rbac_apis
-  Topics:
-  - Name: About RBAC APIs
-    File: rbac-apis-index
-  - Name: 'ClusterRoleBinding [rbac.authorization.k8s.io/v1]'
-    File: clusterrolebinding-rbac-authorization-k8s-io-v1
-  - Name: 'ClusterRole [rbac.authorization.k8s.io/v1]'
-    File: clusterrole-rbac-authorization-k8s-io-v1
-  - Name: 'RoleBinding [rbac.authorization.k8s.io/v1]'
-    File: rolebinding-rbac-authorization-k8s-io-v1
-  - Name: 'Role [rbac.authorization.k8s.io/v1]'
-    File: role-rbac-authorization-k8s-io-v1
-- Name: Role APIs
-  Dir: role_apis
-  Topics:
-  - Name: About Role APIs
-    File: role-apis-index
-  - Name: 'ClusterRoleBinding [authorization.openshift.io/v1]'
-    File: clusterrolebinding-authorization-openshift-io-v1
-  - Name: 'ClusterRole [authorization.openshift.io/v1]'
-    File: clusterrole-authorization-openshift-io-v1
-  - Name: 'RoleBindingRestriction [authorization.openshift.io/v1]'
-    File: rolebindingrestriction-authorization-openshift-io-v1
-  - Name: 'RoleBinding [authorization.openshift.io/v1]'
-    File: rolebinding-authorization-openshift-io-v1
-  - Name: 'Role [authorization.openshift.io/v1]'
-    File: role-authorization-openshift-io-v1
-- Name: Schedule and quota APIs
-  Dir: schedule_and_quota_apis
-  Topics:
-  - Name: About Schedule and quota APIs
-    File: schedule-and-quota-apis-index
-  - Name: 'AppliedClusterResourceQuota [quota.openshift.io/v1]'
-    File: appliedclusterresourcequota-quota-openshift-io-v1
-  - Name: 'ClusterResourceQuota [quota.openshift.io/v1]'
-    File: clusterresourcequota-quota-openshift-io-v1
-  - Name: 'FlowSchema [flowcontrol.apiserver.k8s.io/v1]'
-    File: flowschema-flowcontrol-apiserver-k8s-io-v1
-  - Name: 'LimitRange [undefined/v1]'
-    File: limitrange-v1
-  - Name: 'PriorityClass [scheduling.k8s.io/v1]'
-    File: priorityclass-scheduling-k8s-io-v1
-  - Name: 'PriorityLevelConfiguration [flowcontrol.apiserver.k8s.io/v1]'
-    File: prioritylevelconfiguration-flowcontrol-apiserver-k8s-io-v1
-  - Name: 'ResourceQuota [undefined/v1]'
-    File: resourcequota-v1
-- Name: Security APIs
-  Dir: security_apis
-  Topics:
-  - Name: About Security APIs
-    File: security-apis-index
-  - Name: 'CertificateSigningRequest [certificates.k8s.io/v1]'
-    File: certificatesigningrequest-certificates-k8s-io-v1
-  - Name: 'CredentialsRequest [cloudcredential.openshift.io/v1]'
-    File: credentialsrequest-cloudcredential-openshift-io-v1
-  - Name: 'PodSecurityPolicyReview [security.openshift.io/v1]'
-    File: podsecuritypolicyreview-security-openshift-io-v1
-  - Name: 'PodSecurityPolicySelfSubjectReview [security.openshift.io/v1]'
-    File: podsecuritypolicyselfsubjectreview-security-openshift-io-v1
-  - Name: 'PodSecurityPolicySubjectReview [security.openshift.io/v1]'
-    File: podsecuritypolicysubjectreview-security-openshift-io-v1
-  - Name: 'RangeAllocation [security.openshift.io/v1]'
-    File: rangeallocation-security-openshift-io-v1
-  - Name: 'Secret [undefined/v1]'
-    File: secret-v1
-  - Name: 'SecurityContextConstraints [security.openshift.io/v1]'
-    File: securitycontextconstraints-security-openshift-io-v1
-  - Name: 'ServiceAccount [undefined/v1]'
-    File: serviceaccount-v1
-- Name: Storage APIs
-  Dir: storage_apis
-  Topics:
-  - Name: About Storage APIs
-    File: storage-apis-index
-  - Name: 'CSIDriver [storage.k8s.io/v1]'
-    File: csidriver-storage-k8s-io-v1
-  - Name: 'CSINode [storage.k8s.io/v1]'
-    File: csinode-storage-k8s-io-v1
-  - Name: 'CSIStorageCapacity [storage.k8s.io/v1]'
-    File: csistoragecapacity-storage-k8s-io-v1
-  - Name: 'PersistentVolume [undefined/v1]'
-    File: persistentvolume-v1
-  - Name: 'PersistentVolumeClaim [undefined/v1]'
-    File: persistentvolumeclaim-v1
-  - Name: 'StorageClass [storage.k8s.io/v1]'
-    File: storageclass-storage-k8s-io-v1
-  - Name: 'StorageState [migration.k8s.io/v1alpha1]'
-    File: storagestate-migration-k8s-io-v1alpha1
-  - Name: 'StorageVersionMigration [migration.k8s.io/v1alpha1]'
-    File: storageversionmigration-migration-k8s-io-v1alpha1
-  - Name: 'VolumeAttachment [storage.k8s.io/v1]'
-    File: volumeattachment-storage-k8s-io-v1
-  - Name: 'VolumeSnapshot [snapshot.storage.k8s.io/v1]'
-    File: volumesnapshot-snapshot-storage-k8s-io-v1
-  - Name: 'VolumeSnapshotClass [snapshot.storage.k8s.io/v1]'
-    File: volumesnapshotclass-snapshot-storage-k8s-io-v1
-  - Name: 'VolumeSnapshotContent [snapshot.storage.k8s.io/v1]'
-    File: volumesnapshotcontent-snapshot-storage-k8s-io-v1
-- Name: Template APIs
-  Dir: template_apis
-  Topics:
-  - Name: About Template APIs
-    File: template-apis-index
-  - Name: 'BrokerTemplateInstance [template.openshift.io/v1]'
-    File: brokertemplateinstance-template-openshift-io-v1
-  - Name: 'PodTemplate [undefined/v1]'
-    File: podtemplate-v1
-  - Name: 'Template [template.openshift.io/v1]'
-    File: template-template-openshift-io-v1
-  - Name: 'TemplateInstance [template.openshift.io/v1]'
-    File: templateinstance-template-openshift-io-v1
-- Name: User and group APIs
-  Dir: user_and_group_apis
-  Topics:
-  - Name: About User and group APIs
-    File: user-and-group-apis-index
-  - Name: 'Group [user.openshift.io/v1]'
-    File: group-user-openshift-io-v1
-  - Name: 'Identity [user.openshift.io/v1]'
-    File: identity-user-openshift-io-v1
-  - Name: 'UserIdentityMapping [user.openshift.io/v1]'
-    File: useridentitymapping-user-openshift-io-v1
-  - Name: 'User [user.openshift.io/v1]'
-    File: user-user-openshift-io-v1
-- Name: Workloads APIs
-  Dir: workloads_apis
-  Topics:
-  - Name: About Workloads APIs
-    File: workloads-apis-index
-  - Name: 'BuildConfig [build.openshift.io/v1]'
-    File: buildconfig-build-openshift-io-v1
-  - Name: 'Build [build.openshift.io/v1]'
-    File: build-build-openshift-io-v1
-  - Name: 'BuildLog [build.openshift.io/v1]'
-    File: buildlog-build-openshift-io-v1
-  - Name: 'BuildRequest [build.openshift.io/v1]'
-    File: buildrequest-build-openshift-io-v1
-  - Name: 'CronJob [batch/v1]'
-    File: cronjob-batch-v1
-  - Name: 'DaemonSet [apps/v1]'
-    File: daemonset-apps-v1
-  - Name: 'Deployment [apps/v1]'
-    File: deployment-apps-v1
-  - Name: 'DeploymentConfig [apps.openshift.io/v1]'
-    File: deploymentconfig-apps-openshift-io-v1
-  - Name: 'DeploymentConfigRollback [apps.openshift.io/v1]'
-    File: deploymentconfigrollback-apps-openshift-io-v1
-  - Name: 'DeploymentLog [apps.openshift.io/v1]'
-    File: deploymentlog-apps-openshift-io-v1
-  - Name: 'DeploymentRequest [apps.openshift.io/v1]'
-    File: deploymentrequest-apps-openshift-io-v1
-  - Name: 'Job [batch/v1]'
-    File: job-batch-v1
-  - Name: 'Pod [undefined/v1]'
-    File: pod-v1
-  - Name: 'ReplicationController [undefined/v1]'
-    File: replicationcontroller-v1
-  - Name: 'ReplicaSet [apps/v1]'
-    File: replicaset-apps-v1
-  - Name: 'StatefulSet [apps/v1]'
-    File: statefulset-apps-v1
----
-Name: Lightspeed
-Dir: lightspeed
-Distros: openshift-enterprise
-Topics:
-- Name: About Lightspeed
-  Dir: about
-  Topics:
-  - Name: OpenShift Lightspeed overview
-    File: ols-openshift-lightspeed-overview
----
-Name: Service Mesh
-Dir: service_mesh
-Distros: openshift-enterprise
-Topics:
-- Name: Service Mesh 3.x
-  Dir: v3x
-  Topics:
-  - Name: OpenShift Service Mesh 3.0 TP1 overview
-    File: ossm-service-mesh-3-0-overview
-- Name: Service Mesh 2.x
-  Dir: v2x
-  Topics:
-  - Name: About OpenShift Service Mesh
-    File: ossm-about
-  - Name: Service Mesh 2.x release notes
-    File: servicemesh-release-notes
-  - Name: Upgrading Service Mesh
-    File: upgrading-ossm
-  - Name: Understanding Service Mesh
-    File: ossm-architecture
-  - Name: Service Mesh deployment models
-    File: ossm-deployment-models
-  - Name: Service Mesh and Istio differences
-    File: ossm-vs-community
-  - Name: Preparing to install Service Mesh
-    File: preparing-ossm-installation
-  - Name: Installing the Operators
-    File: installing-ossm
-  - Name: Creating the ServiceMeshControlPlane
-    File: ossm-create-smcp
-  - Name: Adding services to a service mesh
-    File: ossm-create-mesh
-  - Name: Enabling sidecar injection
-    File: prepare-to-deploy-applications-ossm
-  - Name: Managing users and profiles
-    File: ossm-profiles-users
-  - Name: Security
-    File: ossm-security
-  - Name: Traffic management
-    File: ossm-traffic-manage
-  - Name: Gateway migration
-    File: ossm-gateway-migration
-  - Name: Route migration
-    File: ossm-route-migration
-  - Name: Metrics, logs, and traces
-    File: ossm-observability
-  - Name: Performance and scalability
-    File: ossm-performance-scalability
-  - Name: Deploying to production
-    File: ossm-deploy-production
-  - Name: Federation
-    File: ossm-federation
-  - Name: Extensions
-    File: ossm-extensions
-  - Name: OpenShift Service Mesh Console plugin
-    File: ossm-kiali-ossmc-plugin
-  - Name: 3scale WebAssembly for 2.1
-    File: ossm-threescale-webassembly-module
-  - Name: 3scale Istio adapter for 2.0
-    File: threescale-adapter
-  - Name: Troubleshooting Service Mesh
-    File: ossm-troubleshooting-istio
-  - Name: Control plane configuration reference
-    File: ossm-reference-smcp
-  - Name: Kiali configuration reference
-    File: ossm-reference-kiali
-  - Name: Jaeger configuration reference
-    File: ossm-reference-jaeger
-  - Name: Uninstalling Service Mesh
-    File: removing-ossm
-- Name: Service Mesh 1.x
-  Dir: v1x
-  Topics:
-  - Name: Service Mesh 1.x release notes
-    File: servicemesh-release-notes
-  - Name: Service Mesh architecture
-    File: ossm-architecture
-  - Name: Service Mesh and Istio differences
-    File: ossm-vs-community
-  - Name: Preparing to install Service Mesh
-    File: preparing-ossm-installation
-  - Name: Installing Service Mesh
-    File: installing-ossm
-  - Name: Security
-    File: ossm-security
-  - Name: Traffic management
-    File: ossm-traffic-manage
-  - Name: Deploying applications on Service Mesh
-    File: prepare-to-deploy-applications-ossm
-  - Name: Data visualization and observability
-    File: ossm-observability
-  - Name: Custom resources
-    File: ossm-custom-resources
-  - Name: 3scale Istio adapter for 1.x
-    File: threescale-adapter
-  - Name: Removing Service Mesh
-    File: removing-ossm
----
-Name: Virtualization
-Dir: virt
-Distros: openshift-enterprise,openshift-origin
-Topics:
-- Name: About
-  Dir: about_virt
-  Topics:
-  - Name: About OpenShift Virtualization
-    File: about-virt
-    Distros: openshift-enterprise
-  - Name: About OKD Virtualization
-    File: about-virt
-    Distros: openshift-origin
-  - Name: Supported limits
-    File: virt-supported-limits
-  - Name: Security policies
-    File: virt-security-policies
-  - Name: Architecture
-    File: virt-architecture
-    Distros: openshift-enterprise
-- Name: Release notes
-  Dir: release_notes
-  Topics:
-  - Name: OpenShift Virtualization release notes
-    File: virt-release-notes-placeholder
-    Distros: openshift-enterprise
-  # - Name: OKD Virtualization release notes
-  #   File: virt-release-notes-placeholder
-  #   Distros: openshift-origin
-- Name: Getting started
-  Dir: getting_started
-  Topics:
-  - Name: Getting started with OpenShift Virtualization
-    File: virt-getting-started
-    Distros: openshift-enterprise
-  - Name: Getting started with OKD Virtualization
-    File: virt-getting-started
-    Distros: openshift-origin
-  - Name: virtctl and libguestfs
-    File: virt-using-the-cli-tools
-    Distros: openshift-enterprise
-- Name: Installing
-  Dir: install
-  Topics:
-  - Name: Preparing your cluster
-    File: preparing-cluster-for-virt
-  - Name: Installing OpenShift Virtualization
-    File: installing-virt
-    Distros: openshift-enterprise
-  - Name: Installing OKD Virtualization
-    File: installing-virt
-    Distros: openshift-origin
-  - Name: Uninstalling OpenShift Virtualization
-    File: uninstalling-virt
-    Distros: openshift-enterprise
-  - Name: Uninstalling OKD Virtualization
-    File: uninstalling-virt
-    Distros: openshift-origin
-- Name: Postinstallation configuration
-  Dir: post_installation_configuration
-  Topics:
-  - Name: Postinstallation configuration
-    File: virt-post-install-config
-  - Name: Node placement rules
-    File: virt-node-placement-virt-components
-  - Name: Network configuration
-    File: virt-post-install-network-config
-  - Name: Storage configuration
-    File: virt-post-install-storage-config
-  - Name: Configuring higher VM workload density
-    File: virt-configuring-higher-vm-workload-density
-  - Name: Configuring certificate rotation
-    File: virt-configuring-certificate-rotation
-- Name: Updating
-  Dir: updating
-  Topics:
-  - Name: Updating OpenShift Virtualization
-    File: upgrading-virt
-    Distros: openshift-enterprise
-  - Name: Updating OKD Virtualization
-    File: upgrading-virt
-    Distros: openshift-origin
-- Name: Creating a virtual machine
-  Dir: creating_vm
-  Topics:
-  # - Name: Overview
-  #   File: virt-basic-vm-overview
-  # - Name: Setting up your environment
-  #   File: virt-setting-up-environment
-  - Name: Creating VMs from instance types
-    File: virt-creating-vms-from-instance-types
-  - Name: Creating VMs from templates
-    File: virt-creating-vms-from-templates
-- Name: Advanced VM creation
-  Dir: creating_vms_advanced
-  Topics:
-  - Name: Advanced virtual machine creation overview
-    File: advanced-vm-creation-overview
-  - Name: Creating VMs in the web console
-    Dir: creating_vms_advanced_web
-    Topics:
-    - Name: Creating VMs from Red Hat images
-      File: virt-creating-vms-from-rh-images-overview
-    - Name: Creating VMs by importing images from web pages
-      File: virt-creating-vms-from-web-images
-    - Name: Creating VMs by uploading images
-      File: virt-creating-vms-uploading-images
-    - Name: Cloning VMs
-      File: virt-cloning-vms
-  - Name: Creating VMs using the CLI
-    Dir: creating_vms_cli
-    Topics:
-    - Name: Creating virtual machines from the command line
-      File: virt-creating-vms-from-cli
-    - Name: Creating VMs by using container disks
-      File: virt-creating-vms-from-container-disks
-    - Name: Creating VMs by cloning PVCs
-      File: virt-creating-vms-by-cloning-pvcs
-- Name: Managing VMs
-  Dir: managing_vms
-  Topics:
-  - Name: Installing the QEMU guest agent and VirtIO drivers
-    File: virt-installing-qemu-guest-agent
-  - Name: Connecting to VM consoles
-    File: virt-accessing-vm-consoles
-  - Name: Configuring SSH access to VMs
-    File: virt-accessing-vm-ssh
-  - Name: Editing virtual machines
-    File: virt-edit-vms
-  - Name: Editing boot order
-    File: virt-edit-boot-order
-  - Name: Deleting virtual machines
-    File: virt-delete-vms
-  - Name: Exporting virtual machines
-    File: virt-exporting-vms
-  - Name: Managing virtual machine instances
-    File: virt-manage-vmis
-  - Name: Controlling virtual machine states
-    File: virt-controlling-vm-states
-  - Name: Using virtual Trusted Platform Module devices
-    File: virt-using-vtpm-devices
-  - Name: Managing virtual machines with OpenShift Pipelines
-    File: virt-managing-vms-openshift-pipelines
-  - Name: Advanced virtual machine management
-    Dir: advanced_vm_management
-    Topics:
-    - Name: Working with resource quotas for virtual machines
-      File: virt-working-with-resource-quotas-for-vms
-    - Name: Configuring the Application-Aware Quota Operator
-      File: virt-understanding-aaq-operator
-    - Name: Specifying nodes for virtual machines
-      File: virt-specifying-nodes-for-vms
-    - Name: Configuring the default CPU model
-      File: virt-configuring-default-cpu-model
-    - Name: UEFI mode for virtual machines
-      File: virt-uefi-mode-for-vms
-    - Name: Configuring PXE booting for virtual machines
-      File: virt-configuring-pxe-booting
-    - Name: Using huge pages with virtual machines
-      File: virt-using-huge-pages-with-vms
-    - Name: Enabling dedicated resources for a virtual machine
-      File: virt-dedicated-resources-vm
-    - Name: Scheduling virtual machines
-      File: virt-schedule-vms
-    - Name: Configuring PCI passthrough
-      File: virt-configuring-pci-passthrough
-    - Name: Configuring virtual GPUs
-      File: virt-configuring-virtual-gpus
-    - Name: Configuring USB host passthrough
-      File: virt-configuring-usb-host-passthrough
-    - Name: Enabling descheduler evictions on virtual machines
-      File: virt-enabling-descheduler-evictions
-    - Name: About high availability for virtual machines
-      File: virt-high-availability-for-vms
-    - Name: Control plane tuning
-      File: virt-vm-control-plane-tuning
-    - Name: Assigning compute resources
-      File: virt-assigning-compute-resources
-    - Name: About multi-queue functionality
-      File: virt-about-multi-queue
-    - Name: Managing virtual machines by using OpenShift GitOps
-      File: virt-managing-virtual-machines-by-using-openshift-gitops
-  - Name: VM disks
-    Dir: virtual_disks
-    Topics:
-    - Name: Hot-plugging VM disks
-      File: virt-hot-plugging-virtual-disks
-    - Name: Expanding VM disks
-      File: virt-expanding-vm-disks
-    - Name: Configuring shared volumes
-      File: virt-configuring-shared-volumes-for-vms
-    - Name: Migrating VM disks to a different storage class
-      File: virt-migrating-storage-class
-- Name: Networking
-  Dir: vm_networking
-  Topics:
-  - Name: Networking configuration overview
-    File: virt-networking-overview
-  - Name: Connecting a VM to the default pod network
-    File: virt-connecting-vm-to-default-pod-network
-  - Name: Connecting a VM to a primary user-defined network
-    File: virt-connecting-vm-to-primary-udn
-  - Name: Exposing a VM by using a service
-    File: virt-exposing-vm-with-service
-  - Name: Accessing a VM by using its internal FQDN
-    File: virt-accessing-vm-internal-fqdn
-  - Name: Connecting a VM to a Linux bridge network
-    File: virt-connecting-vm-to-linux-bridge
-  - Name: Connecting a VM to an SR-IOV network
-    File: virt-connecting-vm-to-sriov
-  - Name: Using DPDK with SR-IOV
-    File: virt-using-dpdk-with-sriov
-  - Name: Connecting a VM to an OVN-Kubernetes secondary network
-    File: virt-connecting-vm-to-ovn-secondary-network
-  - Name: Hot plugging secondary network interfaces
-    File: virt-hot-plugging-network-interfaces
-  - Name: Connecting a VM to a service mesh
-    File: virt-connecting-vm-to-service-mesh
-  - Name: Configuring a dedicated network for live migration
-    File: virt-dedicated-network-live-migration
-  - Name: Configuring and viewing IP addresses
-    File: virt-configuring-viewing-ips-for-vms
-  - Name: Accessing a VM by using its external FQDN
-    File: virt-accessing-vm-secondary-network-fqdn
-  - Name: Managing MAC address pools for network interfaces
-    File: virt-using-mac-address-pool-for-vms
-- Name: Storage
-  Dir: storage
-  Topics:
-  - Name: Storage configuration overview
-    File: virt-storage-config-overview
-  - Name: Configuring storage profiles
-    File: virt-configuring-storage-profile
-  - Name: Managing automatic boot source updates
-    File: virt-automatic-bootsource-updates
-  - Name: Reserving PVC space for file system overhead
-    File: virt-reserving-pvc-space-fs-overhead
-  - Name: Configuring local storage by using HPP
-    File: virt-configuring-local-storage-with-hpp
-  - Name: Enabling user permissions to clone data volumes across namespaces
-    File: virt-enabling-user-permissions-to-clone-datavolumes
-  - Name: Configuring CDI to override CPU and memory quotas
-    File: virt-configuring-cdi-for-namespace-resourcequota
-  - Name: Preparing CDI scratch space
-    File: virt-preparing-cdi-scratch-space
-  - Name: Using preallocation for data volumes
-    File: virt-using-preallocation-for-datavolumes
-  - Name: Managing data volume annotations
-    File: virt-managing-data-volume-annotations
-  - Name: Understanding virtual machine storage with the CSI paradigm
-    File: virt-storage-with-csi-paradigm
-# Virtual machine live migration
-- Name: Live migration
-  Dir: live_migration
-  Topics:
-  - Name: About live migration
-    File: virt-about-live-migration
-  - Name: Configuring live migration
-    File: virt-configuring-live-migration
-  - Name: Initiating and canceling live migration
-    File: virt-initiating-live-migration
-# Node maintenance mode
-- Name: Nodes
-  Dir: nodes
-  Topics:
-  - Name: Node maintenance
-    File: virt-node-maintenance
-  - Name: Managing node labeling for obsolete CPU models
-    File: virt-managing-node-labeling-obsolete-cpu-models
-  - Name: Preventing node reconciliation
-    File: virt-preventing-node-reconciliation
-  - Name: Deleting a failed node to trigger VM failover
-    File: virt-triggering-vm-failover-resolving-failed-node
-  - Name: Activating kernel samepage merging (KSM)
-    File: virt-activating-ksm
-- Name: Monitoring
-  Dir: monitoring
-  Topics:
-  - Name: Monitoring overview
-    File: virt-monitoring-overview
-  - Name: Cluster checkup framework
-    File: virt-running-cluster-checkups
-  - Name: Prometheus queries for virtual resources
-    File: virt-prometheus-queries
-  - Name: Virtual machine custom metrics
-    File: virt-exposing-custom-metrics-for-vms
-  - Name: Virtual machine downward metrics
-    File: virt-exposing-downward-metrics
-  - Name: Virtual machine health checks
-    File: virt-monitoring-vm-health
-  - Name: Runbooks
-    File: virt-runbooks
-- Name: Support
-  Dir: support
-  Topics:
-  - Name: Support overview
-    File: virt-support-overview
-  - Name: Collecting data for Red Hat Support
-    File: virt-collecting-virt-data
-    Distros: openshift-enterprise
-  - Name: Troubleshooting
-    File: virt-troubleshooting
-- Name: Backup and restore
-  Dir: backup_restore
-  Topics:
-  - Name: Backup and restore by using VM snapshots
-    File: virt-backup-restore-snapshots
-  - Name: Backing up and restoring virtual machines
-    File: virt-backup-restore-overview
-  - Name: Disaster recovery
-    File: virt-disaster-recovery
-# - Name: Collecting OKD Virtualization data for community report
-#   File: virt-collecting-virt-data
-#   Distros: openshift-origin
+Distros: openshift-coo
+Topics:
+- Name: Installing the Cluster Observability Operator
+  File: installing-the-cluster-observability-operator
+- Name: Installing end-to-end observability services
+  File: installing-end-to-end-observability
+- Name: Configuring the Cluster Observability Operator to monitor a service
+  File: configuring-the-cluster-observability-operator-to-monitor-a-service
+---
+Name: UI plugins for Red Hat OpenShift Cluster Observability Operator
+Dir: ui_plugins
+Distros: openshift-coo
+Topics:
+- Name: Observability UI plugins overview
+  File: observability-ui-plugins-overview
+- Name: Monitoring UI plugin
+  File: monitoring-ui-plugin
+- Name: Logging UI plugin
+  File: logging-ui-plugin
+- Name: Distributed tracing UI plugin
+  File: distributed-tracing-ui-plugin
+- Name: Troubleshooting UI plugin
+  File: troubleshooting-ui-plugin
+#- Name: Dashboard UI plugin
+#  File: dashboard-ui-plugin
+---
+Name: API reference for Red Hat OpenShift Cluster Observability Operator
+Dir: api
+Distros: openshift-coo
+Topics:
+- Name: Monitoring API reference
+  File: api-monitoring-package
+#- Name: Observability API reference
+#  File: api-observability-package
diff --git a/_topic_maps/_topic_map_ms.yml b/_topic_maps/_topic_map_ms.yml
deleted file mode 100644
index c75992c36dec..000000000000
--- a/_topic_maps/_topic_map_ms.yml
+++ /dev/null
@@ -1,569 +0,0 @@
-# This configuration file dictates the organization of the topic groups and
-# topics on the main page of the doc site for this branch. Each record
-# consists of the following:
-#
-# ---                                  <= Record delimiter
-# Name: Origin of the Species          <= Display name of topic group
-# Dir: origin_of_the_species           <= Directory name of topic group
-# Topics:
-#   - Name: The Majestic Marmoset      <= Topic name
-#     File: the_majestic_marmoset      <= Topic file under group dir +/-
-#   - Name: The Curious Crocodile      <= Topic 2 name
-#     File: the_curious_crocodile      <= Topic 2 file
-#   - Name: The Numerous Nematodes     <= Sub-topic group name
-#     Dir: the_numerous_nematodes      <= Sub-topic group dir
-#     Topics:
-#       - Name: The Wily Worm          <= Sub-topic name
-#         File: the_wily_worm          <= Sub-topic file under <group_dir>/<subtopic_dir>
-#       - Name: The Acrobatic Ascarid  <= Sub-topic 2 name
-#         File: the_acrobatic_ascarid  <= Sub-topic 2 file under <group_dir>/<subtopic_dir>
-#
-# The ordering of the records in this document determines the ordering of the
-# topic groups and topics on the main page.
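For reference, a minimal record following the conventions documented in the comment above might look like the following sketch. The group, topic, directory, and file names here are illustrative placeholders only, not entries from this topic map; the optional Distros key, used throughout these files, restricts a record or an individual topic to the named distribution builds.

---
Name: Example topic group
Dir: example_topic_group
Distros: microshift
Topics:
- Name: Example topic
  File: example-topic
- Name: Example sub-topic group
  Dir: example_subtopic_group
  Topics:
  - Name: Example sub-topic
    File: example-subtopic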
----
-Name: Understanding MicroShift
-Dir: microshift_welcome
-Distros: microshift
-Topics:
-- Name: Understanding Red Hat build of MicroShift
-  File: index
----
-Name: Red Hat build of MicroShift release notes
-Dir: microshift_release_notes
-Distros: microshift
-Topics:
-- Name: Red Hat build of MicroShift 4.19 release notes
-  File: microshift-4-19-release-notes
----
-Name: Getting ready to install MicroShift
-Dir: microshift_install_get_ready
-Distros: microshift
-Topics:
-- Name: Getting ready to install MicroShift
-  File: microshift-install-get-ready
-- Name: Using FIPS mode
-  File: microshift-fips
-- Name: Understanding system health checks
-  File: microshift-greenboot
----
-Name: Installing with an RPM package
-Dir: microshift_install_rpm
-Distros: microshift
-Topics:
-- Name: Installing with an RPM package
-  File: microshift-install-rpm
----
-Name: Installing optional RPM packages
-Dir: microshift_install_rpm_opt
-Distros: microshift
-Topics:
-- Name: Installing optional RPM packages
-  File: microshift-install-optional-rpms
----
-Name: Embedding in a RHEL for Edge image
-Dir: microshift_install_rpm_ostree
-Distros: microshift
-Topics:
-- Name: Embedding in a RHEL for Edge image using image builder
-  File: microshift-embed-in-rpm-ostree
-- Name: Mirroring container images for disconnected installations
-  File: microshift-deploy-with-mirror-registry
-- Name: Embedding in a RHEL for Edge image for offline use
-  File: microshift-embed-in-rpm-ostree-offline-use
----
-Name: Installing with RHEL image mode
-Dir: microshift_install_bootc
-Distros: microshift
-Topics:
-- Name: Understanding image mode for RHEL
-  File: microshift-about-rhel-image-mode
-- Name: Installing a bootc image
-  File: microshift-install-bootc-image
-- Name: Running the bootc image
-  File: microshift-install-running-bootc-image-vm
----
-Name: Using RHEL Kickstarts
-Dir: microshift_install_kickstarts
-Distros: microshift
-Topics:
-- Name: Using a Kickstart file for automating installation
-  File: microshift-rhel-kickstarts
----
-Name: Updating
-Dir: microshift_updating
-Distros: microshift
-Topics:
-- Name: Update options
-  File: microshift-update-options
-- Name: Updates with rpm-ostree systems
-  File: microshift-update-rpms-ostree
-- Name: Manual updates with RPMs
-  File: microshift-update-rpms-manually
-- Name: Listing update package contents
-  File: microshift-list-update-contents
----
-Name: Support
-Dir: microshift_support
-Distros: microshift
-Topics:
-- Name: The etcd service
-  File: microshift-etcd
-- Name: The sos report tool
-  File: microshift-sos-report
-- Name: Getting your cluster ID
-  File: microshift-getting-cluster-id
-- Name: Getting support
-  File: microshift-getting-support
-- Name: Remote health monitoring with a connected cluster
-  File: microshift-remote-cluster-monitoring
----
-Name: Configuring
-Dir: microshift_configuring
-Distros: microshift
-Topics:
-- Name: Using the MicroShift configuration file
-  File: microshift-using-config-yaml
-- Name: Configuring IPv6 networking
-  File: microshift-nw-ipv6-config
-- Name: Using ingress control for a MicroShift cluster
-  File: microshift-ingress-controller
-- Name: Disabling LVMS CSI provider and CSI snapshot
-  File: microshift-disable-lvms-csi-provider-csi-snapshot
-- Name: Checking the status of Greenboot health checks
-  File: microshift-greenboot-checking-status
-- Name: Cluster access with kubeconfig
-  File: microshift-cluster-access-kubeconfig
-- Name: Configuring MicroShift authentication and security
-  Dir: microshift_auth_security
-  Topics:
-  - Name: Using custom certificate authorities
-    File: microshift-custom-ca
-  - Name: Configuring TLS security profiles
-    File: microshift-tls-config
-  - Name: Configuring audit logging policies
-    File: microshift-audit-logs-config
-- Name: Configuring low latency
-  Dir: microshift_low_latency
-  Topics:
-  - Name: Configuring low latency
-    File: microshift-low-latency
-  - Name: Workload partitioning
-    File: microshift-workload-partitioning
----
-Name: Networking
-Dir: microshift_networking
-Distros: microshift
-Topics:
-- Name: About the networking plugin
-  File: microshift-cni
-- Name: Using networking settings
-  File: microshift-networking-settings
-- Name: Configuring the router
-  File: microshift-nw-router
-- Name: Network policies
-  Dir: microshift_network_policy
-  Topics:
-  - Name: About network policies
-    File: microshift-network-policy-index
-  - Name: Creating network policies
-    File: microshift-creating-network-policy
-  - Name: Editing network policies
-    File: microshift-editing-network-policy
-  - Name: Deleting network policies
-    File: microshift-deleting-network-policy
-  - Name: Viewing network policies
-    File: microshift-viewing-network-policy
-- Name: Multiple networks
-  Dir: microshift_multiple_networks
-  Topics:
-  - Name: About using multiple networks
-    File: microshift-cni-multus
-  - Name: Configuring and using multiple networks
-    File: microshift-cni-multus-using
-- Name: Configuring routes
-  File: microshift-configuring-routes
-- Name: Firewall configuration
-  File: microshift-firewall
-- Name: Networking settings for fully disconnected hosts
-  File: microshift-disconnected-network-config
----
-Name: Storage
-Dir: microshift_storage
-Distros: microshift
-Topics:
-- Name: About storage
-  File: index
-- Name: Understanding ephemeral storage
-  File: understanding-ephemeral-storage-microshift
-- Name: Generic ephemeral volumes
-  File: generic-ephemeral-volumes-microshift
-- Name: Understanding persistent storage
-  File: understanding-persistent-storage-microshift
-- Name: Expanding persistent volumes
-  File: expanding-persistent-volumes-microshift
-- Name: Dynamic storage using the LVMS plugin
-  File: microshift-storage-plugin-overview
-- Name: Working with volume snapshots
-  File: volume-snapshots-microshift
-- Name: Understanding storage migration
-  File: microshift-storage-migration
----
-Name: Running applications
-Dir: microshift_running_apps
-Distros: microshift
-Topics:
-- Name: Using Kustomize to deploy applications
-  File: microshift-applications
-- Name: Deleting or updating Kustomize manifest resources
-  File: microshift-deleting-resource-manifests
-- Name: Embedding applications on RHEL for Edge
-  File: microshift-embedded-apps-on-rhel-edge
-- Name: Embedding applications for offline use
-  File: microshift-embed-apps-offline-use
-- Name: Embedding applications tutorial
-  File: microshift-embedding-apps-tutorial
-- Name: Creating application or workload health check scripts
-  File: microshift-greenboot-workload-scripts
-- Name: Automating application management with GitOps
-  File: microshift-gitops
-- Name: Pod security authentication and authorization
-  File: microshift-authentication
-- Name: Operators
-  Dir: microshift_operators
-  Topics:
-  - Name: Using Operators
-    File: microshift-operators
-  - Name: Using Operator Lifecycle Manager
-    File: microshift-operators-olm
-  - Name: Creating custom catalogs with oc-mirror
-    File: microshift-operators-oc-mirror
-  - Name: Adding OLM-based Operators to a disconnected cluster
-    File: microshift-operators-oc-mirror-disconnected
----
-Name: Backup and restore
-Dir: microshift_backup_and_restore
-Distros: microshift
-Topics:
-- Name: Backing up and restoring data
-  File: microshift-backup-and-restore
-- Name: Automated recovery from manual backups
-  File: microshift-auto-recover-manual-backup
----
-Name: Troubleshooting
-Dir: microshift_troubleshooting
-Distros: microshift
-Topics:
-- Name: Check your version
-  File: microshift-version
-- Name: Troubleshoot the cluster
-  File: microshift-troubleshoot-cluster
-- Name: Troubleshoot installation issues
-  File: microshift-installing-troubleshooting
-- Name: Troubleshoot backup and restore
-  File: microshift-troubleshoot-backup-restore
-- Name: Troubleshoot updates
-  File: microshift-troubleshoot-updates
-- Name: Check the audit logs
-  File: microshift-audit-logs
-- Name: Troubleshoot etcd
-  File: microshift-etcd-troubleshoot
-- Name: Additional information
-  File: microshift-things-to-know
-- Name: Data cleanup
-  File: microshift-cleanup-data
----
-Name: CLI tools
-Dir: microshift_cli_ref
-Distros: microshift
-Topics:
-- Name: CLI tools introduction
-  File: microshift-cli-tools-introduction
-- Name: Installing the OpenShift CLI
-  File: microshift-oc-cli-install
-- Name: Configuring the OpenShift CLI
-  File: microshift-oc-config
-- Name: Using the OpenShift CLI
-  File: microshift-cli-using-oc
-- Name: Using oc and kubectl
-  File: microshift-usage-oc-kubectl
-- Name: List of oc CLI commands
-  File: microshift-oc-cli-commands-list
----
-Name: API reference
-Dir: microshift_rest_api
-Distros: microshift
-Topics:
-- Name: Understanding API tiers
-  File: understanding-api-support-tiers
-- Name: API compatibility guidelines
-  File: understanding-compatibility-guidelines
-- Name: API index
-  File: index
-- Name: API object reference
-  Dir: objects
-  Topics:
-  - Name: API objects reference
-    File: index
-- Name: Extension APIs
-  Dir: api_extensions_apis
-  Topics:
-  - Name: Extension APIs
-    File: api-extensions-apis-index
-  - Name: CustomResourceDefinition [apiextensions.k8s.io/v1]
-    File: customresourcedefinition-apiextensions-k8s-io-v1
-- Name: Registration APIs
-  Dir: api_registration_apis
-  Topics:
-  - Name: Registration APIs
-    File: api-registration-apis-index
-  - Name: APIService [apiregistration.k8s.io/v1]
-    File: apiservice-apiregistration-k8s-io-v1
-- Name: Apps APIs
-  Dir: apps_apis
-  Topics:
-  - Name: Apps APIs
-    File: apps-apis-index
-  - Name: ControllerRevision [apps/v1]
-    File: controllerrevision-apps-v1
-  - Name: DaemonSet [apps/v1]
-    File: daemonset-apps-v1
-  - Name: Deployment [apps/v1]
-    File: deployment-apps-v1
-  - Name: ReplicaSet [apps/v1]
-    File: replicaset-apps-v1
-  - Name: StatefulSet [apps/v1]
-    File: statefulset-apps-v1
-- Name: Authentication APIs
-  Dir: authentication_apis
-  Topics:
-  - Name: Authentication APIs
-    File: authentication-apis-index
-  - Name: TokenRequest [authentication.k8s.io/v1]
-    File: tokenrequest-authentication-k8s-io-v1
-  - Name: TokenReview [authentication.k8s.io/v1]
-    File: tokenreview-authentication-k8s-io-v1
-- Name: Authorization APIs
-  Dir: authorization_apis
-  Topics:
-  - Name: Authorization APIs
-    File: authorization-apis-index
-  - Name: LocalSubjectAccessReview [authorization.k8s.io/v1]
-    File: localsubjectaccessreview-authorization-k8s-io-v1
-  - Name: SelfSubjectAccessReview [authorization.k8s.io/v1]
-    File: selfsubjectaccessreview-authorization-k8s-io-v1
-  - Name: SelfSubjectRulesReview [authorization.k8s.io/v1]
-    File: selfsubjectrulesreview-authorization-k8s-io-v1
-  - Name: SubjectAccessReview [authorization.k8s.io/v1]
-    File: subjectaccessreview-authorization-k8s-io-v1
-- Name: Autoscaling APIs
-  Dir: autoscaling_apis
-  Topics:
-  - Name: Autoscaling APIs
-    File: autoscaling-apis-index
-  - Name: HorizontalPodAutoscaler [autoscaling/v2]
-    File: horizontalpodautoscaler-autoscaling-v2
-  - Name: Scale [autoscaling/v1]
-    File: scale-autoscaling-v1
-- Name: Batch APIs
-  Dir: batch_apis
-  Topics:
-  - Name: Batch APIs
-    File: batch-apis-index
-  - Name: CronJob [batch/v1]
-    File: cronjob-batch-v1
-  - Name: Job [batch/v1]
-    File: job-batch-v1
-- Name: Certificates APIs
-  Dir: certificates_apis
-  Topics:
-  - Name: Certificates APIs
-    File: certificates-apis-index
-  - Name: CertificateSigningRequest [certificates.k8s.io/v1]
-    File: certificatesigningrequest-certificates-k8s-io-v1
-- Name: Coordination APIs
-  Dir: coordination_apis
-  Topics:
-  - Name: Coordination APIs
-    File: coordination-apis-index
-  - Name: Lease [coordination.k8s.io/v1]
-    File: lease-coordination-k8s-io-v1
-- Name: Core APIs
-  Dir: core_apis
-  Topics:
-  - Name: Core APIs
-    File: core-apis-index
-  - Name: Binding [v1]
-    File: binding-v1
-  - Name: ComponentStatus [v1]
-    File: componentstatus-v1
-  - Name: ConfigMap [v1]
-    File: configmap-v1
-  - Name: Endpoints [v1]
-    File: endpoints-v1
-  - Name: Event [v1]
-    File: event-v1
-  - Name: LimitRange [v1]
-    File: limitrange-v1
-  - Name: Namespace [v1]
-    File: namespace-v1
-  - Name: Node [v1]
-    File: node-v1
-  - Name: PersistentVolume [v1]
-    File: persistentvolume-v1
-  - Name: PersistentVolumeClaim [v1]
-    File: persistentvolumeclaim-v1
-  - Name: Pod [v1]
-    File: pod-v1
-  - Name: PodTemplate [v1]
-    File: podtemplate-v1
-  - Name: ReplicationController [v1]
-    File: replicationcontroller-v1
-  - Name: ResourceQuota [v1]
-    File: resourcequota-v1
-  - Name: Secret [v1]
-    File: secret-v1
-  - Name: Service [v1]
-    File: service-v1
-  - Name: ServiceAccount [v1]
-    File: serviceaccount-v1
-- Name: Discovery APIs
-  Dir: discovery_apis
-  Topics:
-  - Name: Discovery APIs
-    File: discovery-apis-index
-  - Name: EndpointSlice [discovery.k8s.io/v1]
-    File: endpointslice-discovery-k8s-io-v1
-- Name: Events APIs
-  Dir: events_apis
-  Topics:
-  - Name: Events APIs
-    File: events-apis-index
-  - Name: Event [events.k8s.io/v1]
-    File: event-events-k8s-io-v1
-- Name: Flow Control APIs
-  Dir: flow_control_apis
-  Topics:
-  - Name: Flow Control APIs
-    File: flow-control-apis-index
-  - Name: FlowSchema [flowcontrol.apiserver.k8s.io/v1beta3]
-    File: flowschema-flowcontrol-apiserver-k8s-io-v1beta3
-  - Name: PriorityLevelConfiguration [flowcontrol.apiserver.k8s.io/v1beta3]
-    File: prioritylevelconfiguration-flowcontrol-apiserver-k8s-io-v1beta3
-- Name: Networking APIs
-  Dir: networking_apis
-  Topics:
-  - Name: Networking APIs
-    File: networking-apis-index
-  - Name: Ingress [networking.k8s.io/v1]
-    File: ingress-networking-k8s-io-v1
-  - Name: IngressClass [networking.k8s.io/v1]
-    File: ingressclass-networking-k8s-io-v1
-  - Name: NetworkPolicy [networking.k8s.io/v1]
-    File: networkpolicy-networking-k8s-io-v1
-- Name: Node APIs
-  Dir: node_apis
-  Topics:
-  - Name: Node APIs
-    File: node-apis-index
-  - Name: RuntimeClass [node.k8s.io/v1]
-    File: runtimeclass-node-k8s-io-v1
-- Name: Policy APIs
-  Dir: policy_apis
-  Topics:
-  - Name: Policy APIs
-    File: policy-apis-index
-  - Name: Eviction [policy/v1]
-    File: eviction-policy-v1
-  - Name: PodDisruptionBudget [policy/v1]
-    File: poddisruptionbudget-policy-v1
-- Name: RBAC APIs
-  Dir: rbac_apis
-  Topics:
-  - Name: RBAC APIs
-    File: rbac-apis-index
-  - Name: ClusterRole [rbac.authorization.k8s.io/v1]
-    File: clusterrole-rbac-authorization-k8s-io-v1
-  - Name: ClusterRoleBinding [rbac.authorization.k8s.io/v1]
-    File: clusterrolebinding-rbac-authorization-k8s-io-v1
-  - Name: Role [rbac.authorization.k8s.io/v1]
-    File: role-rbac-authorization-k8s-io-v1
-  - Name: RoleBinding [rbac.authorization.k8s.io/v1]
-    File: rolebinding-rbac-authorization-k8s-io-v1
-- Name: Network APIs
-  Dir: network_apis
-  Topics:
-  - Name: Network APIs
-    File: network-apis-index
-  - Name: Route [route.openshift.io/v1]
-    File: route-route-openshift-io-v1
-- Name: Scheduling APIs
-  Dir: scheduling_apis
-  Topics:
-  - Name: Scheduling APIs
-    File: scheduling-apis-index
-  - Name: PriorityClass [scheduling.k8s.io/v1]
-    File: priorityclass-scheduling-k8s-io-v1
-- Name: Security APIs
-  Dir: security_apis
-  Topics:
-  - Name: Security APIs
-    File: security-apis-index
-  - Name: SecurityContextConstraints [security.openshift.io/v1]
-    File: securitycontextconstraints-security-openshift-io-v1
-- Name: Security-Internal APIs
-  Dir: security_internal_apis
-  Topics:
-  - Name: Security Internal APIs
-    File: security-internal-apis-index
-  - Name: RangeAllocation [security.internal.openshift.io/v1]
-    File: rangeallocation-security-internal-openshift-io-v1
-- Name: Snapshot APIs
-  Dir: snapshot_apis
-  Topics:
-  - Name: CSI Snapshot APIs
-    File: snapshot-apis-index
-  - Name: VolumeSnapshot [snapshot.storage.k8s.io/v1]
-    File: volumesnapshot-snapshot-storage-k8s-io-v1
-  - Name: VolumeSnapshotClass [snapshot.storage.k8s.io/v1]
-    File: volumesnapshotclass-snapshot-storage-k8s-io-v1
-  - Name: VolumeSnapshotContent [snapshot.storage.k8s.io/v1]
-    File: volumesnapshotcontent-snapshot-storage-k8s-io-v1
- - Name: Storage APIs
-  Dir: storage_apis
-  Topics:
-  - Name: Storage APIs
-    File: storage-apis-index
-  - Name: CSIDriver [storage.k8s.io/v1]
-    File: csidriver-storage-k8s-io-v1
-  - Name: CSINode [storage.k8s.io/v1]
-    File: csinode-storage-k8s-io-v1
-  - Name: CSIStorageCapacity [storage.k8s.io/v1]
-    File: csistoragecapacity-storage-k8s-io-v1
-  - Name: StorageClass [storage.k8s.io/v1]
-    File: storageclass-storage-k8s-io-v1
-  - Name: VolumeAttachment [storage.k8s.io/v1]
-    File: volumeattachment-storage-k8s-io-v1
-- Name: Storage Version Migration APIs
-  Dir: storage_version_migration_apis
-  Topics:
-  - Name: Storage Version Migration APIs
-    File: storage-version-migration-apis-index
-  - Name: StorageVersionMigration [migration.k8s.io/v1alpha1]
-    File: 
storageversionmigration-migration-k8s-io-v1alpha1 -- Name: TopoLVM APIs - Dir: topolvm_apis - Topics: - - Name: TopoLVM APIs - File: topolvm-apis-index - - Name: LogicalVolume [topolvm.io/v1] - File: logicalvolume-topolvm-io-v1 -- Name: Webhook APIs - Dir: webhook_apis - Topics: - - Name: Webhook APIs - File: webhook-apis-index - - Name: MutatingWebhookConfiguration [admissionregistration.k8s.io/v1] - File: mutatingwebhookconfiguration-admissionregistration-k8s-io-v1 - - Name: ValidatingWebhookConfiguration [admissionregistration.k8s.io/v1] - File: validatingwebhookconfiguration-admissionregistration-k8s-io-v1
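The MicroShift topic map above and the _topic_map_osd.yml file deleted next share the same structure, which the file documents in its header comment: a stream of ----delimited YAML records, where each record carries the display Name of a topic group, its Dir, an optional comma-separated Distros filter, and a nested Topics list whose entries either point at a File or open a sub-topic group with a Dir and Topics of their own. As a minimal sketch of how such a map can be walked, assuming PyYAML and taking the script name (topic_toc.py) and the visibility rule (no Distros key means "include in every distro") as illustrative guesses rather than the behavior of the real build tooling:

#!/usr/bin/env python3
"""topic_toc.py: print the nav tree encoded by a ----delimited topic map.

Illustrative sketch only; not part of this repository. Requires PyYAML.
"""
import sys

import yaml


def visible(node, distro):
    # Assumed rule: a node with no Distros key applies to every distro;
    # otherwise the comma-separated list must name the requested distro.
    distros = node.get("Distros")
    if distros is None:
        return True
    return distro in (d.strip() for d in distros.split(","))


def print_topics(topics, distro, depth=1):
    for topic in topics:
        if not visible(topic, distro):
            continue
        # A topic either points at a File or opens a sub-topic group (Dir).
        target = topic.get("File") or topic.get("Dir", "")
        print("  " * depth + f"- {topic['Name']} ({target})")
        print_topics(topic.get("Topics", []), distro, depth + 1)


def main(path, distro):
    with open(path, encoding="utf-8") as fh:
        # safe_load_all yields one parsed document per ----delimited record.
        for record in yaml.safe_load_all(fh):
            if not record or not visible(record, distro):
                continue  # skip comment-only documents and filtered groups
            print(f"{record.get('Name', '?')} [{record.get('Dir', '')}]")
            print_topics(record.get("Topics", []), distro)


if __name__ == "__main__":
    main(sys.argv[1], sys.argv[2] if len(sys.argv) > 2 else "openshift-dedicated")

For example, python3 topic_toc.py _topic_maps/_topic_map_osd.yml openshift-dedicated would print the OpenShift Dedicated navigation tree as it stood before this deletion; the printed groups follow the record order, which (per the header comment below) is what determines the ordering on the doc site's main page.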
diff --git a/_topic_maps/_topic_map_osd.yml b/_topic_maps/_topic_map_osd.yml deleted file mode 100644 index e4174f68a165..000000000000 --- a/_topic_maps/_topic_map_osd.yml +++ /dev/null @@ -1,1726 +0,0 @@ -# This configuration file dictates the organization of the topic groups and -# topics on the main page of the doc site for this branch. Each record -# consists of the following: -# -# --- <= Record delimiter -# Name: Origin of the Species <= Display name of topic group -# Dir: origin_of_the_species <= Directory name of topic group -# Topics: -# - Name: The Majestic Marmoset <= Topic name -# File: the_majestic_marmoset <= Topic file under group dir +/- -# - Name: The Curious Crocodile <= Topic 2 name -# File: the_curious_crocodile <= Topic 2 file -# - Name: The Numerous Nematodes <= Sub-topic group name -# Dir: the_numerous_nematodes <= Sub-topic group dir -# Topics: -# - Name: The Wily Worm <= Sub-topic name -# File: the_wily_worm <= Sub-topic file under <group dir>/<sub-topic dir> -# - Name: The Acrobatic Ascarid <= Sub-topic 2 name -# File: the_acrobatic_ascarid <= Sub-topic 2 file under <group dir>/<sub-topic dir> -# -# The ordering of the records in this document determines the ordering of the -# topic groups and topics on the main page. - --- -Name: About -Dir: welcome -Distros: openshift-dedicated -Topics: -- Name: Welcome - File: index -- Name: Legal notice - File: legal-notice - Distros: openshift-dedicated --- -Name: What's new -Dir: osd_whats_new -Distros: openshift-dedicated -Topics: -- Name: What's new with OpenShift Dedicated - File: osd-whats-new --- -Name: Introduction to OpenShift Dedicated -Dir: osd_architecture -Distros: openshift-dedicated -Topics: -- Name: Understanding OpenShift Dedicated - File: osd-understanding -- Name: Policies and service definition - Dir: osd_policy - Distros: openshift-dedicated - Topics: - - Name: OpenShift Dedicated service definition - File: osd-service-definition - - Name: Responsibility assignment matrix - File: policy-responsibility-matrix - - Name: Understanding process and security for OpenShift Dedicated - File: policy-process-security - - Name: SRE and service account access - File: osd-sre-access - - Name: About availability for OpenShift Dedicated - File: policy-understand-availability - - Name: Update life cycle - File: osd-life-cycle -# Created a new assembly in ROSA/OSD. In OCP, the assembly is in a book that is not in ROSA/OSD -# - Name: About admission plugins -# File: osd-admission-plug-ins -# Distros: openshift-dedicated ---- -Name: Architecture -Dir: architecture -Distros: openshift-dedicated -Topics: -- Name: Architecture overview - File: index -- Name: Product architecture - File: architecture -- Name: Architecture models - File: osd-architecture-models-gcp -- Name: Control plane architecture - File: control-plane -- Name: NVIDIA GPU architecture overview - File: nvidia-gpu-architecture-overview -- Name: Understanding OpenShift development - File: understanding-development -- Name: Admission plugins - File: admission-plug-ins ---- -#Name: Tutorials -#Dir: cloud_experts_tutorials -#Distros: openshift-dedicated -#Topics: -#--- -Name: Red Hat OpenShift Cluster Manager -Dir: ocm -Distros: openshift-dedicated -Topics: -- Name: Red Hat OpenShift Cluster Manager - File: ocm-overview ---- -Name: Planning your environment -Dir: osd_planning -Distros: openshift-dedicated -Topics: -- Name: Limits and scalability - File: osd-limits-scalability -- Name: Customer Cloud Subscriptions on GCP - File: gcp-ccs -- Name: Customer Cloud Subscriptions on AWS - File: aws-ccs ---- -Name: Getting started -Dir: osd_getting_started -Distros: openshift-dedicated -Topics: -- Name: Understanding your cloud deployment options - File: osd-understanding-your-cloud-deployment-options -- Name: Getting started with OpenShift Dedicated - File: osd-getting-started ---- -Name: OpenShift Dedicated clusters on GCP -Dir: osd_gcp_clusters -Distros: openshift-dedicated -Topics: -- Name: Private Service Connect overview - File: creating-a-gcp-psc-enabled-private-cluster -- Name: Creating a cluster on GCP with Workload Identity Federation authentication - File: creating-a-gcp-cluster-with-workload-identity-federation -- Name: Creating a cluster on GCP with Service Account authentication - File: creating-a-gcp-cluster-sa -- Name: Creating a cluster on GCP with a Red Hat cloud account - File: creating-a-gcp-cluster-redhat-account - -#- Name: Configuring your identity providers -# File: config-identity-providers -#- Name: Revoking privileges and access to an OpenShift Dedicated cluster -# File: osd-revoking-cluster-privileges -- Name: Deleting an OpenShift Dedicated cluster on GCP - File: osd-deleting-a-cluster-gcp ---- -Name: OpenShift Dedicated clusters on AWS -Dir: osd_aws_clusters -Distros: openshift-dedicated -Topics: -- Name: Creating a cluster on AWS - File: creating-an-aws-cluster -- Name: Deleting an OpenShift Dedicated cluster on AWS - File: osd-deleting-a-cluster-aws ---- -Name: Support -Dir: support -Distros: openshift-dedicated -Topics: -- Name: Support overview - File: index -- Name: Managing your cluster resources - File: managing-cluster-resources -- Name: Getting support - File: getting-support - Distros: openshift-dedicated -- Name: Remote health monitoring with connected clusters - Dir: remote_health_monitoring - Distros: openshift-dedicated - Topics: - - Name: About remote health monitoring - File: about-remote-health-monitoring - - Name: Showing data collected by remote health monitoring - File: showing-data-collected-by-remote-health-monitoring -# cannot get resource "secrets" in API group "" in the namespace "openshift-config" -# - Name: Opting out of remote health reporting -# File: opting-out-of-remote-health-reporting -# cannot get resource "secrets" in API group "" in the namespace "openshift-config" -# - Name: Enabling remote health reporting -# File:
enabling-remote-health-reporting - - Name: Using Insights to identify issues with your cluster - File: using-insights-to-identify-issues-with-your-cluster - - Name: Using Insights Operator - File: using-insights-operator -# Not supported per Michael McNeill -# - Name: Using remote health reporting in a restricted network -# File: remote-health-reporting-from-restricted-network -# cannot list resource "secrets" in API group "" in the namespace "openshift-config" -# - Name: Importing simple content access entitlements with Insights Operator -# File: insights-operator-simple-access -- Name: Gathering data about your cluster - File: gathering-cluster-data - Distros: openshift-dedicated -- Name: Summarizing cluster specifications - File: summarizing-cluster-specifications - Distros: openshift-dedicated -- Name: Troubleshooting - Dir: troubleshooting - Distros: openshift-dedicated - Topics: -# - Name: Troubleshooting installations -# File: troubleshooting-installations - - Name: Verifying node health - File: verifying-node-health -# cannot create resource "namespaces", cannot patch resource "nodes" -# - Name: Troubleshooting CRI-O container runtime issues -# File: troubleshooting-crio-issues -# requires ostree, butane, and other plug-ins -# - Name: Troubleshooting operating system issues -# File: troubleshooting-operating-system-issues -# Distros: openshift-dedicated -# cannot patch resource "nodes", "nodes/proxy", "namespaces" -# - Name: Troubleshooting network issues -# File: troubleshooting-network-issues -# Distros: openshift-dedicated - - Name: Troubleshooting Operator issues - File: troubleshooting-operator-issues - - Name: Investigating pod issues - File: investigating-pod-issues - - Name: Troubleshooting the Source-to-Image process - File: troubleshooting-s2i - - Name: Troubleshooting storage issues - File: troubleshooting-storage-issues -# Not supported per WINC team -# - Name: Troubleshooting Windows container workload issues -# File: troubleshooting-windows-container-workload-issues - - Name: Investigating monitoring issues - File: investigating-monitoring-issues - - Name: Diagnosing OpenShift CLI (oc) issues - File: diagnosing-oc-issues - - Name: OpenShift Dedicated managed resources - File: sd-managed-resources - Distros: openshift-dedicated ---- -Name: Web console -Dir: web_console -Distros: openshift-dedicated -Topics: -- Name: Web console overview - File: web-console-overview -- Name: Accessing the web console - File: web-console -- Name: Viewing cluster information - File: using-dashboard-to-get-cluster-information -- Name: Adding user preferences - File: adding-user-preferences - Distros: openshift-enterprise,openshift-origin -# cannot patch resource "consoles", insufficient permissions to read any Cluster configuration -#- Name: Configuring the web console -# File: configuring-web-console -# Distros: openshift-dedicated -#- Name: Customizing the web console -# File: customizing-the-web-console -# Distros: openshift-dedicated -- Name: Dynamic plugins - Dir: dynamic-plugin - Distros: openshift-dedicated - Topics: - - Name: Overview of dynamic plugins - File: overview-dynamic-plugin - - Name: Getting started with dynamic plugins - File: dynamic-plugins-get-started - - Name: Deploy your plugin on a cluster - File: deploy-plugin-cluster - - Name: Dynamic plugin example - File: dynamic-plugin-example - - Name: Dynamic plugin reference - File: dynamic-plugins-reference -- Name: Web terminal - Dir: web_terminal - Distros: openshift-dedicated - Topics: - - Name: Installing the web 
terminal - File: installing-web-terminal - # Do not have sufficient permissions to read any cluster configuration. - # - Name: Configuring the web terminal - # File: configuring-web-terminal - - Name: Using the web terminal - File: odc-using-web-terminal - - Name: Troubleshooting the web terminal - File: troubleshooting-web-terminal - - Name: Uninstalling the web terminal - File: uninstalling-web-terminal -- Name: About quick start tutorials - File: creating-quick-start-tutorials - Distros: openshift-dedicated ---- -Name: CLI tools -Dir: cli_reference -Distros: openshift-dedicated -Topics: -- Name: CLI tools overview - File: index -- Name: OpenShift CLI (oc) - Dir: openshift_cli - Topics: - - Name: Getting started with the OpenShift CLI - File: getting-started-cli - - Name: Configuring the OpenShift CLI - File: configuring-cli - - Name: Usage of oc and kubectl commands - File: usage-oc-kubectl - - Name: Managing CLI profiles - File: managing-cli-profiles - - Name: Extending the OpenShift CLI with plugins - File: extending-cli-plugins - # - Name: Managing CLI plugins with Krew - # File: managing-cli-plugins-krew - # Distros: openshift-dedicated - - Name: OpenShift CLI developer command reference - File: developer-cli-commands - - Name: OpenShift CLI administrator command reference - File: administrator-cli-commands - Distros: openshift-dedicated -- Name: Developer CLI (odo) - File: odo-important-update - # Dir: developer_cli_odo - Distros: openshift-dedicated - # Topics: - # - Name: odo release notes - # File: odo-release-notes - # - Name: Understanding odo - # File: understanding-odo - # - Name: Installing odo - # File: installing-odo - # - Name: Configuring the odo CLI - # File: configuring-the-odo-cli - # - Name: odo CLI reference - # File: odo-cli-reference -- Name: Knative CLI (kn) for use with OpenShift Serverless - File: kn-cli-tools - Distros: openshift-dedicated -- Name: Pipelines CLI (tkn) - Dir: tkn_cli - Distros: openshift-dedicated - Topics: - - Name: Installing tkn - File: installing-tkn - - Name: Configuring tkn - File: op-configuring-tkn - - Name: Basic tkn commands - File: op-tkn-reference -- Name: opm CLI - Dir: opm - Distros: openshift-dedicated - Topics: - - Name: Installing the opm CLI - File: cli-opm-install - - Name: opm CLI reference - File: cli-opm-ref -- Name: Operator SDK - Dir: osdk - Distros: openshift-dedicated - Topics: - - Name: Installing the Operator SDK CLI - File: cli-osdk-install - - Name: Operator SDK CLI reference - File: cli-osdk-ref ---- -Name: Cluster administration -Dir: osd_cluster_admin -Distros: openshift-dedicated -Topics: -- Name: Cluster notifications - File: osd-cluster-notifications - Distros: openshift-dedicated -- Name: Configuring private connections - Dir: osd_private_connections - Distros: openshift-dedicated - Topics: - - Name: Configuring private connections for AWS - File: aws-private-connections - - Name: Configuring a private cluster - File: private-cluster -- Name: Cluster autoscaling - File: osd-cluster-autoscaling -- Name: Nodes - Dir: osd_nodes - Distros: openshift-dedicated - Topics: - - Name: About machine pools - File: osd-nodes-machinepools-about - - Name: Managing compute nodes - File: osd-managing-worker-nodes - - Name: About autoscaling nodes on a cluster - File: osd-nodes-about-autoscaling-nodes ---- -Name: Security and compliance -Dir: security -Distros: openshift-dedicated -Topics: -- Name: Viewing audit logs - File: audit-log-view -# - Name: Required allowlist IP addresses for SRE cluster access -# File: 
rh-required-whitelisted-IP-addresses-for-sre-access ---- -Name: Authentication and authorization -Dir: authentication -Distros: openshift-dedicated -Topics: -- Name: Authentication and authorization overview - File: index -- Name: Understanding authentication - File: understanding-authentication -# - Name: Configuring the internal OAuth server -# File: configuring-internal-oauth -# - Name: Configuring OAuth clients -# File: configuring-oauth-clients -- Name: Managing user-owned OAuth access tokens - File: managing-oauth-access-tokens -# - Name: Understanding identity provider configuration -# File: understanding-identity-provider -- Name: Configuring identity providers - File: sd-configuring-identity-providers -- Name: Revoking privileges and access to an OpenShift Dedicated cluster - File: osd-revoking-cluster-privileges -# - Name: Configuring identity providers -# Dir: identity_providers -# Topics: -# - Name: Configuring an htpasswd identity provider -# File: configuring-htpasswd-identity-provider -# - Name: Configuring a Keystone identity provider -# File: configuring-keystone-identity-provider -# - Name: Configuring an LDAP identity provider -# File: configuring-ldap-identity-provider -# - Name: Configuring a basic authentication identity provider -# File: configuring-basic-authentication-identity-provider -# - Name: Configuring a request header identity provider -# File: configuring-request-header-identity-provider -# - Name: Configuring a GitHub or GitHub Enterprise identity provider -# File: configuring-github-identity-provider -# - Name: Configuring a GitLab identity provider -# File: configuring-gitlab-identity-provider -# - Name: Configuring a Google identity provider -# File: configuring-google-identity-provider -# - Name: Configuring an OpenID Connect identity provider -# File: configuring-oidc-identity-provider -- Name: Managing administration roles and users - File: osd-admin-roles -- Name: Using RBAC to define and apply permissions - File: using-rbac -# - Name: Removing the kubeadmin user -# File: remove-kubeadmin -#- Name: Configuring LDAP failover -# File: configuring-ldap-failover -- Name: Understanding and creating service accounts - File: understanding-and-creating-service-accounts -- Name: Using service accounts in applications - File: using-service-accounts-in-applications -- Name: Using a service account as an OAuth client - File: using-service-accounts-as-oauth-client -- Name: Scoping tokens - File: tokens-scoping -- Name: Using bound service account tokens - File: bound-service-account-tokens -- Name: Managing security context constraints - File: managing-security-context-constraints -- Name: Understanding and managing pod security admission - File: understanding-and-managing-pod-security-admission -# - Name: Impersonating the system:admin user -# File: impersonating-system-admin -- Name: Syncing LDAP groups - File: ldap-syncing -# - Name: Managing cloud provider credentials -# Dir: managing_cloud_provider_credentials -# Topics: -# - Name: About the Cloud Credential Operator -# File: about-cloud-credential-operator -# - Name: Mint mode -# File: cco-mode-mint -# - Name: Passthrough mode -# File: cco-mode-passthrough -# - Name: Manual mode with long-term credentials for components -# File: cco-mode-manual -# - Name: Manual mode with short-term credentials for components -# File: cco-short-term-creds ---- -Name: Upgrading -Dir: upgrading -Distros: openshift-dedicated -Topics: -- Name: Upgrading OpenShift Dedicated - File: osd-upgrades ---- -Name: CI/CD -Dir: cicd 
-Distros: openshift-dedicated -Topics: -- Name: CI/CD overview - Dir: overview - Topics: - - Name: About CI/CD - File: index -- Name: Builds using Shipwright - Dir: builds_using_shipwright - Topics: - - Name: Overview of Builds - File: overview-openshift-builds -- Name: Builds using BuildConfig - Dir: builds - Topics: - - Name: Understanding image builds - File: understanding-image-builds - - Name: Understanding build configurations - File: understanding-buildconfigs - - Name: Creating build inputs - File: creating-build-inputs - - Name: Managing build output - File: managing-build-output - - Name: Using build strategies - File: build-strategies -# - Name: Custom image builds with Buildah -# File: custom-builds-buildah - - Name: Performing and configuring basic builds - File: basic-build-operations - - Name: Triggering and modifying builds - File: triggering-builds-build-hooks - - Name: Performing advanced builds - File: advanced-build-operations - - Name: Using Red Hat subscriptions in builds - File: running-entitled-builds - # Dedicated-admin cannot secure builds by strategy - # - Name: Securing builds by strategy - # File: securing-builds-by-strategy - # Dedicated-admin cannot edit build configuration resources - # - Name: Build configuration resources - # File: build-configuration - - Name: Troubleshooting builds - File: troubleshooting-builds -# - Name: Setting up additional trusted certificate authorities for builds -# File: setting-up-trusted-ca -- Name: Pipelines - Dir: pipelines - Topics: - - Name: About OpenShift Pipelines - File: about-pipelines -- Name: GitOps - Dir: gitops - Topics: - - Name: About OpenShift GitOps - File: about-redhat-openshift-gitops -- Name: Jenkins - Dir: jenkins - Topics: - - Name: Configuring Jenkins images - File: images-other-jenkins - - Name: Jenkins agent - File: images-other-jenkins-agent - - Name: Migrating from Jenkins to OpenShift Pipelines - File: migrating-from-jenkins-to-openshift-pipelines - - Name: Important changes to OpenShift Jenkins images - File: important-changes-to-openshift-jenkins-images ---- -Name: Images -Dir: openshift_images -Distros: openshift-dedicated -Topics: -- Name: Overview of images - File: index -# replaced Configuring the Cluster Samples Operator name, cannot configure the operator -- Name: Overview of the Cluster Samples Operator - File: configuring-samples-operator - Distros: openshift-dedicated -- Name: Using the Cluster Samples Operator with an alternate registry - File: samples-operator-alt-registry - Distros: openshift-dedicated -- Name: Creating images - File: create-images -- Name: Managing images - Dir: managing_images - Topics: - - Name: Managing images overview - File: managing-images-overview - - Name: Tagging images - File: tagging-images - - Name: Image pull policy - File: image-pull-policy - - Name: Using image pull secrets - File: using-image-pull-secrets -- Name: Managing image streams - File: image-streams-manage - Distros: openshift-dedicated -- Name: Using image streams with Kubernetes resources - File: using-imagestreams-with-kube-resources - Distros: openshift-dedicated -- Name: Triggering updates on image stream changes - File: triggering-updates-on-imagestream-changes - Distros: openshift-dedicated -- Name: Image configuration resources - File: image-configuration - Distros: openshift-dedicated -- Name: Using images - Dir: using_images - Distros: openshift-dedicated - Topics: - - Name: Using images overview - File: using-images-overview - - Name: Source-to-image - File: using-s21-images - - 
Name: Customizing source-to-image images - File: customizing-s2i-images ---- -Name: Add-on services -Dir: adding_service_cluster -Distros: openshift-dedicated -Topics: -- Name: Adding services to a cluster - File: adding-service -- Name: Available services - File: available-services - Distros: openshift-dedicated ---- -Name: Storage -Dir: storage -Distros: openshift-dedicated -Topics: -- Name: Storage overview - File: index -- Name: Understanding ephemeral storage - File: understanding-ephemeral-storage -- Name: Understanding persistent storage - File: understanding-persistent-storage -- Name: Configuring persistent storage - Dir: persistent_storage - Topics: - - Name: Persistent storage using AWS Elastic Block Store - File: persistent-storage-aws - - Name: Persistent storage using GCE Persistent Disk - File: persistent-storage-gce -- Name: Using Container Storage Interface (CSI) - Dir: container_storage_interface - Topics: - - Name: Configuring CSI volumes - File: persistent-storage-csi - - Name: Managing the default storage class - File: persistent-storage-csi-sc-manage - - Name: AWS Elastic Block Store CSI Driver Operator - File: persistent-storage-csi-ebs - - Name: AWS Elastic File Service CSI Driver Operator - File: persistent-storage-csi-aws-efs - - Name: GCP PD CSI Driver Operator - File: persistent-storage-csi-gcp-pd - - Name: GCP Filestore CSI Driver Operator - File: persistent-storage-csi-google-cloud-file -- Name: Generic ephemeral volumes - File: generic-ephemeral-vols -- Name: Dynamic provisioning - File: dynamic-provisioning ---- -Name: Registry -Dir: registry -Distros: openshift-dedicated -Topics: -- Name: Registry overview - File: index -- Name: Image Registry Operator in OpenShift Dedicated - File: configuring-registry-operator -- Name: Accessing the registry - File: accessing-the-registry -# - Name: Exposing the registry -# File: securing-exposing-registry ---- -Name: Operators -Dir: operators -Distros: openshift-dedicated -Topics: -- Name: Operators overview - File: index -- Name: Understanding Operators - Dir: understanding - Topics: - - Name: What are Operators? 
- File: olm-what-operators-are - - Name: Packaging format - File: olm-packaging-format - - Name: Common terms - File: olm-common-terms - - Name: Operator Lifecycle Manager (OLM) - Dir: olm - Topics: - - Name: Concepts and resources - File: olm-understanding-olm - - Name: Architecture - File: olm-arch - - Name: Workflow - File: olm-workflow - - Name: Dependency resolution - File: olm-understanding-dependency-resolution - - Name: Operator groups - File: olm-understanding-operatorgroups - - Name: Multitenancy and Operator colocation - File: olm-colocation - - Name: Operator conditions - File: olm-operatorconditions - - Name: Metrics - File: olm-understanding-metrics - - Name: Webhooks - File: olm-webhooks - - Name: OperatorHub - File: olm-understanding-operatorhub - - Name: Red Hat-provided Operator catalogs - File: olm-rh-catalogs - - Name: Operators in multitenant clusters - File: olm-multitenancy - - Name: CRDs - Dir: crds - Topics: - - Name: Managing resources from CRDs - File: crd-managing-resources-from-crds -- Name: User tasks - Dir: user - Topics: - - Name: Creating applications from installed Operators - File: olm-creating-apps-from-installed-operators -- Name: Administrator tasks - Dir: admin - Topics: - - Name: Adding Operators to a cluster - File: olm-adding-operators-to-cluster - - Name: Updating installed Operators - File: olm-upgrading-operators - - Name: Deleting Operators from a cluster - File: olm-deleting-operators-from-cluster - - Name: Configuring proxy support - File: olm-configuring-proxy-support - - Name: Viewing Operator status - File: olm-status - - Name: Managing Operator conditions - File: olm-managing-operatorconditions - - Name: Managing custom catalogs - File: olm-managing-custom-catalogs - - Name: Catalog source pod scheduling - File: olm-cs-podsched - - Name: Troubleshooting Operator issues - File: olm-troubleshooting-operator-issues -- Name: Developing Operators - Dir: operator_sdk - Topics: - - Name: About the Operator SDK - File: osdk-about - - Name: Installing the Operator SDK CLI - File: osdk-installing-cli - - Name: Go-based Operators - Dir: golang - Topics: -# Quick start excluded, because it requires cluster-admin permissions. -# - Name: Getting started -# File: osdk-golang-quickstart - - Name: Tutorial - File: osdk-golang-tutorial - - Name: Project layout - File: osdk-golang-project-layout - - Name: Updating Go-based projects - File: osdk-golang-updating-projects - - Name: Ansible-based Operators - Dir: ansible - Topics: -# Quick start excluded, because it requires cluster-admin permissions. -# - Name: Getting started -# File: osdk-ansible-quickstart - - Name: Tutorial - File: osdk-ansible-tutorial - - Name: Project layout - File: osdk-ansible-project-layout - - Name: Updating Ansible-based projects - File: osdk-ansible-updating-projects - - Name: Ansible support - File: osdk-ansible-support - - Name: Kubernetes Collection for Ansible - File: osdk-ansible-k8s-collection - - Name: Using Ansible inside an Operator - File: osdk-ansible-inside-operator - - Name: Custom resource status management - File: osdk-ansible-cr-status - - Name: Helm-based Operators - Dir: helm - Topics: -# Quick start excluded, because it requires cluster-admin permissions. 
-# - Name: Getting started -# File: osdk-helm-quickstart - - Name: Tutorial - File: osdk-helm-tutorial - - Name: Project layout - File: osdk-helm-project-layout - - Name: Updating Helm-based projects - File: osdk-helm-updating-projects - - Name: Helm support - File: osdk-helm-support - - Name: Defining cluster service versions (CSVs) - File: osdk-generating-csvs - - Name: Working with bundle images - File: osdk-working-bundle-images - - Name: Complying with pod security admission - File: osdk-complying-with-psa - - Name: Validating Operators using the scorecard - File: osdk-scorecard - - Name: Validating Operator bundles - File: osdk-bundle-validate - - Name: High-availability or single-node cluster detection and support - File: osdk-ha-sno - - Name: Configuring built-in monitoring with Prometheus - File: osdk-monitoring-prometheus - - Name: Configuring leader election - File: osdk-leader-election - - Name: Object pruning utility - File: osdk-pruning-utility - - Name: Migrating package manifest projects to bundle format - File: osdk-pkgman-to-bundle - - Name: Operator SDK CLI reference - File: osdk-cli-ref - - Name: Migrating to Operator SDK v0.1.0 - File: osdk-migrating-to-v0-1-0 -# ROSA customers can't configure/edit the cluster Operators -# - Name: Cluster Operators reference -# File: operator-reference ---- -Name: Networking -Dir: networking -Distros: openshift-dedicated -Topics: -- Name: About networking - File: about-managed-networking -- Name: Networking Operators - Dir: networking_operators - Distros: openshift-dedicated - Topics: - - Name: DNS Operator in OpenShift Dedicated - File: dns-operator - - Name: Ingress Operator in OpenShift Dedicated - File: ingress-operator -- Name: Network verification - File: network-verification -- Name: Configuring a cluster-wide proxy during installation - File: configuring-cluster-wide-proxy -- Name: CIDR range definitions - File: cidr-range-definitions -- Name: Network security - Dir: network_security - Distros: openshift-dedicated - Topics: - - Name: Understanding network policy APIs - File: network-policy-apis - - Name: Network policy - Dir: network_policy - Distros: openshift-dedicated - Topics: - - Name: About network policy - File: about-network-policy - - Name: Creating a network policy - File: creating-network-policy - - Name: Viewing a network policy - File: viewing-network-policy - - Name: Deleting a network policy - File: deleting-network-policy - - Name: Configuring multitenant isolation with network policy - File: multitenant-network-policy -- Name: OVN-Kubernetes network plugin - Dir: ovn_kubernetes_network_provider - Topics: - - Name: About the OVN-Kubernetes network plugin - File: about-ovn-kubernetes - - Name: Migrating from the OpenShift SDN network plugin - File: migrate-from-openshift-sdn-osd - # - Name: Migrating from the OpenShift SDN network plugin - # File: migrate-from-openshift-sdn -- Name: OpenShift SDN network plugin - Dir: ovn_kubernetes_network_provider - Topics: - - Name: Enabling multicast for a project - File: enabling-multicast -- Name: Configuring Routes - Dir: routes - Topics: - - Name: Route configuration - File: route-configuration - - Name: Secured routes - File: secured-routes ---- -Name: Building applications -Dir: applications -Distros: openshift-dedicated -Topics: -- Name: Building applications overview - File: index -- Name: Projects - Dir: projects - Topics: - - Name: Working with projects - File: working-with-projects -# cannot impersonate resource "users" in API group -# - Name: Creating a project as 
another user -# File: creating-project-other-user - - Name: Configuring project creation - File: configuring-project-creation -- Name: Creating applications - Dir: creating_applications - Topics: - - Name: Using templates - File: using-templates - - Name: Creating applications using the Developer perspective - File: odc-creating-applications-using-developer-perspective - - Name: Creating applications from installed Operators - File: creating-apps-from-installed-operators - - Name: Creating applications using the CLI - File: creating-applications-using-cli - - Name: Creating applications using Ruby on Rails - File: templates-using-ruby-on-rails -- Name: Viewing application composition using the Topology view - File: odc-viewing-application-composition-using-topology-view -# cannot create required namespace -# - Name: Exporting applications -# File: odc-exporting-applications -- Name: Working with Helm charts - Dir: working_with_helm_charts - Topics: - - Name: Understanding Helm - File: understanding-helm - - Name: Installing Helm - File: installing-helm - - Name: Configuring custom Helm chart repositories - File: configuring-custom-helm-chart-repositories - - Name: Working with Helm releases - File: odc-working-with-helm-releases -- Name: Deployments - Dir: deployments - Topics: - - Name: Custom domains for applications - File: osd-config-custom-domains-applications - - Name: Understanding Deployments and DeploymentConfigs - File: what-deployments-are - - Name: Managing deployment processes - File: managing-deployment-processes - - Name: Using deployment strategies - File: deployment-strategies - - Name: Using route-based deployment strategies - File: route-based-deployment-strategies -- Name: Quotas - Dir: quotas - Topics: - - Name: Resource quotas per project - File: quotas-setting-per-project - - Name: Resource quotas across multiple projects - File: quotas-setting-across-multiple-projects -- Name: Using config maps with applications - File: config-maps -- Name: Monitoring project and application metrics using the Developer perspective - File: odc-monitoring-project-and-application-metrics-using-developer-perspective -- Name: Monitoring application health - File: application-health -- Name: Editing applications - File: odc-editing-applications -- Name: Working with quotas - File: working-with-quotas -- Name: Pruning objects to reclaim resources - File: pruning-objects -- Name: Idling applications - File: idling-applications -- Name: Deleting applications - File: odc-deleting-applications -- Name: Using the Red Hat Marketplace - File: red-hat-marketplace ---- -Name: Nodes -Dir: nodes -Distros: openshift-dedicated -Topics: -- Name: Overview of nodes - File: index -- Name: Working with pods - Dir: pods - Topics: - - Name: About pods - File: nodes-pods-using - - Name: Viewing pods - File: nodes-pods-viewing - - Name: Configuring a cluster for pods - File: nodes-pods-configuring - Distros: openshift-dedicated -# Cannot create namespace to install VPA; revisit after Operator book converted -# - Name: Automatically adjust pod resource levels with the vertical pod autoscaler -# File: nodes-pods-vertical-autoscaler - - Name: Providing sensitive data to pods - File: nodes-pods-secrets - - Name: Creating and using config maps - File: nodes-pods-configmaps -# Cannot create required "kubeletconfigs" -# - Name: Using Device Manager to make devices available to nodes -# File: nodes-pods-plugins -# Distros: openshift-dedicated - - Name: Including pod priority in pod scheduling decisions - File: 
nodes-pods-priority - Distros: openshift-dedicated - - Name: Placing pods on specific nodes using node selectors - File: nodes-pods-node-selectors - Distros: openshift-dedicated -# Cannot create namespace to install Run Once; revisit after Operator book converted -# - Name: Run Once Duration Override Operator -# Dir: run_once_duration_override -# Distros: openshift-dedicated -# Topics: -# - Name: Run Once Duration Override Operator overview -# File: index -# - Name: Run Once Duration Override Operator release notes -# File: run-once-duration-override-release-notes -# - Name: Overriding the active deadline for run-once pods -# File: run-once-duration-override-install -# - Name: Uninstalling the Run Once Duration Override Operator -# File: run-once-duration-override-uninstall -- Name: Automatically scaling pods with the Custom Metrics Autoscaler Operator - Dir: cma - Distros: openshift-dedicated - Topics: - - Name: Release notes - Dir: nodes-cma-rn - Topics: - - Name: Custom Metrics Autoscaler Operator release notes - File: nodes-cma-autoscaling-custom-rn - - Name: Past releases - File: nodes-cma-autoscaling-custom-rn-past - - Name: Custom Metrics Autoscaler Operator overview - File: nodes-cma-autoscaling-custom - - Name: Installing the custom metrics autoscaler - File: nodes-cma-autoscaling-custom-install - - Name: Understanding the custom metrics autoscaler triggers - File: nodes-cma-autoscaling-custom-trigger - - Name: Understanding the custom metrics autoscaler trigger authentications - File: nodes-cma-autoscaling-custom-trigger-auth - - Name: Pausing the custom metrics autoscaler - File: nodes-cma-autoscaling-custom-pausing - - Name: Gathering audit logs - File: nodes-cma-autoscaling-custom-audit-log - - Name: Gathering debugging data - File: nodes-cma-autoscaling-custom-debugging - - Name: Viewing Operator metrics - File: nodes-cma-autoscaling-custom-metrics - - Name: Understanding how to add custom metrics autoscalers - File: nodes-cma-autoscaling-custom-adding - - Name: Removing the Custom Metrics Autoscaler Operator - File: nodes-cma-autoscaling-custom-removing -- Name: Controlling pod placement onto nodes (scheduling) - Dir: scheduling - Distros: openshift-dedicated - Topics: - - Name: About pod placement using the scheduler - File: nodes-scheduler-about - - Name: Placing pods relative to other pods using pod affinity and anti-affinity rules - File: nodes-scheduler-pod-affinity - - Name: Controlling pod placement on nodes using node affinity rules - File: nodes-scheduler-node-affinity - - Name: Placing pods onto overcommitted nodes - File: nodes-scheduler-overcommit -# Per OSDOCS-9791, OSD customers cannot add taints to individual nodes.
-# - Name: Controlling pod placement using node taints -# File: nodes-scheduler-taints-tolerations - - Name: Placing pods on specific nodes using node selectors - File: nodes-scheduler-node-selectors - - Name: Controlling pod placement using pod topology spread constraints - File: nodes-scheduler-pod-topology-spread-constraints -# - Name: Placing a pod on a specific node by name -# File: nodes-scheduler-node-names -# - Name: Placing a pod in a specific project -# File: nodes-scheduler-node-projects -# - Name: Keeping your cluster balanced using the descheduler -# File: nodes-scheduler-descheduler -# Cannot create namespace to install Descheduler Operator; revisit after Operator book converted -# - Name: Descheduler -# Dir: descheduler -# Topics: -# - Name: Descheduler overview -# File: index -# - Name: Descheduler release notes -# File: nodes-descheduler-release-notes -# - Name: Evicting pods using the descheduler -# File: nodes-descheduler-configuring -# - Name: Uninstalling the descheduler -# File: nodes-descheduler-uninstalling -# Cannot create namespace to install Secondary Scheduler Operator; revisit after Operator book converted -# - Name: Secondary scheduler -# Dir: secondary_scheduler -# Distros: openshift-dedicated -# Topics: -# - Name: Secondary scheduler overview -# File: index -# - Name: Secondary Scheduler Operator release notes -# File: nodes-secondary-scheduler-release-notes -# - Name: Scheduling pods using a secondary scheduler -# File: nodes-secondary-scheduler-configuring -# - Name: Uninstalling the Secondary Scheduler Operator -# File: nodes-secondary-scheduler-uninstalling -- Name: Using jobs and daemon sets - Dir: jobs - Topics: - - Name: Running background tasks on nodes automatically with daemon sets - File: nodes-pods-daemonsets - Distros: openshift-dedicated - - Name: Running tasks in pods using jobs - File: nodes-nodes-jobs -- Name: Working with nodes - Dir: nodes - Distros: openshift-dedicated - Topics: - - Name: Viewing and listing the nodes in your cluster - File: nodes-nodes-viewing -# cannot use oc adm cordon; cannot patch resource "machinesets"; cannot patch resource "nodes" -# - Name: Working with nodes -# File: nodes-nodes-working -# cannot create resource "kubeletconfigs", "schedulers", "machineconfigs", "kubeletconfigs" -# - Name: Managing nodes -# File: nodes-nodes-managing -# cannot create resource "kubeletconfigs" -# - Name: Managing graceful node shutdown -# File: nodes-nodes-graceful-shutdown -# cannot create resource "kubeletconfigs" -# - Name: Managing the maximum number of pods per node -# File: nodes-nodes-managing-max-pods - - Name: Using the Node Tuning Operator - File: nodes-node-tuning-operator - - Name: Remediating, fencing, and maintaining nodes - File: nodes-remediating-fencing-maintaining-rhwa -# Cannot create namespace needed to oc debug and reboot; revisit after Operator book converted -# - Name: Understanding node rebooting -# File: nodes-nodes-rebooting -# cannot create resource "kubeletconfigs" -# - Name: Freeing node resources using garbage collection -# File: nodes-nodes-garbage-collection -# cannot create resource "kubeletconfigs" -# - Name: Allocating resources for nodes -# File: nodes-nodes-resources-configuring -# cannot create resource "kubeletconfigs" -# - Name: Allocating specific CPUs for nodes in a cluster -# File: nodes-nodes-resources-cpus -# cannot create resource "kubeletconfigs" -# - Name: Configuring the TLS security profile for the kubelet -# File: nodes-nodes-tls -# Distros: openshift-dedicated -# - Name:
Monitoring for problems in your nodes -# File: nodes-nodes-problem-detector -# cannot patch resource "nodes" -# - Name: Creating infrastructure nodes -# File: nodes-nodes-creating-infrastructure-nodes -- Name: Working with containers - Dir: containers - Topics: - - Name: Understanding containers - File: nodes-containers-using - - Name: Using Init Containers to perform tasks before a pod is deployed - File: nodes-containers-init - Distros: openshift-dedicated - - Name: Using volumes to persist container data - File: nodes-containers-volumes - - Name: Mapping volumes using projected volumes - File: nodes-containers-projected-volumes - - Name: Allowing containers to consume API objects - File: nodes-containers-downward-api - - Name: Copying files to or from a container - File: nodes-containers-copying-files - - Name: Executing remote commands in a container - File: nodes-containers-remote-commands - - Name: Using port forwarding to access applications in a container - File: nodes-containers-port-forwarding -# cannot patch resource "configmaps" -# - Name: Using sysctls in containers -# File: nodes-containers-sysctls -- Name: Working with clusters - Dir: clusters - Topics: - - Name: Viewing system event information in a cluster - File: nodes-containers-events - - Name: Analyzing cluster resource levels - File: nodes-cluster-resource-levels - Distros: openshift-dedicated - - Name: Setting limit ranges - File: nodes-cluster-limit-ranges - - Name: Configuring cluster memory to meet container memory and risk requirements - File: nodes-cluster-resource-configure - Distros: openshift-dedicated - - Name: Configuring your cluster to place pods on overcommitted nodes - File: nodes-cluster-overcommit - Distros: openshift-dedicated - - Name: Configuring the Linux cgroup version on your nodes - File: nodes-cluster-cgroups-2 - Distros: openshift-enterprise - - Name: Configuring the Linux cgroup version on your nodes - File: nodes-cluster-cgroups-okd - Distros: openshift-origin -# The TechPreviewNoUpgrade Feature Gate is not allowed -# - Name: Enabling features using FeatureGates -# File: nodes-cluster-enabling-features -# Distros: openshift-rosa -# Error: nodes.config.openshift.io "cluster" could not be patched -# - Name: Improving cluster stability in high latency environments using worker latency profiles -# File: nodes-cluster-worker-latency-profiles -# Not supported per Michael McNeill -#- Name: Remote worker nodes on the network edge -# Dir: edge -# Topics: -# - Name: Using remote worker node at the network edge -# File: nodes-edge-remote-workers -# Not supported per Michael McNeill -#- Name: Worker nodes for single-node OpenShift clusters -# Dir: nodes -# Topics: -# - Name: Adding worker nodes to single-node OpenShift clusters -# File: nodes-sno-worker-nodes --- -Name: Observability -Dir: observability -Distros: openshift-dedicated -Topics: -- Name: Observability overview - Dir: overview - Topics: - - Name: About Observability - File: index -- Name: Monitoring - Dir: monitoring - Distros: openshift-dedicated - Topics: - - Name: Monitoring overview - File: monitoring-overview - - Name: Accessing monitoring for user-defined projects - File: sd-accessing-monitoring-for-user-defined-projects - - Name: Configuring the monitoring stack - File: configuring-the-monitoring-stack - - Name: Disabling monitoring for user-defined projects - File: sd-disabling-monitoring-for-user-defined-projects - - Name: Enabling alert routing for user-defined projects - File: enabling-alert-routing-for-user-defined-projects - -
Name: Managing metrics - File: managing-metrics - - Name: Managing alerts - File: managing-alerts - - Name: Reviewing monitoring dashboards - File: reviewing-monitoring-dashboards - - Name: Accessing third-party monitoring APIs - File: accessing-third-party-monitoring-apis - - Name: Troubleshooting monitoring issues - File: troubleshooting-monitoring-issues - - Name: Config map reference for the Cluster Monitoring Operator - File: config-map-reference-for-the-cluster-monitoring-operator -#- Name: Logging -# Dir: logging -# Distros: openshift-dedicated -# Topics: -# - Name: Release notes -# Dir: logging_release_notes -# Topics: -# - Name: Logging 5.9 -# File: logging-5-9-release-notes -# - Name: Logging 5.8 -# File: logging-5-8-release-notes -# - Name: Logging 5.7 -# File: logging-5-7-release-notes -# - Name: Support -# File: cluster-logging-support -# - Name: Troubleshooting logging -# Dir: troubleshooting -# Topics: -# - Name: Viewing Logging status -# File: cluster-logging-cluster-status -# - Name: Troubleshooting log forwarding -# File: log-forwarding-troubleshooting -# - Name: Troubleshooting logging alerts -# File: troubleshooting-logging-alerts -# - Name: Viewing the status of the Elasticsearch log store -# File: cluster-logging-log-store-status -# - Name: About Logging -# File: cluster-logging -# - Name: Installing Logging -# File: cluster-logging-deploying -# - Name: Updating Logging -# File: cluster-logging-upgrading -# - Name: Visualizing logs -# Dir: log_visualization -# Topics: -# - Name: About log visualization -# File: log-visualization -# - Name: Log visualization with the web console -# File: log-visualization-ocp-console -# - Name: Viewing cluster dashboards -# File: cluster-logging-dashboards -# - Name: Log visualization with Kibana -# File: logging-kibana -# - Name: Configuring your Logging deployment -# Dir: config -# Topics: -# - Name: Configuring CPU and memory limits for Logging components -# File: cluster-logging-memory -# #- Name: Configuring systemd-journald and Fluentd -# # File: cluster-logging-systemd -# Dir: log_collection_forwarding -# Topics: -# - Name: About log collection and forwarding -# File: log-forwarding -# - Name: Log output types -# - Name: Enabling JSON log forwarding -# File: cluster-logging-enabling-json-logging -# - Name: Configuring log forwarding -# File: configuring-log-forwarding -# - Name: Configuring the logging collector -# File: cluster-logging-collector -# - Name: Collecting and storing Kubernetes events -# File: cluster-logging-eventrouter -# - Name: Log storage -# Dir: log_storage -# Topics: -# - Name: About log storage -# File: about-log-storage -# - Name: Installing log storage -# File: installing-log-storage -# - Name: Configuring the LokiStack log store -# File: cluster-logging-loki -# - Name: Configuring the Elasticsearch log store -# File: logging-config-es-store -# - Name: Logging alerts -# Dir: logging_alerts -# Topics: -# - Name: Default logging alerts -# File: default-logging-alerts -# - Name: Custom logging alerts -# File: custom-logging-alerts -# - Name: Performance and reliability tuning -# Dir: performance_reliability -# Topics: -# - Name: Flow control mechanisms -# File: logging-flow-control-mechanisms -# - Name: Filtering logs by content -# File: logging-content-filtering -# - Name: Filtering logs by metadata -# File: logging-input-spec-filtering -# - Name: Scheduling resources -# Dir: scheduling_resources -# Topics: -# - Name: Using node selectors to move logging resources -# File: logging-node-selectors -# - Name: 
Using tolerations to control logging pod placement -# File: logging-taints-tolerations -# - Name: Uninstalling Logging -# File: cluster-logging-uninstall -# - Name: Exported fields -# File: cluster-logging-exported-fields -# - Name: API reference -# Dir: api_reference -# Topics: - # - Name: 5.8 Logging API reference - # File: logging-5-8-reference - # - Name: 5.7 Logging API reference - # File: logging-5-7-reference -# - Name: 5.6 Logging API reference -# File: logging-5-6-reference -# - Name: Glossary -# File: logging-common-terms ---- -Name: Service Mesh -Dir: service_mesh -Distros: openshift-dedicated -Topics: -- Name: Service Mesh 2.x - Dir: v2x - Topics: - - Name: About OpenShift Service Mesh - File: ossm-about - - Name: Service Mesh 2.x release notes - File: servicemesh-release-notes - - Name: Service Mesh architecture - File: ossm-architecture - - Name: Service Mesh deployment models - File: ossm-deployment-models - - Name: Service Mesh and Istio differences - File: ossm-vs-community - - Name: Preparing to install Service Mesh - File: preparing-ossm-installation - - Name: Installing the Operators - File: installing-ossm - - Name: Creating the ServiceMeshControlPlane - File: ossm-create-smcp - - Name: Adding workloads to a service mesh - File: ossm-create-mesh - - Name: Enabling sidecar injection - File: prepare-to-deploy-applications-ossm - - Name: Upgrading Service Mesh - File: upgrading-ossm - - Name: Managing users and profiles - File: ossm-profiles-users - - Name: Security - File: ossm-security - - Name: Traffic management - File: ossm-traffic-manage - - Name: Metrics, logs, and traces - File: ossm-observability - - Name: Performance and scalability - File: ossm-performance-scalability - - Name: Deploying to production - File: ossm-deploy-production - - Name: Federation - File: ossm-federation - - Name: Extensions - File: ossm-extensions - - Name: 3scale WebAssembly for 2.1 - File: ossm-threescale-webassembly-module - - Name: 3scale Istio adapter for 2.0 - File: threescale-adapter - - Name: Troubleshooting Service Mesh - File: ossm-troubleshooting-istio - - Name: Control plane configuration reference - File: ossm-reference-smcp - - Name: Kiali configuration reference - File: ossm-reference-kiali - - Name: Jaeger configuration reference - File: ossm-reference-jaeger - - Name: Uninstalling Service Mesh - File: removing-ossm -# Service Mesh 1.x is tech preview -# - Name: Service Mesh 1.x -# Dir: v1x -# Topics: -# - Name: Service Mesh 1.x release notes -# File: servicemesh-release-notes -# - Name: Service Mesh architecture -# File: ossm-architecture -# - Name: Service Mesh and Istio differences -# File: ossm-vs-community -# - Name: Preparing to install Service Mesh -# File: preparing-ossm-installation -# - Name: Installing Service Mesh -# File: installing-ossm -# - Name: Security -# File: ossm-security -# - Name: Traffic management -# File: ossm-traffic-manage -# - Name: Deploying applications on Service Mesh -# File: prepare-to-deploy-applications-ossm -# - Name: Data visualization and observability -# File: ossm-observability -# - Name: Custom resources -# File: ossm-custom-resources -# - Name: 3scale Istio adapter for 1.x -# File: threescale-adapter -# - Name: Removing Service Mesh -# File: removing-ossm ---- -Name: Serverless -Dir: serverless -Distros: openshift-dedicated -Topics: -- Name: About Serverless - Dir: about - Topics: - - Name: Serverless overview - File: about-serverless -# --- -# Name: Virtualization -# Dir: virt -# Distros: openshift-dedicated -# Topics: -# - Name: 
About -# Dir: about_virt -# Topics: -# - Name: About OpenShift Virtualization -# File: about-virt -# Distros: openshift-dedicated -# - Name: About OKD Virtualization -# File: about-virt -# Distros: openshift-origin -# - Name: Security policies -# File: virt-security-policies -# - Name: Architecture -# File: virt-architecture -# Distros: openshift-dedicated -# #- Name: Release notes -# # Dir: release_notes -# # Topics: -# # - Name: OpenShift Virtualization release notes -# # File: virt-release-notes-placeholder -# # Distros: openshift-rosa -# - Name: Getting started -# Dir: getting_started -# Topics: -# - Name: Getting started with OpenShift Virtualization -# File: virt-getting-started -# Distros: openshift-dedicated -# - Name: Getting started with OKD Virtualization -# File: virt-getting-started -# Distros: openshift-origin -# - Name: virtctl and libguestfs -# File: virt-using-the-cli-tools -# Distros: openshift-dedicated -# - Name: Installing -# Dir: install -# Topics: -# - Name: Preparing your cluster -# File: preparing-cluster-for-virt -# - Name: Installing OpenShift Virtualization -# File: installing-virt -# - Name: Uninstalling OpenShift Virtualization -# File: uninstalling-virt -# - Name: Post-installation configuration -# Dir: post_installation_configuration -# Topics: -# - Name: Post-installation configuration -# File: virt-post-install-config -# - Name: Node placement rules -# File: virt-node-placement-virt-components -# - Name: Network configuration -# File: virt-post-install-network-config -# - Name: Storage configuration -# File: virt-post-install-storage-config -# - Name: Updating -# Dir: updating -# Topics: -# - Name: Updating OpenShift Virtualization -# File: upgrading-virt -# Distros: openshift-dedicated -# - Name: Virtual machines -# Dir: virtual_machines -# Topics: -# - Name: Creating a virtual machine -# Dir: creating_vm -# Topics: -# - Name: Overview -# File: virt-vm-overview -# - Name: Setting up your environment -# File: virt-setting-up-environment -# - Name: Creating VMs from instance types -# File: virt-creating-vms-from-instance-types -# - Name: Creating VMs from templates -# File: virt-creating-vms-from-templates -# - Name: Advanced VM creation -# Dir: creating_vms_advanced -# Topics: -# - Name: Overview -# File: virt-advanced-vm-overview -# - Name: Creating VMs in the web console -# Dir: creating_vms_advanced_web -# Topics: -# - Name: Creating VMs from Red Hat images -# File: virt-creating-vms-from-rh-images-overview -# - Name: Creating VMs by importing images from web pages -# File: virt-creating-vms-from-web-images -# - Name: Creating VMs by uploading images -# File: virt-creating-vms-uploading-images -# - Name: Cloning VMs -# File: virt-cloning-vms -# - Name: Creating VMs using the CLI -# Dir: creating_vms_cli -# Topics: -# - Name: Creating VMs using a VirtualMachine manifest -# File: virt-create-vm-manifest-virtctl -# - Name: Creating VMs by using container disks -# File: virt-creating-vms-from-container-disks -# - Name: Creating VMs by cloning PVCs -# File: virt-creating-vms-by-cloning-pvcs -#- Name: Managing VMs -# Dir: managing_vms -# Topics: -# - Name: Installing the QEMU guest agent and VirtIO drivers -# File: virt-installing-qemu-guest-agent -# - Name: Connecting to VM consoles -# File: virt-accessing-vm-consoles -# - Name: Configuring SSH access to VMs -# File: virt-accessing-vm-ssh -# - Name: Editing virtual machines -# File: virt-edit-vms -# - Name: Editing boot order -# File: virt-edit-boot-order -# - Name: Deleting virtual machines -# File: 
virt-delete-vms -# - Name: Exporting virtual machines -# File: virt-exporting-vms -# - Name: Managing virtual machine instances -# File: virt-manage-vmis -# - Name: Controlling virtual machine states -# File: virt-controlling-vm-states -# - Name: Using virtual Trusted Platform Module devices -# File: virt-using-vtpm-devices -# - Name: Managing virtual machines with OpenShift Pipelines -# File: virt-managing-vms-openshift-pipelines -# - Name: Advanced virtual machine management -# Dir: advanced_vm_management -# Topics: -# - Name: Working with resource quotas for virtual machines -# File: virt-working-with-resource-quotas-for-vms -# - Name: Specifying nodes for virtual machines -# File: virt-specifying-nodes-for-vms -# - Name: Configuring the default CPU model -# File: virt-configuring-default-cpu-model -# - Name: UEFI mode for virtual machines -# File: virt-uefi-mode-for-vms -# - Name: Configuring PXE booting for virtual machines -# File: virt-configuring-pxe-booting -# - Name: Using huge pages with virtual machines -# File: virt-using-huge-pages-with-vms -# - Name: Enabling dedicated resources for a virtual machine -# File: virt-dedicated-resources-vm -# - Name: Scheduling virtual machines -# File: virt-schedule-vms -# - Name: Configuring PCI passthrough -# File: virt-configuring-pci-passthrough -# - Name: Configuring virtual GPUs -# File: virt-configuring-virtual-gpus -# - Name: Enabling descheduler evictions on virtual machines -# File: virt-enabling-descheduler-evictions -# - Name: About high availability for virtual machines -# File: virt-high-availability-for-vms -# - Name: Control plane tuning -# File: virt-vm-control-plane-tuning -# - Name: Assigning compute resources -# File: virt-assigning-compute-resources -# - Name: About multi-queue functionality -# File: virt-about-multi-queue -# - Name: VM disks -# Dir: virtual_disks -# Topics: -# - Name: Hot-plugging VM disks -# File: virt-hot-plugging-virtual-disks -# - Name: Expanding VM disks -# File: virt-expanding-vm-disks -# - Name: Configuring shared volumes -# File: virt-configuring-shared-volumes-for-vms -# - Name: Networking -# Dir: vm_networking -# Topics: -# - Name: Networking configuration overview -# File: virt-networking-overview -# - Name: Connecting a VM to the default pod network -# File: virt-connecting-vm-to-default-pod-network -# - Name: Exposing a VM by using a service -# File: virt-exposing-vm-with-service -# # Not supported in ROSA/OSD -# # - Name: Connecting a VM to a Linux bridge network -# # File: virt-connecting-vm-to-linux-bridge -# # - Name: Connecting a VM to an SR-IOV network -# # File: virt-connecting-vm-to-sriov -# # - Name: Using DPDK with SR-IOV -# # File: virt-using-dpdk-with-sriov -# - Name: Connecting a VM to an OVN-Kubernetes secondary network -# File: virt-connecting-vm-to-ovn-secondary-network -# # Tech preview not supported in ROSA/OSD -# # - Name: Hot plugging secondary network interfaces -# # File: virt-hot-plugging-network-interfaces -# - Name: Connecting a VM to a service mesh -# File: virt-connecting-vm-to-service-mesh -# - Name: Configuring a dedicated network for live migration -# File: virt-dedicated-network-live-migration -# - Name: Configuring and viewing IP addresses -# File: virt-configuring-viewing-ips-for-vms -# # Tech Preview features not supported in ROSA/OSD -# # - Name: Accessing a VM by using the cluster FQDN -# # File: virt-accessing-vm-secondary-network-fqdn -# - Name: Managing MAC address pools for network interfaces -# File: virt-using-mac-address-pool-for-vms -# - Name: Storage
-# Dir: storage -# Topics: -# - Name: Storage configuration overview -# File: virt-storage-config-overview -# - Name: Configuring storage profiles -# File: virt-configuring-storage-profile -# - Name: Managing automatic boot source updates -# File: virt-automatic-bootsource-updates -# - Name: Reserving PVC space for file system overhead -# File: virt-reserving-pvc-space-fs-overhead -# - Name: Configuring local storage by using HPP -# File: virt-configuring-local-storage-with-hpp -# - Name: Enabling user permissions to clone data volumes across namespaces -# File: virt-enabling-user-permissions-to-clone-datavolumes -# - Name: Configuring CDI to override CPU and memory quotas -# File: virt-configuring-cdi-for-namespace-resourcequota -# - Name: Preparing CDI scratch space -# File: virt-preparing-cdi-scratch-space -# - Name: Using preallocation for data volumes -# File: virt-using-preallocation-for-datavolumes -# - Name: Managing data volume annotations -# File: virt-managing-data-volume-annotations -# # Virtual machine live migration -# - Name: Live migration -# Dir: live_migration -# Topics: -# - Name: About live migration -# File: virt-about-live-migration -# - Name: Configuring live migration -# File: virt-configuring-live-migration -# - Name: Initiating and canceling live migration -# File: virt-initiating-live-migration -# # Node maintenance mode -# - Name: Nodes -# Dir: nodes -# Topics: -# - Name: Node maintenance -# File: virt-node-maintenance -# - Name: Managing node labeling for obsolete CPU models -# File: virt-managing-node-labeling-obsolete-cpu-models -# - Name: Preventing node reconciliation -# File: virt-preventing-node-reconciliation -# # Hiding in ROSA/OSD as user cannot cordon and drain nodes -# # - Name: Deleting a failed node to trigger VM failover -# # File: virt-triggering-vm-failover-resolving-failed-node -# - Name: Monitoring -# Dir: monitoring -# Topics: -# - Name: Monitoring overview -# File: virt-monitoring-overview -# # Hiding in ROSA/OSD as TP not supported -# # - Name: Cluster checkup framework -# # File: virt-running-cluster-checkups -# - Name: Prometheus queries for virtual resources -# File: virt-prometheus-queries -# - Name: Virtual machine custom metrics -# File: virt-exposing-custom-metrics-for-vms -# - Name: Virtual machine health checks -# File: virt-monitoring-vm-health -# - Name: Runbooks -# File: virt-runbooks -# - Name: Support -# Dir: support -# Topics: -# - Name: Support overview -# File: virt-support-overview -# - Name: Collecting data for Red Hat Support -# File: virt-collecting-virt-data -# Distros: openshift-dedicated -# - Name: Troubleshooting -# File: virt-troubleshooting -# - Name: Backup and restore -# Dir: backup_restore -# Topics: -# - Name: Backup and restore by using VM snapshots -# File: virt-backup-restore-snapshots -# - Name: Backing up and restoring virtual machines -# File: virt-backup-restore-overview -# # - Name: Collecting OKD Virtualization data for community report -# # File: virt-collecting-virt-data -# # Distros: openshift-origin diff --git a/_topic_maps/_topic_map_rosa.yml b/_topic_maps/_topic_map_rosa.yml deleted file mode 100644 index 2496b3fcb454..000000000000 --- a/_topic_maps/_topic_map_rosa.yml +++ /dev/null @@ -1,2115 +0,0 @@ -# This configuration file dictates the organization of the topic groups and -# topics on the main page of the doc site for this branch. 
Each record -# consists of the following: -# -# --- <= Record delimiter -# Name: Origin of the Species <= Display name of topic group -# Dir: origin_of_the_species <= Directory name of topic group -# Topics: -# - Name: The Majestic Marmoset <= Topic name -# File: the_majestic_marmoset <= Topic file under group dir +/- -# - Name: The Curious Crocodile <= Topic 2 name -# File: the_curious_crocodile <= Topic 2 file -# - Name: The Numerous Nematodes <= Sub-topic group name -# Dir: the_numerous_nematodes <= Sub-topic group dir -# Topics: -# - Name: The Wily Worm <= Sub-topic name -# File: the_wily_worm <= Sub-topic file under <group dir>/<subtopic dir> -# - Name: The Acrobatic Ascarid <= Sub-topic 2 name -# File: the_acrobatic_ascarid <= Sub-topic 2 file under <group dir>/<subtopic dir> -# -# The ordering of the records in this document determines the ordering of the -# topic groups and topics on the main page. - --- -Name: About -Dir: welcome -Distros: openshift-rosa -Topics: -- Name: Welcome - File: index -- Name: Learn more about ROSA with HCP - File: about-hcp -- Name: AWS STS and ROSA with HCP explained - File: cloud-experts-rosa-hcp-sts-explained -- Name: Legal notice - File: legal-notice - Distros: openshift-rosa --- -Name: What's new -Dir: rosa_release_notes -Distros: openshift-rosa -Topics: -- Name: What's new with Red Hat OpenShift Service on AWS - File: rosa-release-notes --- -Name: Introduction to ROSA -Dir: rosa_architecture -Distros: openshift-rosa -Topics: -- Name: Understanding ROSA - File: rosa-understanding -- Name: Policies and service definition - Dir: rosa_policy_service_definition - Distros: openshift-rosa - Topics: - - Name: About availability for ROSA - File: rosa-policy-understand-availability - - Name: Overview of responsibilities for ROSA - File: rosa-policy-responsibility-matrix - - Name: ROSA service definition - File: rosa-service-definition - - Name: ROSA instance types - File: rosa-instance-types - - Name: ROSA update life cycle - File: rosa-life-cycle - - Name: ROSA with HCP service definition - File: rosa-hcp-service-definition - - Name: ROSA with HCP instance types - File: rosa-hcp-instance-types - - Name: ROSA with HCP update life cycle - File: rosa-hcp-life-cycle - - Name: Understanding security for ROSA - File: rosa-policy-process-security - - Name: SRE and service account access - File: rosa-sre-access -# Created a new assembly in ROSA/OSD.
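
The record schema sketched in the header comment above is plain multi-document YAML, so the navigation tree it defines can be inspected outside the site build. The following is a minimal, hypothetical sketch (it assumes PyYAML and a local copy of the map, and is not tooling that ships in this repository): it prints the topic tree that one distro would see, honoring the record-level and topic-level Distros keys described above.

    import sys
    import yaml

    def applies(node, distro):
        # A node applies to every distro unless it carries an explicit Distros key.
        if "Distros" not in node:
            return True
        return distro in [d.strip() for d in str(node["Distros"]).split(",")]

    def walk(topics, distro, depth):
        for topic in topics or []:
            if not applies(topic, distro):
                continue
            print("  " * depth + topic["Name"])
            # An entry with its own Dir and Topics is a sub-topic group; recurse.
            walk(topic.get("Topics"), distro, depth + 1)

    def main(path, distro):
        with open(path) as f:
            # Each ----delimited document in the stream is one topic-group record.
            for record in yaml.safe_load_all(f):
                if record and applies(record, distro):
                    print(record["Name"])
                    walk(record.get("Topics"), distro, 1)

    if __name__ == "__main__":
        # Example (hypothetical script name):
        #   python walk_topic_map.py _topic_maps/_topic_map_rosa.yml openshift-rosa
        main(sys.argv[1], sys.argv[2])

Because commented-out entries never reach the YAML parser, a walk like this reflects exactly what was still published from this map at the time of the deletion.
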
In OCP, the assembly is in a book that is not in ROSA/OSD -# - Name: About admission plugins -# File: rosa-admission-plug-ins -# Distros: openshift-rosa -- Name: About IAM resources for STS clusters - File: rosa-sts-about-iam-resources -- Name: OpenID Connect Overview - File: rosa-oidc-overview -# - Name: Training for ROSA -# File: rosa-training ---- -Name: Architecture -Dir: architecture -Distros: openshift-rosa -Topics: -- Name: Architecture overview - File: index -- Name: Product architecture - File: architecture -- Name: Architecture models - File: rosa-architecture-models -- Name: Control plane architecture - File: control-plane -- Name: NVIDIA GPU architecture overview - File: nvidia-gpu-architecture-overview -- Name: Understanding OpenShift development - File: understanding-development -- Name: Admission plugins - File: admission-plug-ins ---- -Name: Tutorials -Dir: cloud_experts_tutorials -Distros: openshift-rosa -Topics: -- Name: Tutorials overview - File: index -#- Name: ROSA prerequisites -# File: rosa-mobb-prerequisites-tutorial -- Name: ROSA with HCP activation and account linking - File: cloud-experts-rosa-hcp-activation-and-account-linking-tutorial -- Name: ROSA with HCP private offer acceptance and sharing - File: cloud-experts-rosa-with-hcp-private-offer-acceptance-and-sharing -- Name: Verifying Permissions for a ROSA STS Deployment - File: rosa-mobb-verify-permissions-sts-deployment -- Name: Deploying ROSA with a Custom DNS Resolver - File: cloud-experts-custom-dns-resolver -- Name: Using AWS WAF and Amazon CloudFront to protect ROSA workloads - File: cloud-experts-using-cloudfront-and-waf -- Name: Using AWS WAF and AWS ALBs to protect ROSA workloads - File: cloud-experts-using-alb-and-waf -- Name: Deploying OpenShift API for Data Protection on a ROSA cluster - File: cloud-experts-deploy-api-data-protection -- Name: AWS Load Balancer Operator on ROSA - File: cloud-experts-aws-load-balancer-operator -- Name: Configuring Microsoft Entra ID (formerly Azure Active Directory) as an identity provider - File: cloud-experts-entra-id-idp -- Name: Using AWS Secrets Manager CSI on ROSA with STS - File: cloud-experts-aws-secret-manager -- Name: Using AWS Controllers for Kubernetes on ROSA - File: cloud-experts-using-aws-ack -- Name: Deploying the External DNS Operator on ROSA - File: cloud-experts-external-dns -- Name: Dynamically issuing certificates using the cert-manager Operator on ROSA - File: cloud-experts-dynamic-certificate-custom-domain -- Name: Assigning consistent egress IP for external traffic - File: cloud-experts-consistent-egress-ip -- Name: Updating component routes with custom domains and TLS certificates - File: cloud-experts-update-component-routes -- Name: Getting started with ROSA - Dir: cloud-experts-getting-started - Distros: openshift-rosa - Topics: - - Name: What is ROSA - File: cloud-experts-getting-started-what-is-rosa - - Name: ROSA with AWS STS explained - File: cloud-experts-rosa-sts-explained - - Name: OpenShift concepts - File: cloud-experts-getting-started-openshift-concepts - - Name: Deploying a cluster - Dir: cloud-experts-getting-started-deploying - Topics: - - Name: Choosing a deployment method - File: cloud-experts-getting-started-choose-deployment-method - - Name: Simple CLI guide - File: cloud-experts-getting-started-simple-cli-guide - - Name: Detailed CLI guide - File: cloud-experts-getting-started-detailed-cli-guide - - Name: Simple UI guide - File: cloud-experts-getting-started-simple-ui-guide - - Name: Detailed UI guide - File: 
cloud-experts-getting-started-detailed-ui - - Name: HCP deployment guide - File: cloud-experts-getting-started-hcp - - Name: Creating an admin user - File: cloud-experts-getting-started-admin - - Name: Setting up an identity provider - File: cloud-experts-getting-started-idp - - Name: Granting admin rights - File: cloud-experts-getting-started-admin-rights - - Name: Accessing your cluster - File: cloud-experts-getting-started-accessing - - Name: Managing worker nodes - File: cloud-experts-getting-started-managing-worker-nodes - - Name: Autoscaling - File: cloud-experts-getting-started-autoscaling - - Name: Upgrading your cluster - File: cloud-experts-getting-started-upgrading - - Name: Deleting your cluster - File: cloud-experts-getting-started-deleting - - Name: Obtaining support - File: cloud-experts-getting-started-support -- Name: Deploying an application - Dir: cloud-experts-deploying-application - Distros: openshift-rosa - Topics: - - Name: Introduction - File: cloud-experts-deploying-application-intro - - Name: Prerequisites - File: cloud-experts-deploying-application-prerequisites - - Name: Lab Overview - File: cloud-experts-deploying-application-lab-overview - - Name: Deployment - File: cloud-experts-deploying-application-deployment - - Name: Health Check - File: cloud-experts-deploying-application-health-check - - Name: Storage - File: cloud-experts-deploying-application-storage - - Name: ConfigMap, secrets, and environment variables - File: cloud-experts-deploying-configmaps-secrets-env-var - - Name: Networking - File: cloud-experts-deploying-application-networking - - Name: Scaling an application - File: cloud-experts-deploying-application-scaling - - Name: Logging - File: cloud-experts-deploying-application-logging - - Name: S2i deployments - File: cloud-experts-deploying-application-s2i-deployments - - Name: Using Source-to-Image (S2I) webhooks for automated deployment - File: cloud-experts-deploying-s2i-webhook-cicd - - Name: Integrating with AWS - File: cloud-experts-deploying-application-integrating-aws ---- -Name: Getting started -Dir: rosa_getting_started -Distros: openshift-rosa -Topics: -- Name: ROSA quickstart guide - File: rosa-quickstart-guide-ui -- Name: Comprehensive guide to getting started with ROSA - File: rosa-getting-started -- Name: Understanding the ROSA with STS deployment workflow - File: rosa-sts-getting-started-workflow ---- -Name: Prepare your environment -Dir: rosa_planning -Distros: openshift-rosa -Topics: -- Name: Prerequisites checklist for deploying ROSA using STS - File: rosa-cloud-expert-prereq-checklist -- Name: Detailed requirements for deploying ROSA using STS - File: rosa-sts-aws-prereqs -- Name: ROSA IAM role resources - File: rosa-sts-ocm-role -- Name: Limits and scalability - File: rosa-limits-scalability -- Name: ROSA with HCP limits and scalability - File: rosa-hcp-limits-scalability -- Name: Planning resource usage in your cluster - File: rosa-planning-environment -- Name: Required AWS service quotas - File: rosa-sts-required-aws-service-quotas -- Name: Setting up your environment - File: rosa-sts-setting-up-environment ---- -Name: Install ROSA with HCP clusters -Dir: rosa_hcp -Distros: openshift-rosa -Topics: -- Name: Creating ROSA with HCP clusters using the default options - File: rosa-hcp-sts-creating-a-cluster-quickly -- Name: Creating a ROSA cluster using Terraform - Dir: terraform - Distros: openshift-rosa - Topics: - - Name: Creating a default ROSA cluster using Terraform - File: rosa-hcp-creating-a-cluster-quickly-terraform -- 
Name: Creating ROSA with HCP clusters using a custom AWS KMS encryption key - File: rosa-hcp-creating-cluster-with-aws-kms-key -- Name: Creating a private cluster on ROSA with HCP - File: rosa-hcp-aws-private-creating-cluster -- Name: Creating a ROSA with HCP cluster with egress lockdown - File: rosa-hcp-egress-lockdown-install -- Name: Creating ROSA with HCP clusters with external authentication - File: rosa-hcp-sts-creating-a-cluster-ext-auth -- Name: Creating ROSA with HCP clusters without a CNI plugin - File: rosa-hcp-cluster-no-cni -- Name: Deleting a ROSA with HCP cluster - File: rosa-hcp-deleting-cluster ---- -Name: Install ROSA Classic clusters -Dir: rosa_install_access_delete_clusters -Distros: openshift-rosa -Topics: -- Name: Creating a ROSA cluster with STS using the default options - File: rosa-sts-creating-a-cluster-quickly -- Name: Creating a ROSA cluster with STS using customizations - File: rosa-sts-creating-a-cluster-with-customizations -- Name: Creating a ROSA (classic architecture) cluster using Terraform - Dir: terraform - Distros: openshift-rosa - Topics: - - Name: Creating a default ROSA (classic architecture) cluster using Terraform - File: rosa-classic-creating-a-cluster-quickly-terraform -- Name: Interactive cluster creation mode reference - File: rosa-sts-interactive-mode-reference -- Name: Creating an AWS PrivateLink cluster on ROSA - File: rosa-aws-privatelink-creating-cluster -- Name: Configuring a shared virtual private cloud for ROSA clusters - File: rosa-shared-vpc-config -- Name: Accessing a ROSA cluster - File: rosa-sts-accessing-cluster -- Name: Configuring identity providers using Red Hat OpenShift Cluster Manager - File: rosa-sts-config-identity-providers -- Name: Revoking access to a ROSA cluster - File: rosa-sts-deleting-access-cluster -- Name: Deleting a ROSA cluster - File: rosa-sts-deleting-cluster -- Name: Deploying ROSA without AWS STS - Dir: rosa_getting_started_iam - Distros: openshift-rosa - Topics: - - Name: AWS prerequisites for ROSA - File: rosa-aws-prereqs - - Name: Understanding the ROSA deployment workflow - File: rosa-getting-started-workflow - - Name: Required AWS service quotas - File: rosa-required-aws-service-quotas - - Name: Configuring your AWS account - File: rosa-config-aws-account - - Name: Installing the ROSA CLI - File: rosa-installing-rosa - - Name: Creating a ROSA cluster without AWS STS - File: rosa-creating-cluster - - Name: Configuring a private cluster - File: rosa-private-cluster -# - Name: Creating a ROSA cluster using the web console -# File: rosa-creating-cluster-console -# - Name: Accessing a ROSA cluster -# File: rosa-accessing-cluster -# - Name: Configuring identity providers using the Red Hat OpenShift Cluster Manager -# File: rosa-config-identity-providers - - Name: Deleting access to a ROSA cluster - File: rosa-deleting-access-cluster - - Name: Deleting a ROSA cluster - File: rosa-deleting-cluster - - Name: Command quick reference for creating clusters and users - File: rosa-quickstart ---- -Name: Support -Dir: support -Distros: openshift-rosa -Topics: -- Name: Support overview - File: index -- Name: Managing your cluster resources - File: managing-cluster-resources -- Name: Approved Access - File: approved-access -- Name: Getting support - File: getting-support - Distros: openshift-rosa -- Name: Remote health monitoring with connected clusters - Dir: remote_health_monitoring - Distros: openshift-rosa - Topics: - - Name: About remote health monitoring - File: about-remote-health-monitoring - - Name: Showing 
data collected by remote health monitoring - File: showing-data-collected-by-remote-health-monitoring -# cannot get resource "secrets" in API group "" in the namespace "openshift-config" -# - Name: Opting out of remote health reporting -# File: opting-out-of-remote-health-reporting -# cannot get resource "secrets" in API group "" in the namespace "openshift-config" -# - Name: Enabling remote health reporting -# File: enabling-remote-health-reporting - - Name: Using Insights to identify issues with your cluster - File: using-insights-to-identify-issues-with-your-cluster - - Name: Using Insights Operator - File: using-insights-operator -# Not supported per Michael McNeill -# - Name: Using remote health reporting in a restricted network -# File: remote-health-reporting-from-restricted-network -# cannot list resource "secrets" in API group "" in the namespace "openshift-config" -# - Name: Importing simple content access entitlements with Insights Operator -# File: insights-operator-simple-access -- Name: Gathering data about your cluster - File: gathering-cluster-data - Distros: openshift-rosa -- Name: Summarizing cluster specifications - File: summarizing-cluster-specifications - Distros: openshift-rosa -- Name: Troubleshooting - Dir: troubleshooting - Distros: openshift-rosa - Topics: -# rosa has own troubleshooting installations -# - Name: Troubleshooting installations -# File: troubleshooting-installations - - Name: Troubleshooting ROSA installations - File: rosa-troubleshooting-installations - - Name: Troubleshooting ROSA with HCP installations - File: rosa-troubleshooting-installations-hcp - - Name: Troubleshooting networking - File: rosa-troubleshooting-networking - - Name: Verifying node health - File: verifying-node-health -# cannot create resource "namespaces", cannot patch resource "nodes" -# - Name: Troubleshooting CRI-O container runtime issues -# File: troubleshooting-crio-issues -# requires ostree, butane, and other plug-ins -# - Name: Troubleshooting operating system issues -# File: troubleshooting-operating-system-issues -# Distros: openshift-rosa -# cannot patch resource "nodes", "nodes/proxy", "namespaces" -# - Name: Troubleshooting network issues -# File: troubleshooting-network-issues -# Distros: openshift-rosa - - Name: Troubleshooting Operator issues - File: troubleshooting-operator-issues - - Name: Investigating pod issues - File: investigating-pod-issues - - Name: Troubleshooting the Source-to-Image process - File: troubleshooting-s2i - - Name: Troubleshooting storage issues - File: troubleshooting-storage-issues -# Not supported per WINC team -# - Name: Troubleshooting Windows container workload issues -# File: troubleshooting-windows-container-workload-issues - - Name: Investigating monitoring issues - File: investigating-monitoring-issues - - Name: Diagnosing OpenShift CLI (oc) issues - File: diagnosing-oc-issues - - Name: Troubleshooting expired offline access tokens - File: rosa-troubleshooting-expired-tokens - Distros: openshift-rosa - - Name: Troubleshooting IAM roles - File: rosa-troubleshooting-iam-resources - Distros: openshift-rosa - - Name: Troubleshooting cluster deployments - File: rosa-troubleshooting-deployments - Distros: openshift-rosa ---- -Name: Web console -Dir: web_console -Distros: openshift-rosa -Topics: -- Name: Web console overview - File: web-console-overview -- Name: Accessing the web console - File: web-console -- Name: Viewing cluster information - File: using-dashboard-to-get-cluster-information -- Name: Adding user preferences - File: 
adding-user-preferences -# cannot patch resource "consoles", insufficient permissions to read any Cluster configuration -#- Name: Configuring the web console -# File: configuring-web-console -# Distros: openshift-rosa -#- Name: Customizing the web console -# File: customizing-the-web-console -# Distros: openshift-rosa -- Name: Dynamic plugins - Dir: dynamic-plugin - Distros: openshift-rosa - Topics: - - Name: Overview of dynamic plugins - File: overview-dynamic-plugin - - Name: Getting started with dynamic plugins - File: dynamic-plugins-get-started - - Name: Deploy your plugin on a cluster - File: deploy-plugin-cluster - - Name: Dynamic plugin example - File: dynamic-plugin-example - - Name: Dynamic plugin reference - File: dynamic-plugins-reference -- Name: Web terminal - Dir: web_terminal - Distros: openshift-rosa - Topics: - - Name: Installing the web terminal - File: installing-web-terminal -# Do not have sufficient permissions to read any cluster configuration. -# - Name: Configuring the web terminal -# File: configuring-web-terminal - - Name: Using the web terminal - File: odc-using-web-terminal - - Name: Troubleshooting the web terminal - File: troubleshooting-web-terminal - - Name: Uninstalling the web terminal - File: uninstalling-web-terminal -- Name: About quick start tutorials - File: creating-quick-start-tutorials - Distros: openshift-rosa ---- -Name: CLI tools -Dir: cli_reference -Distros: openshift-rosa -Topics: -- Name: CLI tools overview - File: index -- Name: OpenShift CLI (oc) - Dir: openshift_cli - Topics: - - Name: Getting started with the OpenShift CLI - File: getting-started-cli - - Name: Configuring the OpenShift CLI - File: configuring-cli - - Name: Usage of oc and kubectl commands - File: usage-oc-kubectl - - Name: Managing CLI profiles - File: managing-cli-profiles - - Name: Extending the OpenShift CLI with plugins - File: extending-cli-plugins - # - Name: Managing CLI plugins with Krew - # File: managing-cli-plugins-krew - # Distros: openshift-rosa - - Name: OpenShift CLI developer command reference - File: developer-cli-commands - - Name: OpenShift CLI administrator command reference - File: administrator-cli-commands - Distros: openshift-rosa -- Name: Developer CLI (odo) - File: odo-important-update - # Dir: developer_cli_odo - Distros: openshift-rosa - # Topics: - # - Name: odo release notes - # File: odo-release-notes - # - Name: Understanding odo - # File: understanding-odo - # - Name: Installing odo - # File: installing-odo - # - Name: Configuring the odo CLI - # File: configuring-the-odo-cli - # - Name: odo CLI reference - # File: odo-cli-reference -- Name: Knative CLI (kn) for use with OpenShift Serverless - File: kn-cli-tools - Distros: openshift-rosa -- Name: Pipelines CLI (tkn) - Dir: tkn_cli - Distros: openshift-rosa - Topics: - - Name: Installing tkn - File: installing-tkn - - Name: Configuring tkn - File: op-configuring-tkn - - Name: Basic tkn commands - File: op-tkn-reference -- Name: opm CLI - Dir: opm - Distros: openshift-rosa - Topics: - - Name: Installing the opm CLI - File: cli-opm-install - - Name: opm CLI reference - File: cli-opm-ref -- Name: Operator SDK - Dir: osdk - Distros: openshift-rosa - Topics: - - Name: Installing the Operator SDK CLI - File: cli-osdk-install - - Name: Operator SDK CLI reference - File: cli-osdk-ref -- Name: ROSA CLI - Dir: rosa_cli - Distros: openshift-rosa - Topics: - # - Name: CLI and web console - # File: rosa-cli-openshift-console - - Name: Getting started with the ROSA CLI - File: rosa-get-started-cli - - 
Name: Managing objects with the ROSA CLI - File: rosa-manage-objects-cli - - Name: Checking account and version information with the ROSA CLI - File: rosa-checking-acct-version-cli - - Name: Checking logs with the ROSA CLI - File: rosa-checking-logs-cli - - Name: Least privilege permissions for ROSA CLI commands - File: rosa-cli-permission-examples - ---- -Name: Red Hat OpenShift Cluster Manager -Dir: ocm -Distros: openshift-rosa -Topics: -- Name: Red Hat OpenShift Cluster Manager - File: ocm-overview -# - Name: Red Hat OpenShift Cluster Manager -# File: ocm-overview -# - Name: Using the OpenShift web console -# File: rosa-using-openshift-console ---- -Name: Cluster administration -Dir: rosa_cluster_admin -Distros: openshift-rosa -Topics: -# - Name: Cluster configurations -# File: rosa-cluster-config -# - Name: Cluster authentication -# File: rosa-cluster-auth -# - Name: Authorization and RBAC -# File: rosa-auth-rbac -- Name: Cluster notifications - File: rosa-cluster-notifications - Distros: openshift-rosa -- Name: Configuring private connections - Dir: cloud_infrastructure_access - Distros: openshift-rosa - Topics: - - Name: Configuring private connections - File: rosa-configuring-private-connections - - Name: Configuring AWS VPC peering - File: dedicated-aws-peering - - Name: Configuring AWS VPN - File: dedicated-aws-vpn - - Name: Configuring AWS Direct Connect - File: dedicated-aws-dc -- Name: Cluster autoscaling - File: rosa-cluster-autoscaling -- Name: Cluster autoscaling for ROSA HCP - File: rosa-cluster-autoscaling-hcp - # Remove cluster autoscaling for ROSA HCP once the HCP docs are published -- Name: Manage nodes using machine pools - Dir: rosa_nodes - Distros: openshift-rosa - Topics: - - Name: About machine pools - File: rosa-nodes-machinepools-about - - Name: Managing compute nodes - File: rosa-managing-worker-nodes - - Name: Configuring machine pools in Local Zones - File: rosa-nodes-machinepools-configuring - Distros: openshift-rosa - - Name: About autoscaling nodes on a cluster - File: rosa-nodes-about-autoscaling-nodes - - Name: Configuring cluster memory to meet container memory and risk requirements - File: nodes-cluster-resource-configure -- Name: Configuring PID limits - File: rosa-configuring-pid-limits - Distros: openshift-rosa ---- -Name: Security and compliance -Dir: security -Distros: openshift-rosa -Topics: -- Name: Viewing audit logs - File: audit-log-view -- Name: Adding additional constraints for IP-based AWS role assumption - File: rosa-adding-additional-constraints-for-ip-based-aws-role-assumption -#- Name: Security -# File: rosa-security -#- Name: Application and cluster compliance -# File: rosa-app-security-compliance ---- -Name: Authentication and authorization -Dir: authentication -Distros: openshift-rosa -Topics: -- Name: Authentication and authorization overview - File: index -- Name: Understanding authentication - File: understanding-authentication -# - Name: Configuring the internal OAuth server -# File: configuring-internal-oauth -# - Name: Configuring OAuth clients -# File: configuring-oauth-clients -- Name: Managing user-owned OAuth access tokens - File: managing-oauth-access-tokens -# - Name: Understanding identity provider configuration -# File: understanding-identity-provider -- Name: Configuring identity providers - File: sd-configuring-identity-providers -# - Name: Configuring identity providers -# Dir: identity_providers -# Topics: -# - Name: Configuring an htpasswd identity provider -# File: configuring-htpasswd-identity-provider -# - Name: 
Configuring a Keystone identity provider -# File: configuring-keystone-identity-provider -# - Name: Configuring an LDAP identity provider -# File: configuring-ldap-identity-provider -# - Name: Configuring a basic authentication identity provider -# File: configuring-basic-authentication-identity-provider -# - Name: Configuring a request header identity provider -# File: configuring-request-header-identity-provider -# - Name: Configuring a GitHub or GitHub Enterprise identity provider -# File: configuring-github-identity-provider -# - Name: Configuring a GitLab identity provider -# File: configuring-gitlab-identity-provider -# - Name: Configuring a Google identity provider -# File: configuring-google-identity-provider -# - Name: Configuring an OpenID Connect identity provider -# File: configuring-oidc-identity-provider -- Name: Using RBAC to define and apply permissions - File: using-rbac -# - Name: Removing the kubeadmin user -# File: remove-kubeadmin -#- Name: Configuring LDAP failover -# File: configuring-ldap-failover -- Name: Understanding and creating service accounts - File: understanding-and-creating-service-accounts -- Name: Using service accounts in applications - File: using-service-accounts-in-applications -- Name: Using a service account as an OAuth client - File: using-service-accounts-as-oauth-client -- Name: Assuming an AWS IAM role for a service account - File: assuming-an-aws-iam-role-for-a-service-account -- Name: Scoping tokens - File: tokens-scoping -- Name: Using bound service account tokens - File: bound-service-account-tokens -- Name: Managing security context constraints - File: managing-security-context-constraints -- Name: Understanding and managing pod security admission - File: understanding-and-managing-pod-security-admission -# - Name: Impersonating the system:admin user -# File: impersonating-system-admin -- Name: Syncing LDAP groups - File: ldap-syncing -# - Name: Managing cloud provider credentials -# Dir: managing_cloud_provider_credentials -# Topics: -# - Name: About the Cloud Credential Operator -# File: about-cloud-credential-operator -# - Name: Mint mode -# File: cco-mode-mint -# - Name: Passthrough mode -# File: cco-mode-passthrough -# - Name: Manual mode with long-term credentials for components -# File: cco-mode-manual -# - Name: Manual mode with short-term credentials for components -# File: cco-short-term-creds ---- -Name: Upgrading -Dir: upgrading -Distros: openshift-rosa -Topics: -- Name: Upgrading ROSA with HCP clusters - File: rosa-hcp-upgrading -- Name: Upgrading ROSA Classic clusters - File: rosa-upgrading-sts ---- -Name: CI/CD -Dir: cicd -Distros: openshift-rosa -Topics: -- Name: CI/CD overview - Dir: overview - Topics: - - Name: About CI/CD - File: index -- Name: Builds using Shipwright - Dir: builds_using_shipwright - Topics: - - Name: Overview of Builds - File: overview-openshift-builds -- Name: Builds using BuildConfig - Dir: builds - Topics: - - Name: Understanding image builds - File: understanding-image-builds - - Name: Understanding build configurations - File: understanding-buildconfigs - - Name: Creating build inputs - File: creating-build-inputs - - Name: Managing build output - File: managing-build-output - - Name: Using build strategies - File: build-strategies -# - Name: Custom image builds with Buildah -# File: custom-builds-buildah - - Name: Performing and configuring basic builds - File: basic-build-operations - - Name: Triggering and modifying builds - File: triggering-builds-build-hooks - - Name: Performing advanced 
builds - File: advanced-build-operations - - Name: Using Red Hat subscriptions in builds - File: running-entitled-builds - # Dedicated-admin cannot secure builds by strategy - # - Name: Securing builds by strategy - # File: securing-builds-by-strategy - # Dedicated-admin cannot edit build configuration resources - # - Name: Build configuration resources - # File: build-configuration - - Name: Troubleshooting builds - File: troubleshooting-builds -# - Name: Setting up additional trusted certificate authorities for builds -# File: setting-up-trusted-ca -- Name: Pipelines - Dir: pipelines - Topics: - - Name: About OpenShift Pipelines - File: about-pipelines -- Name: GitOps - Dir: gitops - Topics: - - Name: About OpenShift GitOps - File: about-redhat-openshift-gitops -- Name: Jenkins - Dir: jenkins - Topics: - - Name: Configuring Jenkins images - File: images-other-jenkins - - Name: Jenkins agent - File: images-other-jenkins-agent - - Name: Migrating from Jenkins to OpenShift Pipelines - File: migrating-from-jenkins-to-openshift-pipelines - - Name: Important changes to OpenShift Jenkins images - File: important-changes-to-openshift-jenkins-images ---- -Name: Images -Dir: openshift_images -Distros: openshift-rosa -Topics: -- Name: Overview of images - File: index -# replaced Configuring the Cluster Samples Operator name, cannot configure the operator -- Name: Overview of the Cluster Samples Operator - File: configuring-samples-operator - Distros: openshift-rosa -- Name: Using the Cluster Samples Operator with an alternate registry - File: samples-operator-alt-registry - Distros: openshift-rosa -- Name: Creating images - File: create-images -- Name: Managing images - Dir: managing_images - Topics: - - Name: Managing images overview - File: managing-images-overview - - Name: Tagging images - File: tagging-images - - Name: Image pull policy - File: image-pull-policy - - Name: Using image pull secrets - File: using-image-pull-secrets -- Name: Managing image streams - File: image-streams-manage - Distros: openshift-rosa -- Name: Using image streams with Kubernetes resources - File: using-imagestreams-with-kube-resources - Distros: openshift-rosa -- Name: Triggering updates on image stream changes - File: triggering-updates-on-imagestream-changes - Distros: openshift-rosa -- Name: Image configuration resources (Classic) - File: image-configuration - Distros: openshift-rosa -- Name: Image configuration resources (HCP) - File: image-configuration-hcp - Distros: openshift-rosa -- Name: Using images - Dir: using_images - Distros: openshift-rosa - Topics: - - Name: Using images overview - File: using-images-overview - - Name: Source-to-image - File: using-s21-images - - Name: Customizing source-to-image images - File: customizing-s2i-images ---- -Name: Add-on services -Dir: adding_service_cluster -Distros: openshift-rosa -Topics: -- Name: Adding services to a cluster - File: adding-service -- Name: Available services - File: rosa-available-services ---- -Name: Storage -Dir: storage -Distros: openshift-rosa -Topics: -- Name: Storage overview - File: index -- Name: Understanding ephemeral storage - File: understanding-ephemeral-storage -- Name: Understanding persistent storage - File: understanding-persistent-storage -- Name: Configuring persistent storage - Dir: persistent_storage - Topics: - - Name: Persistent storage using AWS Elastic Block Store - File: persistent-storage-aws -- Name: Using Container Storage Interface (CSI) - Dir: container_storage_interface - Topics: - - Name: Configuring CSI volumes - 
File: persistent-storage-csi - - Name: Managing the default storage class - File: persistent-storage-csi-sc-manage - - Name: AWS Elastic Block Store CSI Driver Operator - File: persistent-storage-csi-ebs - - Name: AWS Elastic File Service CSI Driver Operator - File: persistent-storage-csi-aws-efs -- Name: Generic ephemeral volumes - File: generic-ephemeral-vols -- Name: Dynamic provisioning - File: dynamic-provisioning ---- -Name: Registry -Dir: registry -Distros: openshift-rosa -Topics: -- Name: Registry overview - File: index -- Name: Image Registry Operator in Red Hat OpenShift Service on AWS - File: configuring-registry-operator -- Name: Accessing the registry - File: accessing-the-registry ---- -Name: Operators -Dir: operators -Distros: openshift-rosa -Topics: -- Name: Operators overview - File: index -- Name: Understanding Operators - Dir: understanding - Topics: - - Name: What are Operators? - File: olm-what-operators-are - - Name: Packaging format - File: olm-packaging-format - - Name: Common terms - File: olm-common-terms - - Name: Operator Lifecycle Manager (OLM) - Dir: olm - Topics: - - Name: Concepts and resources - File: olm-understanding-olm - - Name: Architecture - File: olm-arch - - Name: Workflow - File: olm-workflow - - Name: Dependency resolution - File: olm-understanding-dependency-resolution - - Name: Operator groups - File: olm-understanding-operatorgroups - - Name: Multitenancy and Operator colocation - File: olm-colocation - - Name: Operator conditions - File: olm-operatorconditions - - Name: Metrics - File: olm-understanding-metrics - - Name: Webhooks - File: olm-webhooks - - Name: OperatorHub - File: olm-understanding-operatorhub - - Name: Red Hat-provided Operator catalogs - File: olm-rh-catalogs - - Name: Operators in multitenant clusters - File: olm-multitenancy - - Name: CRDs - Dir: crds - Topics: - - Name: Managing resources from CRDs - File: crd-managing-resources-from-crds -- Name: User tasks - Dir: user - Topics: - - Name: Creating applications from installed Operators - File: olm-creating-apps-from-installed-operators -- Name: Administrator tasks - Dir: admin - Topics: - - Name: Adding Operators to a cluster - File: olm-adding-operators-to-cluster - - Name: Updating installed Operators - File: olm-upgrading-operators - - Name: Deleting Operators from a cluster - File: olm-deleting-operators-from-cluster - - Name: Configuring proxy support - File: olm-configuring-proxy-support - - Name: Viewing Operator status - File: olm-status - - Name: Managing Operator conditions - File: olm-managing-operatorconditions - - Name: Managing custom catalogs - File: olm-managing-custom-catalogs - - Name: Catalog source pod scheduling - File: olm-cs-podsched - - Name: Troubleshooting Operator issues - File: olm-troubleshooting-operator-issues -- Name: Developing Operators - Dir: operator_sdk - Topics: - - Name: About the Operator SDK - File: osdk-about - - Name: Installing the Operator SDK CLI - File: osdk-installing-cli - - Name: Go-based Operators - Dir: golang - Topics: -# Quick start excluded, because it requires cluster-admin permissions. -# - Name: Getting started -# File: osdk-golang-quickstart - - Name: Tutorial - File: osdk-golang-tutorial - - Name: Project layout - File: osdk-golang-project-layout - - Name: Updating Go-based projects - File: osdk-golang-updating-projects - - Name: Ansible-based Operators - Dir: ansible - Topics: -# Quick start excluded, because it requires cluster-admin permissions. 
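
The comment pattern visible here recurs throughout the map: a topic that cannot apply to this distro is commented out, with the reason recorded on the line above it, rather than deleted. Entries that remain active must still resolve to real source files; per the header comment, each File value names an .adoc file under the enclosing Dir chain. A hypothetical consistency check along those lines (again assuming PyYAML; this is not part of the repository's actual CI) could look like the sketch below.

    import sys
    from pathlib import Path
    import yaml

    def check(topics, base, missing):
        for topic in topics or []:
            if "File" in topic:
                # A leaf topic's File resolves to <enclosing dirs>/<File>.adoc.
                adoc = base / (topic["File"] + ".adoc")
                if not adoc.is_file():
                    missing.append(str(adoc))
            if "Dir" in topic:
                # A sub-topic group nests its children one directory deeper.
                check(topic.get("Topics"), base / topic["Dir"], missing)

    def main(map_path, repo_root):
        missing = []
        with open(map_path) as f:
            for record in yaml.safe_load_all(f):
                if record:
                    check(record.get("Topics"), Path(repo_root) / record["Dir"], missing)
        for path in missing:
            print("missing:", path)
        sys.exit(1 if missing else 0)

    if __name__ == "__main__":
        # Example (hypothetical script name):
        #   python check_topic_map.py _topic_maps/_topic_map_rosa.yml .
        main(sys.argv[1], sys.argv[2])
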
-# - Name: Getting started -# File: osdk-ansible-quickstart - - Name: Tutorial - File: osdk-ansible-tutorial - - Name: Project layout - File: osdk-ansible-project-layout - - Name: Updating Ansible-based projects - File: osdk-ansible-updating-projects - - Name: Ansible support - File: osdk-ansible-support - - Name: Kubernetes Collection for Ansible - File: osdk-ansible-k8s-collection - - Name: Using Ansible inside an Operator - File: osdk-ansible-inside-operator - - Name: Custom resource status management - File: osdk-ansible-cr-status - - Name: Helm-based Operators - Dir: helm - Topics: -# Quick start excluded, because it requires cluster-admin permissions. -# - Name: Getting started -# File: osdk-helm-quickstart - - Name: Tutorial - File: osdk-helm-tutorial - - Name: Project layout - File: osdk-helm-project-layout - - Name: Updating Helm-based projects - File: osdk-helm-updating-projects - - Name: Helm support - File: osdk-helm-support - - Name: Defining cluster service versions (CSVs) - File: osdk-generating-csvs - - Name: Working with bundle images - File: osdk-working-bundle-images - - Name: Complying with pod security admission - File: osdk-complying-with-psa - - Name: Validating Operators using the scorecard - File: osdk-scorecard - - Name: Validating Operator bundles - File: osdk-bundle-validate - - Name: High-availability or single-node cluster detection and support - File: osdk-ha-sno - - Name: Configuring built-in monitoring with Prometheus - File: osdk-monitoring-prometheus - - Name: Configuring leader election - File: osdk-leader-election - - Name: Object pruning utility - File: osdk-pruning-utility - - Name: Migrating package manifest projects to bundle format - File: osdk-pkgman-to-bundle - - Name: Operator SDK CLI reference - File: osdk-cli-ref - - Name: Migrating to Operator SDK v0.1.0 - File: osdk-migrating-to-v0-1-0 -# ROSA customers can't configure/edit the cluster Operators -# - Name: Cluster Operators reference -# File: operator-reference ---- -Name: Networking -Dir: networking -Distros: openshift-rosa -Topics: -- Name: About networking - File: about-managed-networking -- Name: Networking Operators - Dir: networking_operators - Distros: openshift-rosa - Topics: - - Name: AWS Load Balancer Operator - File: aws-load-balancer-operator - - Name: DNS Operator in Red Hat OpenShift Service on AWS - File: dns-operator - - Name: Ingress Operator in Red Hat OpenShift Service on AWS - File: ingress-operator - - Name: Ingress Node Firewall Operator in Red Hat OpenShift Service on AWS - File: ingress-node-firewall-operator -- Name: Network verification - File: network-verification -- Name: Configuring a cluster-wide proxy during installation - File: configuring-cluster-wide-proxy -- Name: CIDR range definitions - File: cidr-range-definitions -- Name: Network security - Dir: network_security - Distros: openshift-rosa - Topics: - - Name: Understanding network policy APIs - File: network-policy-apis - - Name: Admin network policy - Dir: AdminNetworkPolicy - Distros: openshift-rosa - Topics: - - Name: About AdminNetworkPolicy - File: ovn-k-anp - - Name: About BaselineAdminNetworkPolicy - File: ovn-k-banp - - Name: Network policy - Dir: network_policy - Distros: openshift-rosa - Topics: - - Name: About network policy - File: about-network-policy - - Name: Creating a network policy - File: creating-network-policy - - Name: Viewing a network policy - File: viewing-network-policy - - Name: Editing a network policy - File: editing-network-policy - - Name: Deleting a network policy - File: 
deleting-network-policy - - Name: Defining a default network policy for projects - File: default-network-policy - - Name: Configuring multitenant isolation with network policy - File: multitenant-network-policy - - Name: Audit logging for network security - File: logging-network-security -- Name: OVN-Kubernetes network plugin - Dir: ovn_kubernetes_network_provider - Topics: - - Name: About the OVN-Kubernetes network plugin - File: about-ovn-kubernetes - - Name: Configuring an egress IP address - File: configuring-egress-ips-ovn - - Name: Migrating from OpenShift SDN network plugin to OVN-Kubernetes network plugin - File: migrate-from-openshift-sdn -- Name: OpenShift SDN network plugin - Dir: ovn_kubernetes_network_provider - Topics: - - Name: Enabling multicast for a project - File: enabling-multicast -- Name: Configuring Routes - Dir: routes - Topics: - - Name: Route configuration - File: route-configuration - - Name: Secured routes - File: secured-routes ---- -Name: Building applications -Dir: applications -Distros: openshift-rosa -Topics: -- Name: Building applications overview - File: index -- Name: Projects - Dir: projects - Topics: - - Name: Working with projects - File: working-with-projects -# cannot impersonate resource "users" in API group -# - Name: Creating a project as another user -# File: creating-project-other-user - - Name: Configuring project creation - File: configuring-project-creation -- Name: Creating applications - Dir: creating_applications - Topics: - - Name: Using templates - File: using-templates - - Name: Creating applications using the Developer perspective - File: odc-creating-applications-using-developer-perspective - - Name: Creating applications from installed Operators - File: creating-apps-from-installed-operators - - Name: Creating applications using the CLI - File: creating-applications-using-cli - - Name: Creating applications using Ruby on Rails - File: templates-using-ruby-on-rails -- Name: Viewing application composition using the Topology view - File: odc-viewing-application-composition-using-topology-view -# cannot create required namespace -# - Name: Exporting applications -# File: odc-exporting-applications -- Name: Working with Helm charts - Dir: working_with_helm_charts - Topics: - - Name: Understanding Helm - File: understanding-helm - - Name: Installing Helm - File: installing-helm - - Name: Configuring custom Helm chart repositories - File: configuring-custom-helm-chart-repositories - - Name: Working with Helm releases - File: odc-working-with-helm-releases -- Name: Deployments - Dir: deployments - Topics: - - Name: Custom domains for applications - File: rosa-config-custom-domains-applications - - Name: Understanding Deployments and DeploymentConfigs - File: what-deployments-are - - Name: Managing deployment processes - File: managing-deployment-processes - - Name: Using deployment strategies - File: deployment-strategies - - Name: Using route-based deployment strategies - File: route-based-deployment-strategies -- Name: Quotas - Dir: quotas - Topics: - - Name: Resource quotas per project - File: quotas-setting-per-project - - Name: Resource quotas across multiple projects - File: quotas-setting-across-multiple-projects -- Name: Using config maps with applications - File: config-maps -- Name: Monitoring project and application metrics using the Developer perspective - File: odc-monitoring-project-and-application-metrics-using-developer-perspective -- Name: Monitoring application health - File: application-health -- Name: Editing 
applications - File: odc-editing-applications -- Name: Working with quotas - File: working-with-quotas -- Name: Pruning objects to reclaim resources - File: pruning-objects -- Name: Idling applications - File: idling-applications -- Name: Deleting applications - File: odc-deleting-applications -- Name: Using the Red Hat Marketplace - File: red-hat-marketplace -# - Name: Application GitOps workflows -# File: rosa-app-gitops-workflows -# - Name: Application logging -# File: rosa-app-logging -# - Name: Applications -# File: rosa-apps -# - Name: Application metrics and alerts -# File: rosa-app-metrics and alerts -# - Name: Projects -# File: rosa-projects -# - Name: Using the internal registry -# File: rosa-using-internal-registry ---- -Name: Backup and restore -Dir: backup_and_restore -Distros: openshift-rosa -Topics: -- Name: OADP Application backup and restore - Dir: application_backup_and_restore - Topics: - - Name: Introduction to OpenShift API for Data Protection - File: oadp-intro - - Name: OADP release notes - Dir: release-notes - Topics: - - Name: OADP 1.4 release notes - File: oadp-1-4-release-notes - - Name: OADP performance - Dir: oadp-performance - Topics: - - Name: OADP recommended network settings - File: oadp-recommended-network-settings - - Name: OADP features and plugins - File: oadp-features-plugins - - Name: OADP use cases - Dir: oadp-use-cases - Topics: - - Name: Backing up an application using OADP with ROSA STS - File: oadp-rosa-backup-restore -# ODF not supported on ROSA Classic -# - Name: Backing up an application using OADP and ODF -# File: oadp-usecase-backup-using-odf - - Name: Restoring a backup to a different namespace - File: oadp-usecase-restore-different-namespace -# ODF not supported on ROSA Classic -# - Name: Including a self-signed CA certificate during backup -# File: oadp-usecase-enable-ca-cert - - Name: Installing and configuring OADP - Dir: oadp-rosa - Topics: - - Name: Installing OADP - File: oadp-rosa-backing-up-applications - - Name: Uninstalling OADP - Dir: installing - Topics: - - Name: Uninstalling OADP - File: uninstalling-oadp - - Name: OADP backing up - Dir: backing_up_and_restoring - Topics: - - Name: Backing up applications - File: backing-up-applications - - Name: Creating a Backup CR - File: oadp-creating-backup-cr -# ROSA docs do not include CSI snapshots -# - Name: Backing up persistent volumes with CSI snapshots -# File: oadp-backing-up-pvs-csi-doc -# - Name: Backing up applications with File System Backup -# File: oadp-backing-up-applications-restic-doc - - Name: Creating backup hooks - File: oadp-creating-backup-hooks-doc - - Name: Scheduling backups using Schedule CR - File: oadp-scheduling-backups-doc - - Name: Deleting backups - File: oadp-deleting-backups -# - Name: About Kopia -# File: oadp-about-kopia - - Name: OADP restoring - Dir: backing_up_and_restoring - Topics: - - Name: Restoring applications - File: restoring-applications -# - Name: OADP and ROSA -# Dir: oadp-rosa -# Topics: -# - Name: Backing up applications on ROSA STS using OADP -# File: oadp-rosa-backing-up-applications -# - Name: OADP and AWS STS -# Dir: aws-sts -# Topics: -# - Name: Backing up applications on AWS STS using OADP -# File: oadp-aws-sts -# - Name: OADP Data Mover -# Dir: installing -# Topics: -# - Name: About the OADP Data Mover -# File: about-oadp-data-mover -# - Name: Backing up and restoring volumes by using CSI snapshots data movement -# File: oadp-backup-restore-csi-snapshots -# - Name: Overriding Kopia algorithms -# File: overriding-kopia-algorithms 
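
Because exclusions like the OADP Data Mover entries above are commented out rather than removed, the hidden portion of the navigation is still machine-readable. A small, hypothetical audit (a plain-text scan with no YAML parsing, and not repository tooling) can list every topic that is present in the map but absent from the published navigation.

    import re
    import sys

    # Matches singly commented-out entries such as:
    #   # - Name: Troubleshooting
    #   #   File: troubleshooting
    NAME = re.compile(r"^#\s*-\s*Name:\s*(.+)$")
    FILE = re.compile(r"^#\s*File:\s*(\S+)$")

    def main(path):
        pending = None
        for line in open(path):
            m = NAME.match(line)
            if m:
                pending = m.group(1).strip()
                continue
            m = FILE.match(line)
            if m and pending:
                print(f"{pending}  ({m.group(1)})")
            pending = None

    if __name__ == "__main__":
        # Example (hypothetical script name):
        #   python list_hidden_topics.py _topic_maps/_topic_map_rosa.yml
        main(sys.argv[1])

Doubly commented entries (the "# #" style used above for unsupported Tech Preview topics) would need a looser pattern; the sketch deliberately skips them.
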
-# - Name: Troubleshooting -# File: troubleshooting -# - Name: OADP API -# File: oadp-api -# - Name: Advanced OADP features and functionalities -# File: oadp-advanced-topics ---- -Name: Nodes -Dir: nodes -Distros: openshift-rosa -Topics: -- Name: Overview of nodes - File: index -- Name: Working with pods - Dir: pods - Topics: - - Name: About pods - File: nodes-pods-using - - Name: Viewing pods - File: nodes-pods-viewing - - Name: Configuring a cluster for pods - File: nodes-pods-configuring - Distros: openshift-rosa -# Cannot create namespace to install VPA; revisit after Operator book converted -# - Name: Automatically adjust pod resource levels with the vertical pod autoscaler -# File: nodes-pods-vertical-autoscaler - - Name: Providing sensitive data to pods - File: nodes-pods-secrets - - Name: Creating and using config maps - File: nodes-pods-configmaps -# Cannot create required kubeletconfigs -# - Name: Using Device Manager to make devices available to nodes -# File: nodes-pods-plugins -# Distros: openshift-rosa - - Name: Including pod priority in pod scheduling decisions - File: nodes-pods-priority - Distros: openshift-rosa - - Name: Placing pods on specific nodes using node selectors - File: nodes-pods-node-selectors - Distros: openshift-rosa -# Cannot create namespace to install Run Once; revisit after Operator book converted -# - Name: Run Once Duration Override Operator -# Dir: run_once_duration_override -# Distros: openshift-rosa -# Topics: -# - Name: Run Once Duration Override Operator overview -# File: index -# - Name: Run Once Duration Override Operator release notes -# File: run-once-duration-override-release-notes -# - Name: Overriding the active deadline for run-once pods -# File: run-once-duration-override-install -# - Name: Uninstalling the Run Once Duration Override Operator -# File: run-once-duration-override-uninstall -- Name: Automatically scaling pods with the Custom Metrics Autoscaler Operator - Dir: cma - Distros: openshift-rosa - Topics: - - Name: Release notes - Dir: nodes-cma-rn - Topics: - - Name: Custom Metrics Autoscaler Operator release notes - File: nodes-cma-autoscaling-custom-rn - - Name: Past releases - File: nodes-cma-autoscaling-custom-rn-past - - Name: Custom Metrics Autoscaler Operator overview - File: nodes-cma-autoscaling-custom - - Name: Installing the custom metrics autoscaler - File: nodes-cma-autoscaling-custom-install - - Name: Understanding the custom metrics autoscaler triggers - File: nodes-cma-autoscaling-custom-trigger - - Name: Understanding the custom metrics autoscaler trigger authentications - File: nodes-cma-autoscaling-custom-trigger-auth - - Name: Pausing the custom metrics autoscaler - File: nodes-cma-autoscaling-custom-pausing - - Name: Gathering audit logs - File: nodes-cma-autoscaling-custom-audit-log - - Name: Gathering debugging data - File: nodes-cma-autoscaling-custom-debugging - - Name: Viewing Operator metrics - File: nodes-cma-autoscaling-custom-metrics - - Name: Understanding how to add custom metrics autoscalers - File: nodes-cma-autoscaling-custom-adding - - Name: Removing the Custom Metrics Autoscaler Operator - File: nodes-cma-autoscaling-custom-removing -- Name: Controlling pod placement onto nodes (scheduling) - Dir: scheduling - Distros: openshift-rosa - Topics: - - Name: About pod placement using the scheduler - File: nodes-scheduler-about - - Name: Placing pods relative to other pods using pod affinity and anti-affinity rules - File: nodes-scheduler-pod-affinity - - Name: Controlling pod placement on nodes 
using node affinity rules - File: nodes-scheduler-node-affinity - - Name: Placing pods onto overcommitted nodes - File: nodes-scheduler-overcommit -# Per OSDOCS-9791, ROSA customers cannot add taints to individual nodes. -# - Name: Controlling pod placement using node taints -# File: nodes-scheduler-taints-tolerations - - Name: Placing pods on specific nodes using node selectors - File: nodes-scheduler-node-selectors - - Name: Controlling pod placement using pod topology spread constraints - File: nodes-scheduler-pod-topology-spread-constraints -# - Name: Placing a pod on a specific node by name -# File: nodes-scheduler-node-names -# - Name: Placing a pod in a specific project -# File: nodes-scheduler-node-projects -# - Name: Keeping your cluster balanced using the descheduler -# File: nodes-scheduler-descheduler -# Cannot create namespace to install Descheduler Operator; revisit after Operator book converted -# - Name: Evicting pods using the descheduler -# File: nodes-descheduler -# Cannot create namespace to install Secondary Scheduler Operator; revisit after Operator book converted -# - Name: Secondary scheduler -# Dir: secondary_scheduler -# Distros: openshift-enterprise -# Topics: -# - Name: Secondary scheduler overview -# File: index -# - Name: Secondary Scheduler Operator release notes -# File: nodes-secondary-scheduler-release-notes -# - Name: Scheduling pods using a secondary scheduler -# File: nodes-secondary-scheduler-configuring -# - Name: Uninstalling the Secondary Scheduler Operator -# File: nodes-secondary-scheduler-uninstalling -- Name: Using jobs and daemon sets - Dir: jobs - Topics: - - Name: Running background tasks on nodes automatically with daemon sets - File: nodes-pods-daemonsets - Distros: openshift-rosa - - Name: Running tasks in pods using jobs - File: nodes-nodes-jobs -- Name: Working with nodes - Dir: nodes - Distros: openshift-rosa - Topics: - - Name: Viewing and listing the nodes in your cluster - File: nodes-nodes-viewing - - Name: Working with nodes - File: nodes-nodes-working -# cannot use oc adm cordon; cannot patch resource "machinesets"; cannot patch resource "nodes" -# - Name: Working with nodes -# File: nodes-nodes-working -# cannot create resource "kubeletconfigs", "schedulers", "machineconfigs", "kubeletconfigs" -# - Name: Managing nodes -# File: nodes-nodes-managing -# cannot create resource "kubeletconfigs" -# - Name: Managing graceful node shutdown -# File: nodes-nodes-graceful-shutdown -# cannot create resource "kubeletconfigs" -# - Name: Managing the maximum number of pods per node -# File: nodes-nodes-managing-max-pods - - Name: Using the Node Tuning Operator - File: nodes-node-tuning-operator -# - Name: Remediating, fencing, and maintaining nodes -# File: nodes-remediating-fencing-maintaining-rhwa -# Cannot create namespace needed to oc debug and reboot; revisit after Operator book converted -# - Name: Understanding node rebooting -# File: nodes-nodes-rebooting -# cannot create resource "kubeletconfigs" -# - Name: Freeing node resources using garbage collection -# File: nodes-nodes-garbage-collection -# cannot create resource "kubeletconfigs" -# - Name: Allocating resources for nodes -# File: nodes-nodes-resources-configuring -# cannot create resource "kubeletconfigs" -# - Name: Allocating specific CPUs for nodes in a cluster -# File: nodes-nodes-resources-cpus -# cannot create resource "kubeletconfigs" -# - Name: Configuring the TLS security profile for the kubelet -# File: nodes-nodes-tls -# Distros: openshift-rosa -# - Name: Monitoring for
problems in your nodes -# File: nodes-nodes-problem-detector -# cannot patch resource "nodes" -# - Name: Creating infrastructure nodes -# File: nodes-nodes-creating-infrastructure-nodes -- Name: Working with containers - Dir: containers - Topics: - - Name: Understanding containers - File: nodes-containers-using - - Name: Using Init Containers to perform tasks before a pod is deployed - File: nodes-containers-init - Distros: openshift-rosa - - Name: Using volumes to persist container data - File: nodes-containers-volumes - - Name: Mapping volumes using projected volumes - File: nodes-containers-projected-volumes - - Name: Allowing containers to consume API objects - File: nodes-containers-downward-api - - Name: Copying files to or from a container - File: nodes-containers-copying-files - - Name: Executing remote commands in a container - File: nodes-containers-remote-commands - - Name: Using port forwarding to access applications in a container - File: nodes-containers-port-forwarding -# cannot patch resource "configmaps" -# - Name: Using sysctls in containers -# File: nodes-containers-sysctls -- Name: Working with clusters - Dir: clusters - Topics: - - Name: Viewing system event information in a cluster - File: nodes-containers-events - - Name: Analyzing cluster resource levels - File: nodes-cluster-resource-levels - Distros: openshift-rosa - - Name: Setting limit ranges - File: nodes-cluster-limit-ranges - - Name: Configuring cluster memory to meet container memory and risk requirements - File: nodes-cluster-resource-configure - Distros: openshift-rosa -# TODO: Remove the whole nodes-cluster-overcommit file for OSDOCS-10853? - - Name: Configuring your cluster to place pods on overcommitted nodes - File: nodes-cluster-overcommit - Distros: openshift-rosa - - Name: Configuring the Linux cgroup version on your nodes - File: nodes-cluster-cgroups-2 - Distros: openshift-enterprise - - Name: Configuring the Linux cgroup version on your nodes - File: nodes-cluster-cgroups-okd - Distros: openshift-origin -# The TechPreviewNoUpgrade Feature Gate is not allowed -# - Name: Enabling features using FeatureGates -# File: nodes-cluster-enabling-features -# Distros: openshift-rosa -# Error: nodes.config.openshift.io "cluster" could not be patched -# - Name: Improving cluster stability in high latency environments using worker latency profiles -# File: nodes-cluster-worker-latency-profiles -# Not supported per Michael McNeill -#- Name: Remote worker nodes on the network edge -# Dir: edge -# Topics: -# - Name: Using remote worker nodes at the network edge -# File: nodes-edge-remote-workers -# Not supported per Michael McNeill -#- Name: Worker nodes for single-node OpenShift clusters -# Dir: nodes -# Distros: openshift-rosa -# Topics: -# - Name: Adding worker nodes to single-node OpenShift clusters -# File: nodes-sno-worker-nodes ---- -Name: Observability -Dir: observability -Distros: openshift-rosa -Topics: -- Name: Observability overview - Dir: overview - Topics: - - Name: About Observability - File: index -- Name: Monitoring - Dir: monitoring - Distros: openshift-rosa - Topics: - - Name: Monitoring overview - File: monitoring-overview - - Name: Accessing monitoring for user-defined projects - File: sd-accessing-monitoring-for-user-defined-projects - - Name: Configuring the monitoring stack - File: configuring-the-monitoring-stack - - Name: Disabling monitoring for user-defined projects - File: sd-disabling-monitoring-for-user-defined-projects - - Name: Enabling alert routing for user-defined projects -
File: enabling-alert-routing-for-user-defined-projects - - Name: Managing metrics - File: managing-metrics - - Name: Managing alerts - File: managing-alerts - - Name: Reviewing monitoring dashboards - File: reviewing-monitoring-dashboards - - Name: Accessing third-party monitoring APIs - File: accessing-third-party-monitoring-apis - - Name: Troubleshooting monitoring issues - File: troubleshooting-monitoring-issues - - Name: Config map reference for the Cluster Monitoring Operator - File: config-map-reference-for-the-cluster-monitoring-operator -- Name: Logging - Dir: logging - Distros: openshift-rosa - Topics: - - Name: Release notes - Dir: logging_release_notes - Topics: - - Name: Logging 5.9 - File: logging-5-9-release-notes - - Name: Logging 5.8 - File: logging-5-8-release-notes - - Name: Logging 5.7 - File: logging-5-7-release-notes - - Name: Support - File: cluster-logging-support - - Name: Troubleshooting logging - Dir: troubleshooting - Topics: - - Name: Viewing Logging status - File: cluster-logging-cluster-status - - Name: Troubleshooting log forwarding - File: log-forwarding-troubleshooting - - Name: Troubleshooting logging alerts - File: troubleshooting-logging-alerts - - Name: Viewing the status of the Elasticsearch log store - File: cluster-logging-log-store-status - - Name: About Logging - File: cluster-logging - - Name: Installing Logging - File: cluster-logging-deploying - - Name: Updating Logging - File: cluster-logging-upgrading - - Name: Visualizing logs - Dir: log_visualization - Topics: - - Name: About log visualization - File: log-visualization - - Name: Log visualization with the web console - File: log-visualization-ocp-console - - Name: Viewing cluster dashboards - File: cluster-logging-dashboards - - Name: Log visualization with Kibana - File: logging-kibana - - Name: Configuring your Logging deployment - Dir: config - Topics: - - Name: Configuring CPU and memory limits for Logging components - File: cluster-logging-memory - #- Name: Configuring systemd-journald and Fluentd - # File: cluster-logging-systemd - - Name: Log collection and forwarding - Dir: log_collection_forwarding - Topics: - - Name: About log collection and forwarding - File: log-forwarding - - Name: Log output types - File: logging-output-types - - Name: Enabling JSON log forwarding - File: cluster-logging-enabling-json-logging - - Name: Configuring log forwarding - File: configuring-log-forwarding - - Name: Configuring the logging collector - File: cluster-logging-collector - - Name: Collecting and storing Kubernetes events - File: cluster-logging-eventrouter - - Name: Log storage - Dir: log_storage - Topics: - - Name: About log storage - File: about-log-storage - - Name: Installing log storage - File: installing-log-storage - - Name: Configuring the LokiStack log store - File: cluster-logging-loki - - Name: Configuring the Elasticsearch log store - File: logging-config-es-store - - Name: Logging alerts - Dir: logging_alerts - Topics: - - Name: Default logging alerts - File: default-logging-alerts - - Name: Custom logging alerts - File: custom-logging-alerts - - Name: Performance and reliability tuning - Dir: performance_reliability - Topics: - - Name: Flow control mechanisms - File: logging-flow-control-mechanisms - - Name: Filtering logs by content - File: logging-content-filtering - - Name: Filtering logs by metadata - File: logging-input-spec-filtering - - Name: Scheduling resources - Dir: scheduling_resources - Topics: - - Name: Using node selectors to move logging resources - File: 
logging-node-selectors - - Name: Using tolerations to control logging pod placement - File: logging-taints-tolerations - - Name: Uninstalling Logging - File: cluster-logging-uninstall - - Name: Exported fields - File: cluster-logging-exported-fields - - Name: API reference - Dir: api_reference - Topics: - # - Name: 5.8 Logging API reference - # File: logging-5-8-reference - # - Name: 5.7 Logging API reference - # File: logging-5-7-reference - - Name: 5.6 Logging API reference - File: logging-5-6-reference - - Name: Glossary - File: logging-common-terms ---- -Name: Service Mesh -Dir: service_mesh -Distros: openshift-rosa -Topics: -# Tech Preview -# - Name: Service Mesh 3.x -# Dir: v3x -# Topics: -# - Name: OpenShift Service Mesh 3.0 TP1 overview -# File: ossm-service-mesh-3-0-overview -- Name: Service Mesh 2.x - Dir: v2x - Topics: - - Name: About OpenShift Service Mesh - File: ossm-about - - Name: Service Mesh 2.x release notes - File: servicemesh-release-notes - - Name: Service Mesh architecture - File: ossm-architecture - - Name: Service Mesh deployment models - File: ossm-deployment-models - - Name: Service Mesh and Istio differences - File: ossm-vs-community - - Name: Preparing to install Service Mesh - File: preparing-ossm-installation - - Name: Installing the Operators - File: installing-ossm - - Name: Creating the ServiceMeshControlPlane - File: ossm-create-smcp - - Name: Adding workloads to a service mesh - File: ossm-create-mesh - - Name: Enabling sidecar injection - File: prepare-to-deploy-applications-ossm - - Name: Upgrading Service Mesh - File: upgrading-ossm - - Name: Managing users and profiles - File: ossm-profiles-users - - Name: Security - File: ossm-security - - Name: Traffic management - File: ossm-traffic-manage - - Name: Metrics, logs, and traces - File: ossm-observability - - Name: Performance and scalability - File: ossm-performance-scalability - - Name: Deploying to production - File: ossm-deploy-production - - Name: Federation - File: ossm-federation - - Name: Extensions - File: ossm-extensions - - Name: 3scale WebAssembly for 2.1 - File: ossm-threescale-webassembly-module - - Name: 3scale Istio adapter for 2.0 - File: threescale-adapter - - Name: Troubleshooting Service Mesh - File: ossm-troubleshooting-istio - - Name: Control plane configuration reference - File: ossm-reference-smcp - - Name: Kiali configuration reference - File: ossm-reference-kiali - - Name: Jaeger configuration reference - File: ossm-reference-jaeger - - Name: Uninstalling Service Mesh - File: removing-ossm -# Service Mesh 1.x is tech preview -# - Name: Service Mesh 1.x -# Dir: v1x -# Topics: -# - Name: Service Mesh 1.x release notes -# File: servicemesh-release-notes -# - Name: Service Mesh architecture -# File: ossm-architecture -# - Name: Service Mesh and Istio differences -# File: ossm-vs-community -# - Name: Preparing to install Service Mesh -# File: preparing-ossm-installation -# - Name: Installing Service Mesh -# File: installing-ossm -# - Name: Security -# File: ossm-security -# - Name: Traffic management -# File: ossm-traffic-manage -# - Name: Deploying applications on Service Mesh -# File: prepare-to-deploy-applications-ossm -# - Name: Data visualization and observability -# File: ossm-observability -# - Name: Custom resources -# File: ossm-custom-resources -# - Name: 3scale Istio adapter for 1.x -# File: threescale-adapter -# - Name: Removing Service Mesh -# File: removing-ossm ---- -Name: Serverless -Dir: serverless -Distros: openshift-rosa -Topics: -- Name: About Serverless - Dir: 
about - Topics: - - Name: Serverless overview - File: about-serverless ---- -Name: Virtualization -Dir: virt -Distros: openshift-rosa -Topics: -- Name: About - Dir: about_virt - Topics: - - Name: About OpenShift Virtualization - File: about-virt - Distros: openshift-rosa - - Name: About OKD Virtualization - File: about-virt - Distros: openshift-origin - - Name: Security policies - File: virt-security-policies - - Name: Architecture - File: virt-architecture - Distros: openshift-rosa -#- Name: Release notes -# Dir: release_notes -# Topics: -# - Name: OpenShift Virtualization release notes -# File: virt-release-notes-placeholder -# Distros: openshift-rosa -- Name: Getting started - Dir: getting_started - Topics: - - Name: Getting started with OpenShift Virtualization - File: virt-getting-started - Distros: openshift-rosa - - Name: Getting started with OKD Virtualization - File: virt-getting-started - Distros: openshift-origin - - Name: virtctl and libguestfs - File: virt-using-the-cli-tools - Distros: openshift-rosa -- Name: Installing - Dir: install - Topics: - - Name: Preparing your cluster - File: preparing-cluster-for-virt - - Name: Installing OpenShift Virtualization - File: installing-virt - - Name: Uninstalling OpenShift Virtualization - File: uninstalling-virt -- Name: Post-installation configuration - Dir: post_installation_configuration - Topics: - - Name: Post-installation configuration - File: virt-post-install-config - - Name: Node placement rules - File: virt-node-placement-virt-components - - Name: Network configuration - File: virt-post-install-network-config - - Name: Storage configuration - File: virt-post-install-storage-config - - Name: Configuring certificate rotation - File: virt-configuring-certificate-rotation -- Name: Updating - Dir: updating - Topics: - - Name: Updating OpenShift Virtualization - File: upgrading-virt - Distros: openshift-rosa -- Name: Creating a virtual machine - Dir: creating_vm - Topics: -# - Name: Overview -# File: virt-basic-vm-overview -# - Name: Setting up your environment -# File: virt-setting-up-environment - - Name: Creating VMs from instance types - File: virt-creating-vms-from-instance-types - - Name: Creating VMs from templates - File: virt-creating-vms-from-templates -- Name: Advanced VM creation - Dir: creating_vms_advanced - Topics: -# - Name: Overview -# File: virt-advanced-vm-overview - - Name: Creating VMs in the web console - Dir: creating_vms_advanced_web - Topics: - - Name: Creating VMs from Red Hat images - File: virt-creating-vms-from-rh-images-overview - - Name: Creating VMs by importing images from web pages - File: virt-creating-vms-from-web-images - - Name: Creating VMs by uploading images - File: virt-creating-vms-uploading-images - - Name: Cloning VMs - File: virt-cloning-vms - - Name: Creating VMs using the CLI - Dir: creating_vms_cli - Topics: - - Name: Creating virtual machines from the command line - File: virt-creating-vms-from-cli - - Name: Creating VMs by using container disks - File: virt-creating-vms-from-container-disks - - Name: Creating VMs by cloning PVCs - File: virt-creating-vms-by-cloning-pvcs -- Name: Managing VMs - Dir: managing_vms - Topics: - - Name: Installing the QEMU guest agent and VirtIO drivers - File: virt-installing-qemu-guest-agent - - Name: Connecting to VM consoles - File: virt-accessing-vm-consoles - - Name: Configuring SSH access to VMs - File: virt-accessing-vm-ssh - - Name: Editing virtual machines - File: virt-edit-vms - - Name: Editing boot order - File: virt-edit-boot-order - - Name: 
Deleting virtual machines - File: virt-delete-vms - - Name: Exporting virtual machines - File: virt-exporting-vms - - Name: Managing virtual machine instances - File: virt-manage-vmis - - Name: Controlling virtual machine states - File: virt-controlling-vm-states - - Name: Using virtual Trusted Platform Module devices - File: virt-using-vtpm-devices - - Name: Managing virtual machines with OpenShift Pipelines - File: virt-managing-vms-openshift-pipelines - - Name: Advanced virtual machine management - Dir: advanced_vm_management - Topics: - - Name: Working with resource quotas for virtual machines - File: virt-working-with-resource-quotas-for-vms - - Name: Specifying nodes for virtual machines - File: virt-specifying-nodes-for-vms - - Name: Configuring the default CPU model - File: virt-configuring-default-cpu-model - - Name: UEFI mode for virtual machines - File: virt-uefi-mode-for-vms - - Name: Configuring PXE booting for virtual machines - File: virt-configuring-pxe-booting -# Huge pages not supported in ROSA -# - Name: Using huge pages with virtual machines -# File: virt-using-huge-pages-with-vms -# CPU Manager not supported in ROSA -# - Name: Enabling dedicated resources for a virtual machine -# File: virt-dedicated-resources-vm - - Name: Scheduling virtual machines - File: virt-schedule-vms -# Cannot create required machine config in ROSA as required -# - Name: Configuring PCI passthrough -# File: virt-configuring-pci-passthrough -# Cannot create required machine config in ROSA as required -# - Name: Configuring virtual GPUs -# File: virt-configuring-virtual-gpus -# Feature is TP, thus not supported in ROSA -# - Name: Enabling descheduler evictions on virtual machines -# File: virt-enabling-descheduler-evictions - - Name: About high availability for virtual machines - File: virt-high-availability-for-vms - - Name: Control plane tuning - File: virt-vm-control-plane-tuning -# Need to review following are supported: -# - Name: Assigning compute resources -# File: virt-assigning-compute-resources -# - Name: About multi-queue functionality -# File: virt-about-multi-queue - - Name: VM disks - Dir: virtual_disks - Topics: - - Name: Hot-plugging VM disks - File: virt-hot-plugging-virtual-disks - - Name: Expanding VM disks - File: virt-expanding-vm-disks -# Need to check if supported: -# - Name: Configuring shared volumes -# File: virt-configuring-shared-volumes-for-vms - - Name: Migrating VM disks to a different storage class - File: virt-migrating-storage-class -- Name: Networking - Dir: vm_networking - Topics: - - Name: Networking configuration overview - File: virt-networking-overview - - Name: Connecting a VM to the default pod network - File: virt-connecting-vm-to-default-pod-network - - Name: Connecting a VM to a primary user-defined network - File: virt-connecting-vm-to-primary-udn - - Name: Exposing a VM by using a service - File: virt-exposing-vm-with-service -# Not supported in ROSA/OSD -# - Name: Connecting a VM to a Linux bridge network -# File: virt-connecting-vm-to-linux-bridge -# - Name: Connecting a VM to an SR-IOV network -# File: virt-connecting-vm-to-sriov -# - Name: Using DPDK with SR-IOV -# File: virt-using-dpdk-with-sriov - - Name: Connecting a VM to an OVN-Kubernetes secondary network - File: virt-connecting-vm-to-ovn-secondary-network - - Name: Hot plugging secondary network interfaces - File: virt-hot-plugging-network-interfaces - - Name: Connecting a VM to a service mesh - File: virt-connecting-vm-to-service-mesh - - Name: Configuring a dedicated network for live 
migration - File: virt-dedicated-network-live-migration - - Name: Configuring and viewing IP addresses - File: virt-configuring-viewing-ips-for-vms -# Tech Preview features not supported in ROSA/OSD -# - Name: Accessing a VM by using the cluster FQDN -# File: virt-accessing-vm-secondary-network-fqdn - - Name: Managing MAC address pools for network interfaces - File: virt-using-mac-address-pool-for-vms -- Name: Storage - Dir: storage - Topics: - - Name: Storage configuration overview - File: virt-storage-config-overview - - Name: Configuring storage profiles - File: virt-configuring-storage-profile - - Name: Managing automatic boot source updates - File: virt-automatic-bootsource-updates - - Name: Reserving PVC space for file system overhead - File: virt-reserving-pvc-space-fs-overhead - - Name: Configuring local storage by using HPP - File: virt-configuring-local-storage-with-hpp - - Name: Enabling user permissions to clone data volumes across namespaces - File: virt-enabling-user-permissions-to-clone-datavolumes - - Name: Configuring CDI to override CPU and memory quotas - File: virt-configuring-cdi-for-namespace-resourcequota - - Name: Preparing CDI scratch space - File: virt-preparing-cdi-scratch-space - - Name: Using preallocation for data volumes - File: virt-using-preallocation-for-datavolumes - - Name: Managing data volume annotations - File: virt-managing-data-volume-annotations -# Virtual machine live migration -- Name: Live migration - Dir: live_migration - Topics: - - Name: About live migration - File: virt-about-live-migration - - Name: Configuring live migration - File: virt-configuring-live-migration - - Name: Initiating and canceling live migration - File: virt-initiating-live-migration -# Node maintenance mode -- Name: Nodes - Dir: nodes - Topics: - - Name: Node maintenance - File: virt-node-maintenance - - Name: Managing node labeling for obsolete CPU models - File: virt-managing-node-labeling-obsolete-cpu-models - - Name: Preventing node reconciliation - File: virt-preventing-node-reconciliation -# Hiding in ROSA as user cannot cordon and drain nodes -# - Name: Deleting a failed node to trigger VM failover -# File: virt-triggering-vm-failover-resolving-failed-node -- Name: Monitoring - Dir: monitoring - Topics: - - Name: Monitoring overview - File: virt-monitoring-overview -# Hiding in ROSA/OSD as TP not supported -# - Name: Cluster checkup framework -# File: virt-running-cluster-checkups - - Name: Prometheus queries for virtual resources - File: virt-prometheus-queries - - Name: Virtual machine custom metrics - File: virt-exposing-custom-metrics-for-vms - - Name: Virtual machine health checks - File: virt-monitoring-vm-health - - Name: Runbooks - File: virt-runbooks -- Name: Support - Dir: support - Topics: - - Name: Support overview - File: virt-support-overview - - Name: Collecting data for Red Hat Support - File: virt-collecting-virt-data - Distros: openshift-rosa - - Name: Troubleshooting - File: virt-troubleshooting -- Name: Backup and restore - Dir: backup_restore - Topics: - - Name: Backup and restore by using VM snapshots - File: virt-backup-restore-snapshots - - Name: Backing up and restoring virtual machines - File: virt-backup-restore-overview -# - Name: Removed topics (Placeholder for topics removed from topic map) -# Dir: Removed_topics -# Topics: -# - Name: Collecting OKD Virtualization data for community report -# File: virt-collecting-virt-data -# - Name: Preparing to upgrade ROSA to 4.9 -# File: rosa-upgrading-cluster-prepare -# - Name: Upgrading ROSA 
Classic clusters -# File: rosa-upgrading diff --git a/_topic_maps/_topic_map_rosa_hcp.yml b/_topic_maps/_topic_map_rosa_hcp.yml deleted file mode 100644 index ea71a7a5f248..000000000000 --- a/_topic_maps/_topic_map_rosa_hcp.yml +++ /dev/null @@ -1,1685 +0,0 @@ -# This configuration file dictates the organization of the topic groups and -# topics on the main page of the doc site for this branch. Each record -# consists of the following: -# -# --- <= Record delimiter -# Name: Origin of the Species <= Display name of topic group -# Dir: origin_of_the_species <= Directory name of topic group -# Topics: -# - Name: The Majestic Marmoset <= Topic name -# File: the_majestic_marmoset <= Topic file under group dir +/- -# - Name: The Curious Crocodile <= Topic 2 name -# File: the_curious_crocodile <= Topic 2 file -# - Name: The Numerous Nematodes <= Sub-topic group name -# Dir: the_numerous_nematodes <= Sub-topic group dir -# Topics: -# - Name: The Wily Worm <= Sub-topic name -# File: the_wily_worm <= Sub-topic file under / -# - Name: The Acrobatic Ascarid <= Sub-topic 2 name -# File: the_acrobatic_ascarid <= Sub-topic 2 file under / -# -# The ordering of the records in this document determines the ordering of the -# topic groups and topics on the main page. ---- -Name: What's new -Dir: rosa_release_notes -Distros: openshift-rosa-hcp -Topics: -- Name: What's new with Red Hat OpenShift Service on AWS - File: rosa-release-notes ---- -Name: Introduction to ROSA -Dir: rosa_architecture -Distros: openshift-rosa-hcp -Topics: -- Name: Welcome - File: index -- Name: Legal notice - File: legal-notice -- Name: ROSA with HCP overview - File: about-hcp -- Name: AWS STS and ROSA with HCP explained - File: cloud-experts-rosa-hcp-sts-explained -- Name: Architecture models - File: rosa-architecture-models -- Name: Policies and service definition - Dir: rosa_policy_service_definition - Distros: openshift-rosa-hcp - Topics: - - Name: About availability for ROSA - File: rosa-policy-understand-availability - - Name: Overview of responsibilities for ROSA - File: rosa-policy-responsibility-matrix - - Name: ROSA with HCP service definition - File: rosa-hcp-service-definition - - Name: ROSA with HCP instance types - File: rosa-hcp-instance-types - - Name: ROSA with HCP update life cycle - File: rosa-hcp-life-cycle - - Name: SRE and service account access - File: rosa-sre-access - - Name: Understanding security for ROSA - File: rosa-policy-process-security -#Temporarily included the following to keep working through xref errors -- Name: About IAM resources - File: rosa-sts-about-iam-resources - Distros: openshift-rosa-hcp ---- -Name: Learning about ROSA -Dir: rosa_learning -Distros: openshift-rosa-hcp -Topics: -- Name: Creating a cluster workshop - Dir: creating_cluster_workshop - Distros: openshift-rosa-hcp - Topics: - - Name: Deploying a cluster - File: learning-getting-started-hcp-for-hcp - - Name: Creating an admin user - File: learning-getting-started-admin - - Name: Setting up an identity provider - File: learning-getting-started-idp - - Name: Granting admin rights - File: learning-getting-started-admin-rights - - Name: Accessing your cluster - File: learning-getting-started-accessing - - Name: Managing worker nodes - File: learning-getting-started-managing-worker-nodes - - Name: Autoscaling - File: learning-getting-started-autoscaling - - Name: Upgrading your cluster - File: learning-getting-started-upgrading - - Name: Deleting your cluster - File: learning-getting-started-deleting - - Name: Obtaining support - File: 
learning-getting-started-support -- Name: Deploying an application workshop - Dir: deploying_application_workshop - Topics: - - Name: Workshop overview - File: learning-lab-overview - - Name: Deployment - File: learning-deploying-application-deployment - - Name: Health Check - File: learning-deploying-application-health-check - - Name: Storage - File: learning-deploying-application-storage - - Name: ConfigMap, secrets, and environment variables - File: learning-deploying-configmaps-secrets-env-var - - Name: Networking - File: learning-deploying-application-networking - - Name: Scaling an application - File: learning-deploying-application-scaling - - Name: S2I deployments - File: learning-deploying-application-s2i-deployments - - Name: Using Source-to-Image (S2I) webhooks for automated deployment - File: learning-deploying-s2i-webhook-cicd ---- -Name: Tutorials -Dir: cloud_experts_tutorials -Distros: openshift-rosa-hcp -Topics: -- Name: Tutorials overview - File: index -- Name: ROSA with HCP activation and account linking - File: cloud-experts-rosa-hcp-activation-and-account-linking-tutorial -- Name: ROSA with HCP private offer acceptance and sharing - File: cloud-experts-rosa-with-hcp-private-offer-acceptance-and-sharing -- Name: Deploying ROSA with a Custom DNS Resolver - File: cloud-experts-custom-dns-resolver -- Name: Using AWS WAF and Amazon CloudFront to protect ROSA workloads - File: cloud-experts-using-cloudfront-and-waf -- Name: Using AWS WAF and AWS ALBs to protect ROSA workloads - File: cloud-experts-using-alb-and-waf -- Name: Deploying OpenShift API for Data Protection on a ROSA cluster - File: cloud-experts-deploy-api-data-protection -- Name: AWS Load Balancer Operator on ROSA - File: cloud-experts-aws-load-balancer-operator -- Name: Configuring Microsoft Entra ID (formerly Azure Active Directory) as an identity provider - File: cloud-experts-entra-id-idp -- Name: Using AWS Secrets Manager CSI on ROSA with STS - File: cloud-experts-aws-secret-manager -- Name: Using AWS Controllers for Kubernetes on ROSA - File: cloud-experts-using-aws-ack -- Name: Dynamically issuing certificates using the cert-manager Operator on ROSA - File: cloud-experts-dynamic-certificate-custom-domain -- Name: Assigning consistent egress IP for external traffic - File: cloud-experts-consistent-egress-ip -# --- -# Name: Getting started -# Dir: rosa_getting_started -# Distros: openshift-rosa-hcp -# Topics: -# - Name: ROSA quickstart guide -# File: rosa-quickstart-guide-ui -# - Name: Comprehensive guide to getting started with ROSA -# File: rosa-getting-started -# - Name: Understanding the ROSA with STS deployment workflow -# File: rosa-sts-getting-started-workflow ---- -Name: Prepare your environment -Dir: rosa_planning -Distros: openshift-rosa-hcp -Topics: -- Name: Prerequisites checklist for deploying ROSA with HCP - File: rosa-cloud-expert-prereq-checklist -- Name: Detailed requirements for deploying ROSA with HCP - File: rosa-sts-aws-prereqs -- Name: Required IAM roles and resources - File: rosa-hcp-prepare-iam-roles-resources -- Name: ROSA with HCP limits and scalability - File: rosa-hcp-limits-scalability -- Name: Required AWS service quotas - File: rosa-sts-required-aws-service-quotas -- Name: Setting up your environment - File: rosa-sts-setting-up-environment -- Name: Planning resource usage in your cluster - File: rosa-planning-environment -# - Name: Preparing Terraform to install ROSA clusters -# File: rosa-understanding-terraform ---- -Name: Install ROSA with HCP clusters -Dir: rosa_hcp -Distros:
openshift-rosa-hcp -Topics: -- Name: ROSA with HCP quick start guide - File: rosa-hcp-quickstart-guide -- Name: Creating ROSA with HCP clusters using the default options - File: rosa-hcp-sts-creating-a-cluster-quickly -- Name: Creating ROSA with HCP clusters using a custom AWS KMS encryption key - File: rosa-hcp-creating-cluster-with-aws-kms-key -- Name: Creating a private cluster on ROSA with HCP - File: rosa-hcp-aws-private-creating-cluster -- Name: Creating a ROSA with HCP cluster with egress lockdown - File: rosa-hcp-egress-lockdown-install -- Name: Creating ROSA with HCP clusters with external authentication - File: rosa-hcp-sts-creating-a-cluster-ext-auth ---- -Name: Web console -Dir: web_console -Distros: openshift-rosa-hcp -Topics: -- Name: Web console overview - File: web-console-overview -- Name: Accessing the web console - File: web-console -- Name: Viewing cluster information - File: using-dashboard-to-get-cluster-information -- Name: Adding user preferences - File: adding-user-preferences -- Name: Dynamic plugins - Dir: dynamic-plugin - Distros: openshift-rosa-hcp - Topics: - - Name: Overview of dynamic plugins - File: overview-dynamic-plugin - - Name: Getting started with dynamic plugins - File: dynamic-plugins-get-started - - Name: Deploy your plugin on a cluster - File: deploy-plugin-cluster - - Name: Dynamic plugin example - File: dynamic-plugin-example - - Name: Dynamic plugin reference - File: dynamic-plugins-reference -- Name: Web terminal - Dir: web_terminal - Distros: openshift-rosa-hcp - Topics: - - Name: Installing the web terminal - File: installing-web-terminal - - Name: Using the web terminal - File: odc-using-web-terminal - - Name: Troubleshooting the web terminal - File: troubleshooting-web-terminal - - Name: Uninstalling the web terminal - File: uninstalling-web-terminal -- Name: About quick start tutorials - File: creating-quick-start-tutorials - Distros: openshift-rosa-hcp ---- -Name: CLI tools -Dir: cli_reference -Distros: openshift-rosa-hcp -Topics: -- Name: CLI tools overview - File: index -- Name: OpenShift CLI (oc) - Dir: openshift_cli - Topics: - - Name: Getting started with the OpenShift CLI - File: getting-started-cli - - Name: Configuring the OpenShift CLI - File: configuring-cli - - Name: Usage of oc and kubectl commands - File: usage-oc-kubectl - - Name: Managing CLI profiles - File: managing-cli-profiles - - Name: Extending the OpenShift CLI with plugins - File: extending-cli-plugins - # - Name: Managing CLI plugins with Krew - # File: managing-cli-plugins-krew - # Distros: openshift-rosa-hcp - - Name: OpenShift CLI developer command reference - File: developer-cli-commands - - Name: OpenShift CLI administrator command reference - File: administrator-cli-commands - Distros: openshift-rosa-hcp -- Name: Developer CLI (odo) - File: odo-important-update - # Dir: developer_cli_odo - Distros: openshift-rosa-hcp - # Topics: - # - Name: odo release notes - # File: odo-release-notes - # - Name: Understanding odo - # File: understanding-odo - # - Name: Installing odo - # File: installing-odo - # - Name: Configuring the odo CLI - # File: configuring-the-odo-cli - # - Name: odo CLI reference - # File: odo-cli-reference -- Name: Knative CLI (kn) for use with OpenShift Serverless - File: kn-cli-tools - Distros: openshift-rosa-hcp -- Name: Pipelines CLI (tkn) - Dir: tkn_cli - Distros: openshift-rosa-hcp - Topics: - - Name: Installing tkn - File: installing-tkn - - Name: Configuring tkn - File: op-configuring-tkn - - Name: Basic tkn commands - File: 
op-tkn-reference -- Name: opm CLI - Dir: opm - Distros: openshift-rosa-hcp - Topics: - - Name: Installing the opm CLI - File: cli-opm-install - - Name: opm CLI reference - File: cli-opm-ref -- Name: Operator SDK - Dir: osdk - Distros: openshift-rosa-hcp - Topics: - - Name: Installing the Operator SDK CLI - File: cli-osdk-install - - Name: Operator SDK CLI reference - File: cli-osdk-ref -- Name: ROSA CLI - Dir: rosa_cli - Distros: openshift-rosa-hcp - Topics: - # - Name: CLI and web console - # File: rosa-cli-openshift-console - - Name: Getting started with the ROSA CLI - File: rosa-get-started-cli - - Name: Managing objects with the ROSA CLI - File: rosa-manage-objects-cli - - Name: Checking account and version information with the ROSA CLI - File: rosa-checking-acct-version-cli - - Name: Checking logs with the ROSA CLI - File: rosa-checking-logs-cli - - Name: Least privilege permissions for ROSA CLI commands - File: rosa-cli-permission-examples - - Name: Managing AWS billing accounts with the ROSA CLI - File: rosa-updating-billing-account-cli ---- -Name: Red Hat OpenShift Cluster Manager -Dir: ocm -Distros: openshift-rosa-hcp -Topics: -- Name: Red Hat OpenShift Cluster Manager - File: ocm-overview -# - Name: Red Hat OpenShift Cluster Manager -# File: ocm-overview -# - Name: Using the OpenShift web console -# File: rosa-using-openshift-console -# OSDOCS-11789: Adding the minimum chapters of support and troubleshooting -# docs needed to ensure that xrefs in "Planning your environment" work; -# omit as required by further HCP migration work. ---- -Name: Support -Dir: support -Distros: openshift-rosa-hcp -Topics: -- Name: Support overview - File: index -- Name: Managing your cluster resources - File: managing-cluster-resources -- Name: Approved Access - File: approved-access -- Name: Getting support - File: getting-support -- Name: Remote health monitoring with connected clusters - Dir: remote_health_monitoring - Topics: - - Name: About remote health monitoring - File: about-remote-health-monitoring - - Name: Showing data collected by remote health monitoring - File: showing-data-collected-by-remote-health-monitoring -# cannot get resource "secrets" in API group "" in the namespace "openshift-config" -# - Name: Opting out of remote health reporting -# File: opting-out-of-remote-health-reporting -# cannot get resource "secrets" in API group "" in the namespace "openshift-config" -# - Name: Enabling remote health reporting -# File: enabling-remote-health-reporting - - Name: Using Insights to identify issues with your cluster - File: using-insights-to-identify-issues-with-your-cluster - - Name: Using Insights Operator - File: using-insights-operator -# Not supported per Michael McNeill -# - Name: Using remote health reporting in a restricted network -# File: remote-health-reporting-from-restricted-network -# cannot list resource "secrets" in API group "" in the namespace "openshift-config" -# - Name: Importing simple content access entitlements with Insights Operator -# File: insights-operator-simple-access -- Name: Gathering data about your cluster - File: gathering-cluster-data -- Name: Summarizing cluster specifications - File: summarizing-cluster-specifications -- Name: Troubleshooting - Dir: troubleshooting - Topics: -# rosa has own troubleshooting installations -# - Name: Troubleshooting installations -# File: troubleshooting-installations - - Name: Troubleshooting ROSA installations - File: rosa-troubleshooting-installations - - Name: Troubleshooting ROSA with HCP installations - File: 
rosa-troubleshooting-installations-hcp - - Name: Troubleshooting networking - File: rosa-troubleshooting-networking - - Name: Verifying node health - File: verifying-node-health -# cannot create resource "namespaces", cannot patch resource "nodes" -# - Name: Troubleshooting CRI-O container runtime issues -# File: troubleshooting-crio-issues -# requires ostree, butane, and other plug-ins -# - Name: Troubleshooting operating system issues -# File: troubleshooting-operating-system-issues -# Distros: openshift-rosa -# cannot patch resource "nodes", "nodes/proxy", "namespaces" -# - Name: Troubleshooting network issues -# File: troubleshooting-network-issues -# Distros: openshift-rosa - - Name: Troubleshooting Operator issues - File: troubleshooting-operator-issues - - Name: Investigating pod issues - File: investigating-pod-issues - - Name: Troubleshooting the Source-to-Image process - File: troubleshooting-s2i - - Name: Troubleshooting storage issues - File: troubleshooting-storage-issues -# Not supported per WINC team -# - Name: Troubleshooting Windows container workload issues -# File: troubleshooting-windows-container-workload-issues - - Name: Investigating monitoring issues - File: investigating-monitoring-issues - - Name: Diagnosing OpenShift CLI (oc) issues - File: diagnosing-oc-issues - - Name: Troubleshooting expired offline access tokens - File: rosa-troubleshooting-expired-tokens - - Name: Troubleshooting IAM roles - File: rosa-troubleshooting-iam-resources - - Name: Troubleshooting cluster deployments - File: rosa-troubleshooting-deployments - - Name: Red Hat managed resources - File: sd-managed-resources ---- -Name: Cluster administration -Dir: rosa_cluster_admin -Distros: openshift-rosa-hcp -Topics: -- Name: Cluster notifications - File: rosa-cluster-notifications -- Name: Configuring private connections - Dir: cloud_infrastructure_access - Topics: - - Name: Configuring private connections - File: rosa-configuring-private-connections - - Name: Configuring AWS VPC peering - File: dedicated-aws-peering - - Name: Configuring AWS VPN - File: dedicated-aws-vpn - - Name: Configuring AWS Direct Connect - File: dedicated-aws-dc -- Name: Cluster autoscaling - File: rosa-cluster-autoscaling -- Name: Manage nodes using machine pools - Dir: rosa_nodes - Topics: - - Name: About machine pools - File: rosa-nodes-machinepools-about - - Name: Managing compute nodes - File: rosa-managing-worker-nodes -# Local zones not yet implemented in HCP - # - Name: Configuring machine pools in Local Zones - # File: rosa-nodes-machinepools-configuring - - Name: About autoscaling nodes on a cluster - File: rosa-nodes-about-autoscaling-nodes - - Name: Configuring cluster memory to meet container memory and risk requirements - File: nodes-cluster-resource-configure -- Name: Configuring PID limits - File: rosa-configuring-pid-limits -- Name: Managing multi-architecture clusters - File: rosa-multi-arch-cluster-managing ---- -Name: Security and compliance -Dir: security -Distros: openshift-rosa-hcp -Topics: -- Name: Adding additional constraints for IP-based AWS role assumption - File: rosa-adding-additional-constraints-for-ip-based-aws-role-assumption ---- -Name: Authentication and authorization -Dir: authentication -Distros: openshift-rosa-hcp -Topics: -- Name: Authentication and authorization overview - File: index -- Name: Understanding authentication - File: understanding-authentication -# - Name: Configuring the internal OAuth server -# File: configuring-internal-oauth -# - Name: Configuring OAuth clients -# 
File: configuring-oauth-clients -- Name: Managing user-owned OAuth access tokens - File: managing-oauth-access-tokens -# - Name: Understanding identity provider configuration -# File: understanding-identity-provider -- Name: Configuring identity providers - File: sd-configuring-identity-providers -# - Name: Configuring identity providers -# Dir: identity_providers -# Topics: -# - Name: Configuring an htpasswd identity provider -# File: configuring-htpasswd-identity-provider -# - Name: Configuring a Keystone identity provider -# File: configuring-keystone-identity-provider -# - Name: Configuring an LDAP identity provider -# File: configuring-ldap-identity-provider -# - Name: Configuring a basic authentication identity provider -# File: configuring-basic-authentication-identity-provider -# - Name: Configuring a request header identity provider -# File: configuring-request-header-identity-provider -# - Name: Configuring a GitHub or GitHub Enterprise identity provider -# File: configuring-github-identity-provider -# - Name: Configuring a GitLab identity provider -# File: configuring-gitlab-identity-provider -# - Name: Configuring a Google identity provider -# File: configuring-google-identity-provider -# - Name: Configuring an OpenID Connect identity provider -# File: configuring-oidc-identity-provider -- Name: Using RBAC to define and apply permissions - File: using-rbac -# - Name: Removing the kubeadmin user -# File: remove-kubeadmin -#- Name: Configuring LDAP failover -# File: configuring-ldap-failover -- Name: Understanding and creating service accounts - File: understanding-and-creating-service-accounts -- Name: Using service accounts in applications - File: using-service-accounts-in-applications -- Name: Using a service account as an OAuth client - File: using-service-accounts-as-oauth-client -- Name: Assuming an AWS IAM role for a service account - File: assuming-an-aws-iam-role-for-a-service-account -- Name: Scoping tokens - File: tokens-scoping -- Name: Using bound service account tokens - File: bound-service-account-tokens -- Name: Managing security context constraints - File: managing-security-context-constraints -- Name: Understanding and managing pod security admission - File: understanding-and-managing-pod-security-admission -# - Name: Impersonating the system:admin user -# File: impersonating-system-admin -- Name: Syncing LDAP groups - File: ldap-syncing -# - Name: Managing cloud provider credentials -# Dir: managing_cloud_provider_credentials -# Topics: -# - Name: About the Cloud Credential Operator -# File: about-cloud-credential-operator -# - Name: Mint mode -# File: cco-mode-mint -# - Name: Passthrough mode -# File: cco-mode-passthrough -# - Name: Manual mode with long-term credentials for components -# File: cco-mode-manual -# - Name: Manual mode with short-term credentials for components -# File: cco-short-term-creds ---- -Name: Upgrading -Dir: upgrading -Distros: openshift-rosa-hcp -Topics: -- Name: Upgrading ROSA with HCP - File: rosa-hcp-upgrading ---- -Name: CI/CD -Dir: cicd -Distros: openshift-rosa-hcp -Topics: -- Name: CI/CD overview - Dir: overview - Topics: - - Name: About CI/CD - File: index -- Name: Builds using Shipwright - Dir: builds_using_shipwright - Topics: - - Name: Overview of Builds - File: overview-openshift-builds -- Name: Builds using BuildConfig - Dir: builds - Topics: - - Name: Understanding image builds - File: understanding-image-builds - - Name: Understanding build configurations - File: understanding-buildconfigs - - Name: Creating build inputs 
- File: creating-build-inputs - - Name: Managing build output - File: managing-build-output - - Name: Using build strategies - File: build-strategies -# - Name: Custom image builds with Buildah -# File: custom-builds-buildah - - Name: Performing and configuring basic builds - File: basic-build-operations - - Name: Triggering and modifying builds - File: triggering-builds-build-hooks - - Name: Performing advanced builds - File: advanced-build-operations - - Name: Using Red Hat subscriptions in builds - File: running-entitled-builds - # Dedicated-admin cannot secure builds by strategy - # - Name: Securing builds by strategy - # File: securing-builds-by-strategy - # Dedicated-admin cannot edit build configuration resources - # - Name: Build configuration resources - # File: build-configuration - - Name: Troubleshooting builds - File: troubleshooting-builds -# - Name: Setting up additional trusted certificate authorities for builds -# File: setting-up-trusted-ca -- Name: Pipelines - Dir: pipelines - Topics: - - Name: About OpenShift Pipelines - File: about-pipelines -- Name: GitOps - Dir: gitops - Topics: - - Name: About OpenShift GitOps - File: about-redhat-openshift-gitops -- Name: Jenkins - Dir: jenkins - Topics: - - Name: Configuring Jenkins images - File: images-other-jenkins - - Name: Jenkins agent - File: images-other-jenkins-agent - - Name: Migrating from Jenkins to OpenShift Pipelines - File: migrating-from-jenkins-to-openshift-pipelines - - Name: Important changes to OpenShift Jenkins images - File: important-changes-to-openshift-jenkins-images ---- -Name: Storage -Dir: storage -Distros: openshift-rosa-hcp -Topics: -- Name: Storage overview - File: index -- Name: Understanding ephemeral storage - File: understanding-ephemeral-storage -- Name: Understanding persistent storage - File: understanding-persistent-storage -- Name: Configuring persistent storage - Dir: persistent_storage - Topics: - - Name: Persistent storage using AWS Elastic Block Store - File: persistent-storage-aws -- Name: Using Container Storage Interface (CSI) - Dir: container_storage_interface - Topics: - - Name: Configuring CSI volumes - File: persistent-storage-csi - - Name: Managing the default storage class - File: persistent-storage-csi-sc-manage - - Name: AWS Elastic Block Store CSI Driver Operator - File: persistent-storage-csi-ebs - - Name: AWS Elastic File Service CSI Driver Operator - File: persistent-storage-csi-aws-efs -- Name: Generic ephemeral volumes - File: generic-ephemeral-vols -- Name: Dynamic provisioning - File: dynamic-provisioning ---- -Name: Registry -Dir: registry -Distros: openshift-rosa-hcp -Topics: -- Name: Registry overview - File: index -- Name: Image Registry Operator in Red Hat OpenShift Service on AWS - File: configuring-registry-operator -- Name: Accessing the registry - File: accessing-the-registry ---- -Name: Operators -Dir: operators -Distros: openshift-rosa-hcp -Topics: -- Name: Operators overview - File: index -- Name: Understanding Operators - Dir: understanding - Topics: - - Name: What are Operators? 
- File: olm-what-operators-are - - Name: Packaging format - File: olm-packaging-format - - Name: Common terms - File: olm-common-terms - - Name: Operator Lifecycle Manager (OLM) - Dir: olm - Topics: - - Name: Concepts and resources - File: olm-understanding-olm - - Name: Architecture - File: olm-arch - - Name: Workflow - File: olm-workflow - - Name: Dependency resolution - File: olm-understanding-dependency-resolution - - Name: Operator groups - File: olm-understanding-operatorgroups - - Name: Multitenancy and Operator colocation - File: olm-colocation - - Name: Operator conditions - File: olm-operatorconditions - - Name: Metrics - File: olm-understanding-metrics - - Name: Webhooks - File: olm-webhooks - - Name: OperatorHub - File: olm-understanding-operatorhub - - Name: Red Hat-provided Operator catalogs - File: olm-rh-catalogs - - Name: Operators in multitenant clusters - File: olm-multitenancy - - Name: CRDs - Dir: crds - Topics: - - Name: Managing resources from CRDs - File: crd-managing-resources-from-crds -- Name: User tasks - Dir: user - Topics: - - Name: Creating applications from installed Operators - File: olm-creating-apps-from-installed-operators -- Name: Administrator tasks - Dir: admin - Topics: - - Name: Adding Operators to a cluster - File: olm-adding-operators-to-cluster - - Name: Updating installed Operators - File: olm-upgrading-operators - - Name: Deleting Operators from a cluster - File: olm-deleting-operators-from-cluster - - Name: Configuring proxy support - File: olm-configuring-proxy-support - - Name: Viewing Operator status - File: olm-status - - Name: Managing Operator conditions - File: olm-managing-operatorconditions - - Name: Managing custom catalogs - File: olm-managing-custom-catalogs - - Name: Catalog source pod scheduling - File: olm-cs-podsched - - Name: Troubleshooting Operator issues - File: olm-troubleshooting-operator-issues -- Name: Developing Operators - Dir: operator_sdk - Topics: - - Name: About the Operator SDK - File: osdk-about - - Name: Installing the Operator SDK CLI - File: osdk-installing-cli - - Name: Go-based Operators - Dir: golang - Topics: -# Quick start excluded, because it requires cluster-admin permissions. -# - Name: Getting started -# File: osdk-golang-quickstart - - Name: Tutorial - File: osdk-golang-tutorial - - Name: Project layout - File: osdk-golang-project-layout - - Name: Updating Go-based projects - File: osdk-golang-updating-projects - - Name: Ansible-based Operators - Dir: ansible - Topics: -# Quick start excluded, because it requires cluster-admin permissions. -# - Name: Getting started -# File: osdk-ansible-quickstart - - Name: Tutorial - File: osdk-ansible-tutorial - - Name: Project layout - File: osdk-ansible-project-layout - - Name: Updating Ansible-based projects - File: osdk-ansible-updating-projects - - Name: Ansible support - File: osdk-ansible-support - - Name: Kubernetes Collection for Ansible - File: osdk-ansible-k8s-collection - - Name: Using Ansible inside an Operator - File: osdk-ansible-inside-operator - - Name: Custom resource status management - File: osdk-ansible-cr-status - - Name: Helm-based Operators - Dir: helm - Topics: -# Quick start excluded, because it requires cluster-admin permissions. 
-# - Name: Getting started -# File: osdk-helm-quickstart - - Name: Tutorial - File: osdk-helm-tutorial - - Name: Project layout - File: osdk-helm-project-layout - - Name: Updating Helm-based projects - File: osdk-helm-updating-projects - - Name: Helm support - File: osdk-helm-support -# - Name: Hybrid Helm Operator <= Tech Preview -# File: osdk-hybrid-helm -# - Name: Updating Hybrid Helm-based projects (Technology Preview) -# File: osdk-hybrid-helm-updating-projects -# - Name: Java-based Operators <= Tech Preview -# Dir: java -# Topics: -# - Name: Getting started -# File: osdk-java-quickstart -# - Name: Tutorial -# File: osdk-java-tutorial -# - Name: Project layout -# File: osdk-java-project-layout -# - Name: Updating Java-based projects -# File: osdk-java-updating-projects - - Name: Defining cluster service versions (CSVs) - File: osdk-generating-csvs - - Name: Working with bundle images - File: osdk-working-bundle-images - - Name: Complying with pod security admission - File: osdk-complying-with-psa - - Name: Validating Operators using the scorecard - File: osdk-scorecard - - Name: Validating Operator bundles - File: osdk-bundle-validate - - Name: High-availability or single-node cluster detection and support - File: osdk-ha-sno - - Name: Configuring built-in monitoring with Prometheus - File: osdk-monitoring-prometheus - - Name: Configuring leader election - File: osdk-leader-election - - Name: Object pruning utility - File: osdk-pruning-utility - - Name: Migrating package manifest projects to bundle format - File: osdk-pkgman-to-bundle - - Name: Operator SDK CLI reference - File: osdk-cli-ref - - Name: Migrating to Operator SDK v0.1.0 - File: osdk-migrating-to-v0-1-0 -# ROSA customers can't configure/edit the cluster Operators -# - Name: Cluster Operators reference -# File: operator-reference ---- -Name: Building applications -Dir: applications -Distros: openshift-rosa-hcp -Topics: -- Name: Building applications overview - File: index -- Name: Projects - Dir: projects - Topics: - - Name: Working with projects - File: working-with-projects -# cannot impersonate resource "users" in API group -# - Name: Creating a project as another user -# File: creating-project-other-user - - Name: Configuring project creation - File: configuring-project-creation -- Name: Creating applications - Dir: creating_applications - Topics: - - Name: Creating applications using the Developer perspective - File: odc-creating-applications-using-developer-perspective - - Name: Creating applications from installed Operators - File: creating-apps-from-installed-operators - - Name: Creating applications using the CLI - File: creating-applications-using-cli -- Name: Viewing application composition using the Topology view - File: odc-viewing-application-composition-using-topology-view -# cannot create required namespace -# - Name: Exporting applications -# File: odc-exporting-applications -- Name: Working with Helm charts - Dir: working_with_helm_charts - Topics: - - Name: Understanding Helm - File: understanding-helm - - Name: Installing Helm - File: installing-helm - - Name: Configuring custom Helm chart repositories - File: configuring-custom-helm-chart-repositories - - Name: Working with Helm releases - File: odc-working-with-helm-releases -- Name: Deployments - Dir: deployments - Topics: - - Name: Custom domains for applications - File: rosa-config-custom-domains-applications - - Name: Understanding Deployments and DeploymentConfigs - File: what-deployments-are - - Name: Managing deployment processes - File: 
managing-deployment-processes - - Name: Using deployment strategies - File: deployment-strategies - - Name: Using route-based deployment strategies - File: route-based-deployment-strategies -- Name: Quotas - Dir: quotas - Topics: - - Name: Resource quotas per project - File: quotas-setting-per-project - - Name: Resource quotas across multiple projects - File: quotas-setting-across-multiple-projects -- Name: Using config maps with applications - File: config-maps -- Name: Monitoring project and application metrics using the Developer perspective - File: odc-monitoring-project-and-application-metrics-using-developer-perspective -- Name: Monitoring application health - File: application-health -- Name: Editing applications - File: odc-editing-applications -- Name: Working with quotas - File: working-with-quotas -- Name: Pruning objects to reclaim resources - File: pruning-objects -- Name: Idling applications - File: idling-applications -- Name: Deleting applications - File: odc-deleting-applications -- Name: Using the Red Hat Marketplace - File: red-hat-marketplace ---- -Name: Backup and restore -Dir: backup_and_restore -Distros: openshift-rosa-hcp -Topics: -- Name: OADP Application backup and restore - Dir: application_backup_and_restore - Topics: - - Name: Introduction to OpenShift API for Data Protection - File: oadp-intro - - Name: OADP release notes - Dir: release-notes - Topics: - - Name: OADP 1.4 release notes - File: oadp-1-4-release-notes - - Name: OADP performance - Dir: oadp-performance - Topics: - - Name: OADP recommended network settings - File: oadp-recommended-network-settings - - Name: OADP features and plugins - File: oadp-features-plugins - - Name: OADP use cases - Dir: oadp-use-cases - Topics: - - Name: Backing up an application using OADP with ROSA STS - File: oadp-rosa-backup-restore - - Name: Backing up an application using OADP and ODF - File: oadp-usecase-backup-using-odf - - Name: Restoring a backup to a different namespace - File: oadp-usecase-restore-different-namespace - - Name: Including a self-signed CA certificate during backup - File: oadp-usecase-enable-ca-cert - - Name: Installing and configuring OADP - Dir: oadp-rosa - Topics: - - Name: Installing OADP - File: oadp-rosa-backing-up-applications -# TODO: Include this when the Operators book is added to ROSA HCP -# - Name: Uninstalling OADP -# Dir: installing -# Topics: -# - Name: Uninstalling OADP -# File: uninstalling-oadp - - Name: OADP backing up - Dir: backing_up_and_restoring - Topics: - - Name: Backing up applications - File: backing-up-applications - - Name: Creating a Backup CR - File: oadp-creating-backup-cr -# ROSA docs do not include CSI snapshots -# - Name: Backing up persistent volumes with CSI snapshots -# File: oadp-backing-up-pvs-csi-doc -# - Name: Backing up applications with File System Backup -# File: oadp-backing-up-applications-restic-doc - - Name: Creating backup hooks - File: oadp-creating-backup-hooks-doc - - Name: Scheduling backups using Schedule CR - File: oadp-scheduling-backups-doc - - Name: Deleting backups - File: oadp-deleting-backups -# - Name: About Kopia -# File: oadp-about-kopia - - Name: OADP restoring - Dir: backing_up_and_restoring - Topics: - - Name: Restoring applications - File: restoring-applications -# - Name: OADP and ROSA -# Dir: oadp-rosa -# Topics: -# - Name: Backing up applications on ROSA STS using OADP -# File: oadp-rosa-backing-up-applications -# - Name: OADP and AWS STS -# Dir: aws-sts -# Topics: -# - Name: Backing up applications on AWS STS using OADP -# 
File: oadp-aws-sts -# - Name: OADP Data Mover -# Dir: installing -# Topics: -# - Name: About the OADP Data Mover -# File: about-oadp-data-mover -# - Name: Backing up and restoring volumes by using CSI snapshots data movement -# File: oadp-backup-restore-csi-snapshots -# - Name: Overriding Kopia algorithms -# File: overriding-kopia-algorithms -# - Name: Troubleshooting -# File: troubleshooting -# - Name: OADP API -# File: oadp-api -# - Name: Advanced OADP features and functionalities -# File: oadp-advanced-topics ---- -Name: Nodes -Dir: nodes -Distros: openshift-rosa-hcp -Topics: -- Name: Overview of nodes - File: index -- Name: Working with pods - Dir: pods - Topics: - - Name: About pods - File: nodes-pods-using - - Name: Viewing pods - File: nodes-pods-viewing - - Name: Configuring a cluster for pods - File: nodes-pods-configuring -# Cannot create namespace to install VPA; revisit after Operator book converted -# - Name: Automatically adjust pod resource levels with the vertical pod autoscaler -# File: nodes-pods-vertical-autoscaler - - Name: Providing sensitive data to pods - File: nodes-pods-secrets - - Name: Creating and using config maps - File: nodes-pods-configmaps -# Cannot create required kubeletconfigs -# - Name: Using Device Manager to make devices available to nodes -# File: nodes-pods-plugins - - Name: Including pod priority in pod scheduling decisions - File: nodes-pods-priority - - Name: Placing pods on specific nodes using node selectors - File: nodes-pods-node-selectors -# Cannot create namespace to install Run Once; revisit after Operator book converted -# - Name: Run Once Duration Override Operator -# Dir: run_once_duration_override -# Topics: -# - Name: Run Once Duration Override Operator overview -# File: index -# - Name: Run Once Duration Override Operator release notes -# File: run-once-duration-override-release-notes -# - Name: Overriding the active deadline for run-once pods -# File: run-once-duration-override-install -# - Name: Uninstalling the Run Once Duration Override Operator -# File: run-once-duration-override-uninstall -- Name: Automatically scaling pods with the Custom Metrics Autoscaler Operator - Dir: cma - Topics: - - Name: Release notes - Dir: nodes-cma-rn - Topics: - - Name: Custom Metrics Autoscaler Operator release notes - File: nodes-cma-autoscaling-custom-rn - - Name: Past releases - File: nodes-cma-autoscaling-custom-rn-past - - Name: Custom Metrics Autoscaler Operator overview - File: nodes-cma-autoscaling-custom - - Name: Installing the custom metrics autoscaler - File: nodes-cma-autoscaling-custom-install - - Name: Understanding the custom metrics autoscaler triggers - File: nodes-cma-autoscaling-custom-trigger - - Name: Understanding the custom metrics autoscaler trigger authentications - File: nodes-cma-autoscaling-custom-trigger-auth - - Name: Pausing the custom metrics autoscaler - File: nodes-cma-autoscaling-custom-pausing - - Name: Gathering audit logs - File: nodes-cma-autoscaling-custom-audit-log - - Name: Gathering debugging data - File: nodes-cma-autoscaling-custom-debugging - - Name: Viewing Operator metrics - File: nodes-cma-autoscaling-custom-metrics - - Name: Understanding how to add custom metrics autoscalers - File: nodes-cma-autoscaling-custom-adding - - Name: Removing the Custom Metrics Autoscaler Operator - File: nodes-cma-autoscaling-custom-removing -- Name: Controlling pod placement onto nodes (scheduling) - Dir: scheduling - Topics: - - Name: About pod placement using the scheduler - File: nodes-scheduler-about - - Name: 
Placing pods relative to other pods using pod affinity and anti-affinity rules - File: nodes-scheduler-pod-affinity - - Name: Controlling pod placement on nodes using node affinity rules - File: nodes-scheduler-node-affinity - - Name: Placing pods onto overcommitted nodes - File: nodes-scheduler-overcommit -# Per OSDOCS-9791, ROSA customers cannot add taints to individual nodes. -# - Name: Controlling pod placement using node taints -# File: nodes-scheduler-taints-tolerations - - Name: Placing pods on specific nodes using node selectors - File: nodes-scheduler-node-selectors - - Name: Controlling pod placement using pod topology spread constraints - File: nodes-scheduler-pod-topology-spread-constraints -# - Name: Placing a pod on a specific node by name -# File: nodes-scheduler-node-names -# - Name: Placing a pod in a specific project -# File: nodes-scheduler-node-projects -# - Name: Keeping your cluster balanced using the descheduler -# File: nodes-scheduler-descheduler -# Cannot create namespace to install Descheduler Operator; revisit after Operator book converted -# - Name: Evicting pods using the descheduler -# File: nodes-descheduler -# Cannot create namespace to install Secondary Scheduler Operator; revisit after Operator book converted -# - Name: Secondary scheduler -# Dir: secondary_scheduler -# Distros: openshift-enterprise -# Topics: -# - Name: Secondary scheduler overview -# File: index -# - Name: Secondary Scheduler Operator release notes -# File: nodes-secondary-scheduler-release-notes -# - Name: Scheduling pods using a secondary scheduler -# File: nodes-secondary-scheduler-configuring -# - Name: Uninstalling the Secondary Scheduler Operator -# File: nodes-secondary-scheduler-uninstalling -- Name: Using jobs and daemon sets - Dir: jobs - Topics: - - Name: Running background tasks on nodes automatically with daemon sets - File: nodes-pods-daemonsets - - Name: Running tasks in pods using jobs - File: nodes-nodes-jobs -- Name: Working with nodes - Dir: nodes - Topics: - - Name: Viewing and listing the nodes in your cluster - File: nodes-nodes-viewing - - Name: Working with nodes - File: nodes-nodes-working -# cannot use oc adm cordon; cannot patch resource "machinesets"; cannot patch resource "nodes" -# - Name: Working with nodes -# File: nodes-nodes-working -# cannot create resource "kubeletconfigs", "schedulers", "machineconfigs", "kubeletconfigs" -# - Name: Managing nodes -# File: nodes-nodes-managing -# cannot create resource "kubeletconfigs" -# - Name: Managing graceful node shutdown -# File: nodes-nodes-graceful-shutdown -# cannot create resource "kubeletconfigs" -# - Name: Managing the maximum number of pods per node -# File: nodes-nodes-managing-max-pods - - Name: Using the Node Tuning Operator - File: nodes-node-tuning-operator -# - Name: Remediating, fencing, and maintaining nodes -# File: nodes-remediating-fencing-maintaining-rhwa -# Cannot create namespace needed to oc debug and reboot; revisit after Operator book converted -# - Name: Understanding node rebooting -# File: nodes-nodes-rebooting -# cannot create resource "kubeletconfigs" -# - Name: Freeing node resources using garbage collection -# File: nodes-nodes-garbage-collection -# cannot create resource "kubeletconfigs" -# - Name: Allocating resources for nodes -# File: nodes-nodes-resources-configuring -# cannot create resource "kubeletconfigs" -# - Name: Allocating specific CPUs for nodes in a cluster -# File: nodes-nodes-resources-cpus -# cannot create resource "kubeletconfigs" -# - Name: Configuring the TLS 
security profile for the kubelet -# File: nodes-nodes-tls -# Distros: openshift-rosa -# - Name: Monitoring for problems in your nodes -# File: nodes-nodes-problem-detector -# cannot patch resource "nodes" -# - Name: Creating infrastructure nodes -# File: nodes-nodes-creating-infrastructure-nodes -- Name: Working with containers - Dir: containers - Topics: - - Name: Understanding containers - File: nodes-containers-using - - Name: Using Init Containers to perform tasks before a pod is deployed - File: nodes-containers-init - - Name: Using volumes to persist container data - File: nodes-containers-volumes - - Name: Mapping volumes using projected volumes - File: nodes-containers-projected-volumes - - Name: Allowing containers to consume API objects - File: nodes-containers-downward-api - - Name: Copying files to or from a container - File: nodes-containers-copying-files - - Name: Executing remote commands in a container - File: nodes-containers-remote-commands - - Name: Using port forwarding to access applications in a container - File: nodes-containers-port-forwarding -# cannot patch resource "configmaps" -# - Name: Using sysctls in containers -# File: nodes-containers-sysctls -- Name: Working with clusters - Dir: clusters - Topics: - - Name: Viewing system event information in a cluster - File: nodes-containers-events - - Name: Analyzing cluster resource levels - File: nodes-cluster-resource-levels - - Name: Setting limit ranges - File: nodes-cluster-limit-ranges - - Name: Configuring cluster memory to meet container memory and risk requirements - File: nodes-cluster-resource-configure - - Name: Configuring your cluster to place pods on overcommitted nodes - File: nodes-cluster-overcommit -# - Name: Configuring the Linux cgroup version on your nodes -# File: nodes-cluster-cgroups-2 -# - Name: Configuring the Linux cgroup version on your nodes -# File: nodes-cluster-cgroups-okd -# The TechPreviewNoUpgrade Feature Gate is not allowed -# - Name: Enabling features using FeatureGates -# File: nodes-cluster-enabling-features -# Distros: openshift-rosa -# Error: nodes.config.openshift.io "cluster" could not be patched -# - Name: Improving cluster stability in high latency environments using worker latency profiles -# File: nodes-cluster-worker-latency-profiles -# Not supported per Michael McNeill -#- Name: Remote worker nodes on the network edge -# Dir: edge -# Topics: -# - Name: Using remote worker nodes at the network edge -# File: nodes-edge-remote-workers -# Not supported per Michael McNeill -#- Name: Worker nodes for single-node OpenShift clusters -# Dir: nodes -# Distros: openshift-rosa -# Topics: -# - Name: Adding worker nodes to single-node OpenShift clusters -# File: nodes-sno-worker-nodes ---- -Name: Observability -Dir: observability -Distros: openshift-rosa-hcp -Topics: -- Name: Observability overview - Dir: overview - Topics: - - Name: About Observability - File: index -- Name: Monitoring - Dir: monitoring - Distros: openshift-rosa-hcp - Topics: - - Name: Monitoring overview - File: monitoring-overview - - Name: Accessing monitoring for user-defined projects - File: sd-accessing-monitoring-for-user-defined-projects - - Name: Configuring the monitoring stack - File: configuring-the-monitoring-stack - - Name: Disabling monitoring for user-defined projects - File: sd-disabling-monitoring-for-user-defined-projects - - Name: Enabling alert routing for user-defined projects - File: enabling-alert-routing-for-user-defined-projects - - Name: Managing metrics - File: managing-metrics - - Name: 
Managing alerts - File: managing-alerts - - Name: Reviewing monitoring dashboards - File: reviewing-monitoring-dashboards - - Name: Accessing third-party monitoring APIs - File: accessing-third-party-monitoring-apis - - Name: Troubleshooting monitoring issues - File: troubleshooting-monitoring-issues - - Name: Config map reference for the Cluster Monitoring Operator - File: config-map-reference-for-the-cluster-monitoring-operator -- Name: Logging - Dir: logging - Distros: openshift-rosa-hcp - Topics: - - Name: Release notes - Dir: logging_release_notes - Topics: - - Name: Logging 5.9 - File: logging-5-9-release-notes - - Name: Logging 5.8 - File: logging-5-8-release-notes - - Name: Logging 5.7 - File: logging-5-7-release-notes - - Name: Support - File: cluster-logging-support - - Name: Troubleshooting logging - Dir: troubleshooting - Topics: - - Name: Viewing Logging status - File: cluster-logging-cluster-status - - Name: Troubleshooting log forwarding - File: log-forwarding-troubleshooting - - Name: Troubleshooting logging alerts - File: troubleshooting-logging-alerts - - Name: Viewing the status of the Elasticsearch log store - File: cluster-logging-log-store-status - - Name: About Logging - File: cluster-logging - - Name: Installing Logging - File: cluster-logging-deploying - - Name: Updating Logging - File: cluster-logging-upgrading - - Name: Visualizing logs - Dir: log_visualization - Topics: - - Name: About log visualization - File: log-visualization - - Name: Log visualization with the web console - File: log-visualization-ocp-console - - Name: Viewing cluster dashboards - File: cluster-logging-dashboards - - Name: Log visualization with Kibana - File: logging-kibana - - Name: Configuring your Logging deployment - Dir: config - Topics: - - Name: Configuring CPU and memory limits for Logging components - File: cluster-logging-memory - #- Name: Configuring systemd-journald and Fluentd - # File: cluster-logging-systemd - - Name: Log collection and forwarding - Dir: log_collection_forwarding - Topics: - - Name: About log collection and forwarding - File: log-forwarding - - Name: Log output types - File: logging-output-types - - Name: Enabling JSON log forwarding - File: cluster-logging-enabling-json-logging - - Name: Configuring log forwarding - File: configuring-log-forwarding - - Name: Configuring the logging collector - File: cluster-logging-collector - - Name: Collecting and storing Kubernetes events - File: cluster-logging-eventrouter - - Name: Log storage - Dir: log_storage - Topics: - - Name: About log storage - File: about-log-storage - - Name: Installing log storage - File: installing-log-storage - - Name: Configuring the LokiStack log store - File: cluster-logging-loki - - Name: Configuring the Elasticsearch log store - File: logging-config-es-store - - Name: Logging alerts - Dir: logging_alerts - Topics: - - Name: Default logging alerts - File: default-logging-alerts - - Name: Custom logging alerts - File: custom-logging-alerts - - Name: Performance and reliability tuning - Dir: performance_reliability - Topics: - - Name: Flow control mechanisms - File: logging-flow-control-mechanisms - - Name: Filtering logs by content - File: logging-content-filtering - - Name: Filtering logs by metadata - File: logging-input-spec-filtering - - Name: Scheduling resources - Dir: scheduling_resources - Topics: - - Name: Using node selectors to move logging resources - File: logging-node-selectors - - Name: Using tolerations to control logging pod placement - File: logging-taints-tolerations - 
- Name: Uninstalling Logging - File: cluster-logging-uninstall - - Name: Exported fields - File: cluster-logging-exported-fields - - Name: API reference - Dir: api_reference - Topics: - # - Name: 5.8 Logging API reference - # File: logging-5-8-reference - # - Name: 5.7 Logging API reference - # File: logging-5-7-reference - - Name: 5.6 Logging API reference - File: logging-5-6-reference - - Name: Glossary - File: logging-common-terms ---- -Name: Virtualization -Dir: virt -Distros: openshift-rosa-hcp -Topics: -- Name: About - Dir: about_virt - Topics: - - Name: About OpenShift Virtualization - File: about-virt - Distros: openshift-rosa-hcp - - Name: About OKD Virtualization - File: about-virt - Distros: openshift-origin - - Name: Security policies - File: virt-security-policies - - Name: Architecture - File: virt-architecture - Distros: openshift-rosa-hcp -#- Name: Release notes -# Dir: release_notes -# Topics: -# - Name: OpenShift Virtualization release notes -# File: virt-release-notes-placeholder -# Distros: openshift-rosa -- Name: Getting started - Dir: getting_started - Topics: - - Name: Getting started with OpenShift Virtualization - File: virt-getting-started - Distros: openshift-rosa-hcp - - Name: Getting started with OKD Virtualization - File: virt-getting-started - Distros: openshift-origin - - Name: virtctl and libguestfs - File: virt-using-the-cli-tools - Distros: openshift-rosa-hcp -- Name: Installing - Dir: install - Topics: - - Name: Preparing your cluster - File: preparing-cluster-for-virt - - Name: Installing OpenShift Virtualization - File: installing-virt - - Name: Uninstalling OpenShift Virtualization - File: uninstalling-virt -- Name: Post-installation configuration - Dir: post_installation_configuration - Topics: - - Name: Post-installation configuration - File: virt-post-install-config - - Name: Node placement rules - File: virt-node-placement-virt-components - - Name: Network configuration - File: virt-post-install-network-config - - Name: Storage configuration - File: virt-post-install-storage-config - - Name: Configuring certificate rotation - File: virt-configuring-certificate-rotation -- Name: Updating - Dir: updating - Topics: - - Name: Updating OpenShift Virtualization - File: upgrading-virt - Distros: openshift-rosa-hcp -- Name: Creating a virtual machine - Dir: creating_vm - Topics: -# - Name: Overview -# File: virt-basic-vm-overview -# - Name: Setting up your environment -# File: virt-setting-up-environment - - Name: Creating VMs from instance types - File: virt-creating-vms-from-instance-types - - Name: Creating VMs from templates - File: virt-creating-vms-from-templates -- Name: Advanced VM creation - Dir: creating_vms_advanced - Topics: -# - Name: Overview -# File: virt-advanced-vm-overview - - Name: Creating VMs in the web console - Dir: creating_vms_advanced_web - Topics: - - Name: Creating VMs from Red Hat images - File: virt-creating-vms-from-rh-images-overview - - Name: Creating VMs by importing images from web pages - File: virt-creating-vms-from-web-images - - Name: Creating VMs by uploading images - File: virt-creating-vms-uploading-images - - Name: Cloning VMs - File: virt-cloning-vms - - Name: Creating VMs using the CLI - Dir: creating_vms_cli - Topics: - - Name: Creating virtual machines from the command line - File: virt-creating-vms-from-cli - - Name: Creating VMs by using container disks - File: virt-creating-vms-from-container-disks - - Name: Creating VMs by cloning PVCs - File: virt-creating-vms-by-cloning-pvcs -- Name: Managing VMs - Dir: 
managing_vms - Topics: - - Name: Installing the QEMU guest agent and VirtIO drivers - File: virt-installing-qemu-guest-agent - - Name: Connecting to VM consoles - File: virt-accessing-vm-consoles - - Name: Configuring SSH access to VMs - File: virt-accessing-vm-ssh - - Name: Editing virtual machines - File: virt-edit-vms - - Name: Editing boot order - File: virt-edit-boot-order - - Name: Deleting virtual machines - File: virt-delete-vms - - Name: Exporting virtual machines - File: virt-exporting-vms - - Name: Managing virtual machine instances - File: virt-manage-vmis - - Name: Controlling virtual machine states - File: virt-controlling-vm-states - - Name: Using virtual Trusted Platform Module devices - File: virt-using-vtpm-devices - - Name: Managing virtual machines with OpenShift Pipelines - File: virt-managing-vms-openshift-pipelines - - Name: Advanced virtual machine management - Dir: advanced_vm_management - Topics: - - Name: Working with resource quotas for virtual machines - File: virt-working-with-resource-quotas-for-vms - - Name: Specifying nodes for virtual machines - File: virt-specifying-nodes-for-vms - - Name: Configuring the default CPU model - File: virt-configuring-default-cpu-model - - Name: UEFI mode for virtual machines - File: virt-uefi-mode-for-vms - - Name: Configuring PXE booting for virtual machines - File: virt-configuring-pxe-booting -# Huge pages not supported in ROSA -# - Name: Using huge pages with virtual machines -# File: virt-using-huge-pages-with-vms -# CPU Manager not supported in ROSA -# - Name: Enabling dedicated resources for a virtual machine -# File: virt-dedicated-resources-vm - - Name: Scheduling virtual machines - File: virt-schedule-vms -# Cannot create required machine config in ROSA as required -# - Name: Configuring PCI passthrough -# File: virt-configuring-pci-passthrough -# Cannot create required machine config in ROSA as required -# - Name: Configuring virtual GPUs -# File: virt-configuring-virtual-gpus -# Feature is TP, thus not supported in ROSA -# - Name: Enabling descheduler evictions on virtual machines -# File: virt-enabling-descheduler-evictions - - Name: About high availability for virtual machines - File: virt-high-availability-for-vms - - Name: Control plane tuning - File: virt-vm-control-plane-tuning -# Need to review following are supported: -# - Name: Assigning compute resources -# File: virt-assigning-compute-resources -# - Name: About multi-queue functionality -# File: virt-about-multi-queue - - Name: VM disks - Dir: virtual_disks - Topics: - - Name: Hot-plugging VM disks - File: virt-hot-plugging-virtual-disks - - Name: Expanding VM disks - File: virt-expanding-vm-disks -# Need to check if supported: -# - Name: Configuring shared volumes -# File: virt-configuring-shared-volumes-for-vms - - Name: Migrating VM disks to a different storage class - File: virt-migrating-storage-class -- Name: Networking - Dir: vm_networking - Topics: - - Name: Networking configuration overview - File: virt-networking-overview - - Name: Connecting a VM to the default pod network - File: virt-connecting-vm-to-default-pod-network - - Name: Connecting a VM to a primary user-defined network - File: virt-connecting-vm-to-primary-udn - - Name: Exposing a VM by using a service - File: virt-exposing-vm-with-service -# Not supported in ROSA/OSD -# - Name: Connecting a VM to a Linux bridge network -# File: virt-connecting-vm-to-linux-bridge -# - Name: Connecting a VM to an SR-IOV network -# File: virt-connecting-vm-to-sriov -# - Name: Using DPDK with SR-IOV 
-# File: virt-using-dpdk-with-sriov - - Name: Connecting a VM to an OVN-Kubernetes secondary network - File: virt-connecting-vm-to-ovn-secondary-network - - Name: Hot plugging secondary network interfaces - File: virt-hot-plugging-network-interfaces - - Name: Connecting a VM to a service mesh - File: virt-connecting-vm-to-service-mesh - - Name: Configuring a dedicated network for live migration - File: virt-dedicated-network-live-migration - - Name: Configuring and viewing IP addresses - File: virt-configuring-viewing-ips-for-vms -# Tech Preview features not supported in ROSA/OSD -# - Name: Accessing a VM by using the cluster FQDN -# File: virt-accessing-vm-secondary-network-fqdn - - Name: Managing MAC address pools for network interfaces - File: virt-using-mac-address-pool-for-vms -- Name: Storage - Dir: storage - Topics: - - Name: Storage configuration overview - File: virt-storage-config-overview - - Name: Configuring storage profiles - File: virt-configuring-storage-profile - - Name: Managing automatic boot source updates - File: virt-automatic-bootsource-updates - - Name: Reserving PVC space for file system overhead - File: virt-reserving-pvc-space-fs-overhead - - Name: Configuring local storage by using HPP - File: virt-configuring-local-storage-with-hpp - - Name: Enabling user permissions to clone data volumes across namespaces - File: virt-enabling-user-permissions-to-clone-datavolumes - - Name: Configuring CDI to override CPU and memory quotas - File: virt-configuring-cdi-for-namespace-resourcequota - - Name: Preparing CDI scratch space - File: virt-preparing-cdi-scratch-space - - Name: Using preallocation for data volumes - File: virt-using-preallocation-for-datavolumes - - Name: Managing data volume annotations - File: virt-managing-data-volume-annotations -# Virtual machine live migration -- Name: Live migration - Dir: live_migration - Topics: - - Name: About live migration - File: virt-about-live-migration - - Name: Configuring live migration - File: virt-configuring-live-migration - - Name: Initiating and canceling live migration - File: virt-initiating-live-migration -# Node maintenance mode -- Name: Nodes - Dir: nodes - Topics: - - Name: Node maintenance - File: virt-node-maintenance - - Name: Managing node labeling for obsolete CPU models - File: virt-managing-node-labeling-obsolete-cpu-models - - Name: Preventing node reconciliation - File: virt-preventing-node-reconciliation -# Hiding in ROSA as user cannot cordon and drain nodes -# - Name: Deleting a failed node to trigger VM failover -# File: virt-triggering-vm-failover-resolving-failed-node -- Name: Monitoring - Dir: monitoring - Topics: - - Name: Monitoring overview - File: virt-monitoring-overview -# Hiding in ROSA/OSD as TP not supported -# - Name: Cluster checkup framework -# File: virt-running-cluster-checkups - - Name: Prometheus queries for virtual resources - File: virt-prometheus-queries - - Name: Virtual machine custom metrics - File: virt-exposing-custom-metrics-for-vms - - Name: Virtual machine health checks - File: virt-monitoring-vm-health - - Name: Runbooks - File: virt-runbooks -- Name: Support - Dir: support - Topics: - - Name: Support overview - File: virt-support-overview - - Name: Collecting data for Red Hat Support - File: virt-collecting-virt-data - Distros: openshift-rosa-hcp - - Name: Troubleshooting - File: virt-troubleshooting -- Name: Backup and restore - Dir: backup_restore - Topics: - - Name: Backup and restore by using VM snapshots - File: virt-backup-restore-snapshots - - Name: Backing 
up and restoring virtual machines - File: virt-backup-restore-overview -# - Name: Removed topics (Placeholder for topics removed from topic map) -# Dir: Removed_topics -# Topics: -# - Name: Collecting OKD Virtualization data for community report -# File: virt-collecting-virt-data -# - Name: Preparing to upgrade ROSA to 4.9 -# File: rosa-upgrading-cluster-prepare -# - Name: Upgrading ROSA Classic clusters -# File: rosa-upgrading diff --git a/_unused_topics/README b/_unused_topics/README deleted file mode 100644 index 5636d8245a15..000000000000 --- a/_unused_topics/README +++ /dev/null @@ -1,2 +0,0 @@ -Placeholder file. Any modules that are not included will be placed here -by the `scripts/find_unused.py` script. diff --git a/_unused_topics/Storage_Shared_Resource_CSI_Driver/Modules/ephemeral-storage-additional-details-about-volumeattributes-on-shared-resource-pod-volumes.adoc b/_unused_topics/Storage_Shared_Resource_CSI_Driver/Modules/ephemeral-storage-additional-details-about-volumeattributes-on-shared-resource-pod-volumes.adoc deleted file mode 100644 index 52f2151b40b6..000000000000 --- a/_unused_topics/Storage_Shared_Resource_CSI_Driver/Modules/ephemeral-storage-additional-details-about-volumeattributes-on-shared-resource-pod-volumes.adoc +++ /dev/null @@ -1,42 +0,0 @@ -:_mod-docs-content-type: REFERENCE - -[id="ephemeral-storage-additional-details-about-volumeattributes-on-shared-resource-pod-volumes_{context}"] -= Additional details about VolumeAttributes on shared resource pod volumes - -[IMPORTANT] -==== -The Shared Resource CSI Driver feature is now generally available in link:https://docs.redhat.com/en/documentation/builds_for_red_hat_openshift/1.1[{builds-v2title} 1.1]. This feature is now deprecated in {product-title}. To use this feature, ensure you are using {builds-v2title} 1.1 or a more recent version. -==== - -[role="_abstract"] -The following attributes affect shared resource pod volumes in various ways: - -* The `refreshResource` attribute in the `volumeAttributes` properties. -* The `refreshResources` attribute in the Shared Resource CSI Driver configuration. -* The `sharedSecret` and `sharedConfigMap` attributes in the `volumeAttributes` properties. - -== The `refreshResource` attribute - -The Shared Resource CSI Driver honors the `refreshResource` attribute in `volumeAttributes` properties of the volume. This attribute controls whether updates to the contents of the underlying `Secret` or `ConfigMap` object are copied to the volume *after* the volume is initially provisioned as part of pod startup. The default value of `refreshResource` is `true`, which means that the contents are updated. - -[IMPORTANT] -==== -If the Shared Resource CSI Driver configuration has disabled the refreshing of both the shared `SharedSecret` and `SharedConfigMap` custom resource (CR) instances, then the `refreshResource` attribute in the `volumeAttribute` properties has no effect. The intent of this attribute is to disable refresh for specific volume mounts when refresh is generally allowed. -==== - -== The `refreshResources` attribute - -You can use a global switch to enable or disable refreshing of shared resources. This switch is the `refreshResources` attribute in the `csi-driver-shared-resource-config` config map for the Shared Resource CSI Driver, which you can find in the `openshift-cluster-csi-drivers` namespace. 
If you set this `refreshResources` attribute to `false`, none of the `Secret` or `ConfigMap` object-related content stored in the volume is updated after the initial provisioning of the volume. - -[IMPORTANT] -==== -Using this Shared Resource CSI Driver configuration to disable refreshing affects all the cluster's volume mounts that use the Shared Resource CSI Driver, regardless of the `refreshResource` attribute in the `volumeAttributes` properties of any of those volumes. -==== - -== Validation of volumeAttributes before provisioning a shared resource volume for a pod - -In the `volumeAttributes` of a single volume, you must set either a `sharedSecret` or a `sharedConfigMap` attribute to the value of a `SharedSecret` or a `SharedConfigMap` CR instance. Otherwise, when the volume is provisioned during pod startup, a validation checks the `volumeAttributes` of that volume and returns an error to the kubelet under the following conditions: - -* Both `sharedSecret` and `sharedConfigMap` attributes have specified values. -* Neither `sharedSecret` nor `sharedConfigMap` attributes have specified values. -* The value of the `sharedSecret` or `sharedConfigMap` attribute does not correspond to the name of a `SharedSecret` or `SharedConfigMap` CR instance on the cluster. diff --git a/_unused_topics/Storage_Shared_Resource_CSI_Driver/Modules/ephemeral-storage-additional-support-limitations-for-shared-resource-csi-driver.adoc b/_unused_topics/Storage_Shared_Resource_CSI_Driver/Modules/ephemeral-storage-additional-support-limitations-for-shared-resource-csi-driver.adoc deleted file mode 100644 index 36dfe6b8537c..000000000000 --- a/_unused_topics/Storage_Shared_Resource_CSI_Driver/Modules/ephemeral-storage-additional-support-limitations-for-shared-resource-csi-driver.adoc +++ /dev/null @@ -1,18 +0,0 @@ -:_mod-docs-content-type: REFERENCE - -[id="ephemeral-storage-additional-support-limitations-for-shared-resource-csi-driver_{context}"] -= Additional support limitations for the Shared Resource CSI Driver - -[IMPORTANT] -==== -The Shared Resource CSI Driver feature is now generally available in link:https://docs.redhat.com/en/documentation/builds_for_red_hat_openshift/1.1[{builds-v2title} 1.1]. This feature is now deprecated in {product-title}. To use this feature, ensure you are using {builds-v2title} 1.1 or a more recent version. -==== - -[role="_abstract"] -The Shared Resource CSI Driver has the following noteworthy limitations: - -* The driver is subject to the limitations of Container Storage Interface (CSI) inline ephemeral volumes. -* The value of the `readOnly` field must be `true`. On `Pod` creation, a validating admission webhook rejects the pod creation if `readOnly` is `false`. If for some reason the validating admission webhook cannot be contacted, on volume provisioning during pod startup, the driver returns an error to the kubelet. Requiring `readOnly` to be `true` is in keeping with proposed best practices for the upstream Kubernetes CSI Driver to apply SELinux labels to associated volumes. -* The driver ignores the `FSType` field because it only supports `tmpfs` volumes. -* The driver ignores the `NodePublishSecretRef` field. Instead, it uses `SubjectAccessReviews` with the `use` verb to evaluate whether a pod can obtain a volume that contains `SharedSecret` or `SharedConfigMap` custom resource (CR) instances. -* You cannot create `SharedSecret` or `SharedConfigMap` custom resource (CR) instances whose names start with `openshift`. 
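To illustrate how these limitations surface in practice, here is a minimal sketch of a pod volume that satisfies them. The pod, volume, and share names are hypothetical; the driver name `csi.sharedresource.openshift.io` is the name the Shared Resource CSI Driver registers:

[source,yaml]
----
apiVersion: v1
kind: Pod
metadata:
  name: example-pod # hypothetical pod name
spec:
  containers:
  - name: example
    image: registry.access.redhat.com/ubi8/ubi-minimal:latest
    command: ["sleep", "infinity"]
    volumeMounts:
    - name: shared-item
      mountPath: /data/shared
  volumes:
  - name: shared-item
    csi:
      driver: csi.sharedresource.openshift.io
      readOnly: true # must be true; the validating admission webhook rejects false
      volumeAttributes:
        sharedSecret: my-share # hypothetical SharedSecret CR instance name
----

Because the driver only supports `tmpfs`, no `fsType` is set; the kubelet receives the shared content as a read-only in-memory volume.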
diff --git a/_unused_topics/Storage_Shared_Resource_CSI_Driver/Modules/ephemeral-storage-integration-between-shared-resources-insights-operator-and-openshift-builds.adoc b/_unused_topics/Storage_Shared_Resource_CSI_Driver/Modules/ephemeral-storage-integration-between-shared-resources-insights-operator-and-openshift-builds.adoc deleted file mode 100644 index 10e34800636b..000000000000 --- a/_unused_topics/Storage_Shared_Resource_CSI_Driver/Modules/ephemeral-storage-integration-between-shared-resources-insights-operator-and-openshift-builds.adoc +++ /dev/null @@ -1,15 +0,0 @@ -:_mod-docs-content-type: REFERENCE - -[id="ephemeral-storage-integration-between-shared-resources-insights-operator-and-openshift-builds_{context}"] -= Integration between shared resources, Insights Operator, and {product-title} Builds - -[role="_abstract"] -Integration between shared resources, Insights Operator, and {product-title} Builds makes using Red Hat subscriptions (RHEL entitlements) easier in {product-title} Builds. - -Previously, in {product-title} 4.9.x and earlier, you manually imported your credentials and copied them to each project or namespace where you were running builds. - -Now, in {product-title} 4.10 and later, {product-title} Builds can use Red Hat subscriptions (RHEL entitlements) by referencing shared resources and the simple content access feature provided by Insights Operator: - -* The simple content access feature imports your subscription credentials to a well-known `Secret` object. See the links in the following "Additional resources" section. -* The cluster administrator creates a `SharedSecret` custom resource (CR) instance around that `Secret` object and grants permission to particular projects or namespaces. In particular, the cluster administrator gives the `builder` service account permission to use that `SharedSecret` CR instance. -* Builds that run within those projects or namespaces can mount a CSI volume that references the `SharedSecret` CR instance and its entitled RHEL content. diff --git a/_unused_topics/Storage_Shared_Resource_CSI_Driver/Modules/ephemeral-storage-sharing-configmaps-across-namespaces.adoc b/_unused_topics/Storage_Shared_Resource_CSI_Driver/Modules/ephemeral-storage-sharing-configmaps-across-namespaces.adoc deleted file mode 100644 index 680e5070aa38..000000000000 --- a/_unused_topics/Storage_Shared_Resource_CSI_Driver/Modules/ephemeral-storage-sharing-configmaps-across-namespaces.adoc +++ /dev/null @@ -1,36 +0,0 @@ -:_mod-docs-content-type: PROCEDURE - -[id="ephemeral-storage-sharing-configmaps-across-namespaces_{context}"] -= Sharing a config map across namespaces - -[role="_abstract"] -To share a config map across namespaces in a cluster, you create a `SharedConfigMap` custom resource (CR) instance for that config map. - -.Prerequisites - -You must have permission to perform the following actions: - -* Create instances of the `sharedconfigmaps.sharedresource.openshift.io` custom resource definition (CRD) at a cluster-scoped level. -* Manage roles and role bindings across the namespaces in the cluster to control which users can get, list, and watch those instances. -* Manage roles and role bindings across the namespaces in the cluster to control which service accounts in pods that mount your Container Storage Interface (CSI) volume can use those instances. -* Access the namespaces that contain the config maps you want to share. - -.Procedure - -. 
Create a `SharedConfigMap` CR instance for the config map that you want to share across namespaces in the cluster: -+ -[source,terminal] ----- -$ oc apply -f - <<EOF -apiVersion: sharedresource.openshift.io/v1alpha1 -kind: SharedConfigMap -metadata: - name: <name of SharedConfigMap> -spec: - configMapRef: - name: <name of config map> - namespace: <namespace of config map> -EOF ----- - -.Next steps diff --git a/_unused_topics/Storage_Shared_Resource_CSI_Driver/Modules/ephemeral-storage-sharing-secrets-across-namespaces.adoc b/_unused_topics/Storage_Shared_Resource_CSI_Driver/Modules/ephemeral-storage-sharing-secrets-across-namespaces.adoc deleted file mode 100644 index 2b6ab91f521b..000000000000 --- a/_unused_topics/Storage_Shared_Resource_CSI_Driver/Modules/ephemeral-storage-sharing-secrets-across-namespaces.adoc +++ /dev/null @@ -1,34 +0,0 @@ -:_mod-docs-content-type: PROCEDURE - -[id="ephemeral-storage-sharing-secrets-across-namespaces_{context}"] -= Sharing secrets across namespaces - -[role="_abstract"] -To share a secret across namespaces in a cluster, you create a `SharedSecret` custom resource (CR) instance for the `Secret` object that you want to share. - -.Prerequisites - -You must have permission to perform the following actions: - -* Create instances of the `sharedsecrets.sharedresource.openshift.io` custom resource definition (CRD) at a cluster-scoped level. -* Manage roles and role bindings across the namespaces in the cluster to control which users can get, list, and watch those instances. -* Manage roles and role bindings to control whether the service account specified by a pod can mount a Container Storage Interface (CSI) volume that references the `SharedSecret` CR instance you want to use. -* Access the namespaces that contain the Secrets you want to share. - -.Procedure - -* Create a `SharedSecret` CR instance for the `Secret` object you want to share across namespaces in the cluster: -+ -[source,terminal] ----- -$ oc apply -f - <<EOF -apiVersion: sharedresource.openshift.io/v1alpha1 -kind: SharedSecret -metadata: - name: <name of SharedSecret> -spec: - secretRef: - name: <name of secret> - namespace: <namespace of secret> -EOF ----- diff --git a/_unused_topics/Storage_Shared_Resource_CSI_Driver/Modules/ephemeral-storage-using-a-sharedconfigmap-object-in-a-pod.adoc b/_unused_topics/Storage_Shared_Resource_CSI_Driver/Modules/ephemeral-storage-using-a-sharedconfigmap-object-in-a-pod.adoc deleted file mode 100644 index 7575e74290a0..000000000000 --- a/_unused_topics/Storage_Shared_Resource_CSI_Driver/Modules/ephemeral-storage-using-a-sharedconfigmap-object-in-a-pod.adoc +++ /dev/null @@ -1,82 +0,0 @@ -:_mod-docs-content-type: PROCEDURE - -[id="ephemeral-storage-using-a-sharedconfigmap-object-in-a-pod_{context}"] -= Using a SharedConfigMap instance in a pod - -[role="_abstract"] -To access a `SharedConfigMap` custom resource (CR) instance from a pod, you grant a given service account RBAC permissions to use that `SharedConfigMap` CR instance. - -.Prerequisites - -* You have created a `SharedConfigMap` CR instance for the config map that you want to share across namespaces in the cluster. -* You must have permission to perform the following actions: -** Discover which `SharedConfigMap` CR instances are available by entering the `oc get sharedconfigmaps` command and getting a non-empty list back. -** Determine if the service account your pod specifies is allowed to use the given `SharedConfigMap` CR instance. That is, you can run `oc adm policy who-can use <sharedconfigmap_name>` to see if the service account in your namespace is listed. -** Determine if the service account your pod specifies is allowed to use `csi` volumes, or if you, as the requesting user who created the pod directly, are allowed to use `csi` volumes. See "Understanding and managing pod security admission" for details. 
- -[NOTE] -==== -If neither of the last two prerequisites in this list is met, create, or ask someone to create, the necessary role-based access control (RBAC) so that you can discover `SharedConfigMap` CR instances and enable service accounts to use `SharedConfigMap` CR instances. -==== - -.Procedure - -. Grant a given service account RBAC permissions to use the `SharedConfigMap` CR instance in its pod by using `oc apply` with YAML content. -+ -[NOTE] -==== -Currently, `kubectl` and `oc` have hard-coded special case logic restricting the `use` verb to roles centered around pod security. Therefore, you cannot use `oc create role ...` to create the role needed for consuming a `SharedConfigMap` CR instance. -==== -+ -[source,terminal] ----- -$ oc apply -f - <` to see if the service account in your namespace is listed. -** Determine if the service account your pod specifies is allowed to use `csi` volumes, or if you, as the requesting user who created the pod directly, are allowed to use `csi` volumes. See "Understanding and managing pod security admission" for details. - -[NOTE] -==== -If neither of the last two prerequisites in this list is met, create, or ask someone to create, the necessary role-based access control (RBAC) so that you can discover `SharedSecret` CR instances and enable service accounts to use `SharedSecret` CR instances. -==== - -.Procedure - -. Grant a given service account RBAC permissions to use the `SharedSecret` CR instance in its pod by using `oc apply` with YAML content: -+ -[NOTE] -==== -Currently, `kubectl` and `oc` have hard-coded special case logic restricting the `use` verb to roles centered around pod security. Therefore, you cannot use `oc create role ...` to create the role needed for consuming `SharedSecret` CR instances. -==== -+ -[source,terminal] ----- -$ oc apply -f - <.yaml` as an example file name. -+ --- -[source,yaml] ----- -apiVersion: infrastructure.cluster.x-k8s.io/<version> # <1> -kind: <infrastructure_kind> # <2> -metadata: - name: <cluster_name> # <3> - namespace: openshift-cluster-api -spec: # <4> ----- -<1> The `apiVersion` varies by platform. -For more information, see the sample Cluster API infrastructure resource YAML for your provider. -The following values are valid: -* `infrastructure.cluster.x-k8s.io/v1beta2`: The version that {aws-first} clusters use. -* `infrastructure.cluster.x-k8s.io/v1beta1`: The version that {gcp-first} and {vmw-first} clusters use. -<2> Specify the infrastructure kind for the cluster. -This value must match the value for your platform. -The following values are valid: -* `AWSCluster`: The cluster is running on {aws-short}. -* `GCPCluster`: The cluster is running on {gcp-short}. -* `AzureCluster`: The cluster is running on {azure-first}. -* `VSphereCluster`: The cluster is running on {vmw-short}. -<3> Specify the name of the cluster. -<4> Specify the details for your environment. -These parameters are provider specific. -For more information, see the sample Cluster API infrastructure resource YAML for your provider. --- - -. Create the infrastructure CR by running the following command: -+ -[source,terminal] ----- -$ oc create -f <infrastructure_kind>.yaml ----- - -.Verification - -* Confirm that the infrastructure CR is created by running the following command: -+ -[source,terminal] ----- -$ oc get <infrastructure_kind> ----- -+ -where `<infrastructure_kind>` is the value that corresponds to your platform. -+ -.Example output -[source,text] ----- -NAME CLUSTER READY -<cluster_name> <cluster_name> true ----- -+ -[NOTE] -==== -This output might contain additional columns that are specific to your cloud provider. 
-==== \ No newline at end of file diff --git a/_unused_topics/capi-yaml-infrastructure-aws.adoc b/_unused_topics/capi-yaml-infrastructure-aws.adoc deleted file mode 100644 index adaf51ac5ab9..000000000000 --- a/_unused_topics/capi-yaml-infrastructure-aws.adoc +++ /dev/null @@ -1,33 +0,0 @@ -// Module included in the following assemblies: -// -// * machine_management/cluster_api_machine_management/cluster_api_provider_configurations/cluster-api-config-options-aws.adoc - -:_mod-docs-content-type: REFERENCE -[id="capi-yaml-infrastructure-aws_{context}"] -= Sample YAML for a Cluster API infrastructure cluster resource on {aws-full} - -The infrastructure cluster resource is provider-specific and defines properties that all the compute machine sets in the cluster share, such as the region and subnets. -The compute machine set references this resource when creating machines. - -In {product-title} {product-version}, the {cluster-capi-operator} generates this resource. -The following sample YAML file is for informational purposes. -User modification of this generated resource is not recommended. - -[source,yaml] ----- -apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 -kind: AWSCluster # <1> -metadata: - name: <cluster_name> # <2> - namespace: openshift-cluster-api -spec: - controlPlaneEndpoint: # <3> - host: <control_plane_endpoint_address> - port: 6443 - region: <region> # <4> ----- -<1> Specifies the infrastructure kind for the cluster. -This value matches the value for your platform. -<2> Specifies the cluster ID as the name of the cluster. -<3> Specifies the address of the control plane endpoint and the port to use to access it. -<4> Specifies the {aws-short} region. \ No newline at end of file diff --git a/_unused_topics/capi-yaml-infrastructure-azure.adoc b/_unused_topics/capi-yaml-infrastructure-azure.adoc deleted file mode 100644 index 20c6c352e312..000000000000 --- a/_unused_topics/capi-yaml-infrastructure-azure.adoc +++ /dev/null @@ -1,51 +0,0 @@ -// Module included in the following assemblies: -// -// * machine_management/cluster_api_machine_management/cluster_api_provider_configurations/cluster-api-config-options-azure.adoc - -:_mod-docs-content-type: REFERENCE -[id="capi-yaml-infrastructure-azure_{context}"] -= Sample YAML for a Cluster API infrastructure cluster resource on {azure-full} - -The infrastructure cluster resource is provider-specific and defines properties that all the compute machine sets in the cluster share, such as the region and subnets. -The compute machine set references this resource when creating machines. - -In {product-title} {product-version}, the {cluster-capi-operator} generates this resource. -The following sample YAML file is for informational purposes. -User modification of this generated resource is not recommended. - -[source,yaml] ----- -apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 -kind: AzureCluster # <1> -metadata: - name: <cluster_name> # <2> - namespace: openshift-cluster-api -spec: - azureEnvironment: AzurePublicCloud - bastionSpec: {} - controlPlaneEndpoint: # <3> - host: <control_plane_endpoint_address> - port: 6443 - identityRef: # <4> - kind: AzureClusterIdentity - name: <cluster_identity_name> - namespace: openshift-cluster-api - location: westus # <5> - networkSpec: - apiServerLB: - backendPool: {} - nodeOutboundLB: - backendPool: - name: - name: - vnet: - name: <cluster_name>-vnet - resourceGroup: <cluster_name>-rg - resourceGroup: <cluster_name>-rg ----- -<1> Specifies the infrastructure kind for the cluster. -This value matches the value for your platform. -<2> Specifies the cluster ID as the name of the cluster. 
-<3> Specifies the address of the control plane endpoint and the port to use to access it. -<4> The cluster identity that the {cluster-capi-operator} creates. -<5> Specifies the {azure-short} region. \ No newline at end of file diff --git a/_unused_topics/capi-yaml-infrastructure-gcp.adoc b/_unused_topics/capi-yaml-infrastructure-gcp.adoc deleted file mode 100644 index 42bbe1450495..000000000000 --- a/_unused_topics/capi-yaml-infrastructure-gcp.adoc +++ /dev/null @@ -1,36 +0,0 @@ -// Module included in the following assemblies: -// -// * machine_management/cluster_api_machine_management/cluster_api_provider_configurations/cluster-api-config-options-gcp.adoc - -:_mod-docs-content-type: REFERENCE -[id="capi-yaml-infrastructure-gcp_{context}"] -= Sample YAML for a Cluster API infrastructure cluster resource on {gcp-full} - -The infrastructure cluster resource is provider-specific and defines properties that all the compute machine sets in the cluster share, such as the region and subnets. -The compute machine set references this resource when creating machines. - -In {product-title} {product-version}, the {cluster-capi-operator} generates this resource. -The following sample YAML file is for informational purposes. -User modification of this generated resource is not recommended. - -[source,yaml] ----- -apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 -kind: GCPCluster # <1> -metadata: - name: <cluster_name> # <2> -spec: - controlPlaneEndpoint: # <3> - host: <control_plane_endpoint_address> - port: 6443 - network: - name: <cluster_name>-network - project: <project_name> # <4> - region: <region> # <5> ----- -<1> Specifies the infrastructure kind for the cluster. -This value matches the value for your platform. -<2> Specifies the cluster ID as the name of the cluster. -<3> Specifies the IP address of the control plane endpoint and the port used to access it. -<4> Specifies the {gcp-short} project name. -<5> Specifies the {gcp-short} region. \ No newline at end of file diff --git a/_unused_topics/capi-yaml-infrastructure-vsphere.adoc b/_unused_topics/capi-yaml-infrastructure-vsphere.adoc deleted file mode 100644 index bd213de70fbc..000000000000 --- a/_unused_topics/capi-yaml-infrastructure-vsphere.adoc +++ /dev/null @@ -1,42 +0,0 @@ -// Module included in the following assemblies: -// -// * machine_management/cluster_api_machine_management/cluster_api_provider_configurations/cluster-api-config-options-vsphere.adoc - -:_mod-docs-content-type: REFERENCE -[id="capi-yaml-infrastructure-vsphere_{context}"] -= Sample YAML for a Cluster API infrastructure cluster resource on {vmw-full} - -The infrastructure cluster resource is provider-specific and defines properties that all the compute machine sets in the cluster share, such as the region and subnets. -The compute machine set references this resource when creating machines. - -In {product-title} {product-version}, the {cluster-capi-operator} generates this resource. -The following sample YAML file is for informational purposes. -User modification of this generated resource is not recommended. - -[source,yaml] ----- -apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 -kind: VSphereCluster # <1> -metadata: - name: <cluster_name> # <2> -spec: - controlPlaneEndpoint: # <3> - host: <control_plane_endpoint_address> - port: 6443 - identityRef: - kind: Secret - name: <secret_name> - server: <vsphere_server> # <4> ----- -<1> Specifies the infrastructure kind for the cluster. -This value matches the value for your platform. -<2> Specifies the cluster ID as the name of the cluster. -<3> Specifies the IP address of the control plane endpoint and the port used to access it. 
-<4> Specifies the {vmw-short} server for the cluster. -You can find this value on an existing {vmw-short} cluster by running the following command: -+ -[source,terminal] ----- -$ oc get infrastructure cluster \ - -o jsonpath="{.spec.platformSpec.vsphere.vcenters[0].server}" ----- \ No newline at end of file diff --git a/_unused_topics/cco-mode-gcp-workload-identity.adoc b/_unused_topics/cco-mode-gcp-workload-identity.adoc deleted file mode 100644 index 0df5ff50ee20..000000000000 --- a/_unused_topics/cco-mode-gcp-workload-identity.adoc +++ /dev/null @@ -1,124 +0,0 @@ -:_mod-docs-content-type: ASSEMBLY -[id="cco-mode-gcp-workload-identity"] -= Using manual mode with GCP Workload Identity -include::_attributes/common-attributes.adoc[] -:context: cco-mode-gcp-workload-identity - -toc::[] - -Manual mode with GCP Workload Identity is supported for Google Cloud Platform (GCP). - -[NOTE] -==== -This credentials strategy is supported for only new {product-title} clusters and must be configured during installation. You cannot reconfigure an existing cluster that uses a different credentials strategy to use this feature. -==== - -[id="gcp-workload-identity-mode-about_{context}"] -== About manual mode with GCP Workload Identity - -In manual mode with GCP Workload Identity, the individual {product-title} cluster components can impersonate IAM service accounts using short-term, limited-privilege credentials. - -Requests for new and refreshed credentials are automated by using an appropriately configured OpenID Connect (OIDC) identity provider, combined with IAM service accounts. {product-title} signs service account tokens that are trusted by GCP, and can be projected into a pod and used for authentication. Tokens are refreshed after one hour by default. - -.Workload Identity authentication flow -image::347_OpenShift_credentials_with_STS_updates_0623_GCP.png[Detailed authentication flow between GCP and the cluster when using GCP Workload Identity] - -Using manual mode with GCP Workload Identity changes the content of the GCP credentials that are provided to individual {product-title} components. - -.GCP secret format - -[source,yaml] ----- -apiVersion: v1 -kind: Secret -metadata: - namespace: <component_namespace> <1> - name: <component_secret_name> <2> -data: - service_account.json: <base64_encoded_service_account> <3> ----- -<1> The namespace for the component. -<2> The name of the component secret. -<3> The Base64 encoded service account. - -.Content of the Base64 encoded `service_account.json` file using long-lived credentials - -[source,json] ----- -{ - "type": "service_account", <1> - "project_id": "<project_id>", - "private_key_id": "<private_key_id>", - "private_key": "<private_key>", <2> - "client_email": "<client_email>", - "client_id": "<client_id>", - "auth_uri": "https://accounts.google.com/o/oauth2/auth", - "token_uri": "https://oauth2.googleapis.com/token", - "auth_provider_x509_cert_url": "https://www.googleapis.com/oauth2/v1/certs", - "client_x509_cert_url": "https://www.googleapis.com/robot/v1/metadata/x509/<client_email>" -} ----- -<1> The credential type is `service_account`. -<2> The private RSA key that is used to authenticate to GCP. This key must be kept secure and is not rotated. 
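If you want to check which of these formats a given component secret uses on a live cluster, one way is to decode the secret's `service_account.json` key; the namespace and secret names here are placeholders:

[source,terminal]
----
$ oc get secret <component_secret_name> -n <component_namespace> \
  -o jsonpath='{.data.service_account\.json}' | base64 -d
----

The `\.` escapes the dot in the data key name so that jsonpath does not treat it as a path separator.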
- -.Content of the Base64 encoded `service_account.json` file using GCP Workload Identity - -[source,json] ----- -{ - "type": "external_account", <1> - "audience": "//iam.googleapis.com/projects/123456789/locations/global/workloadIdentityPools/test-pool/providers/test-provider", <2> - "subject_token_type": "urn:ietf:params:oauth:token-type:jwt", - "token_url": "https://sts.googleapis.com/v1/token", - "service_account_impersonation_url": "https://iamcredentials.googleapis.com/v1/projects/-/serviceAccounts/<service_account_email>:generateAccessToken", <3> - "credential_source": { - "file": "<path_to_token>", <4> - "format": { - "type": "text" - } - } -} ----- -<1> The credential type is `external_account`. -<2> The target audience is the GCP Workload Identity provider. -<3> The resource URL of the service account that can be impersonated with these credentials. -<4> The path to the service account token inside the pod. By convention, this is `/var/run/secrets/openshift/serviceaccount/token` for {product-title} components. - -//Supertask: Installing an OCP cluster configured for manual mode with GCP Workload Identity -[id="gcp-workload-identity-mode-installing"] -== Installing an {product-title} cluster configured for manual mode with GCP Workload Identity - -To install a cluster that is configured to use the Cloud Credential Operator (CCO) in manual mode with GCP Workload Identity: - -. xref:../../authentication/managing_cloud_provider_credentials/cco-mode-gcp-workload-identity.adoc#cco-ccoctl-configuring_cco-mode-gcp-workload-identity[Configure the Cloud Credential Operator utility]. -. xref:../../authentication/managing_cloud_provider_credentials/cco-mode-gcp-workload-identity.adoc#cco-ccoctl-creating-at-once_cco-mode-gcp-workload-identity[Create the required GCP resources]. -. xref:../../authentication/managing_cloud_provider_credentials/cco-mode-gcp-workload-identity.adoc#sts-mode-installing-manual-run-installer_cco-mode-gcp-workload-identity[Run the {product-title} installer]. -. xref:../../authentication/managing_cloud_provider_credentials/cco-mode-gcp-workload-identity.adoc#sts-mode-installing-verifying_cco-mode-gcp-workload-identity[Verify that the cluster is using short-lived credentials]. - -[NOTE] -==== -Because the cluster is operating in manual mode when using GCP Workload Identity, it is not able to create new credentials for components with the permissions that they require. When upgrading to a different minor version of {product-title}, there are often new GCP permission requirements. Before upgrading a cluster that is using GCP Workload Identity, the cluster administrator must manually ensure that the GCP permissions are sufficient for existing components and available to any new components. 
-==== - -[role="_additional-resources"] -.Additional resources - -* xref:../../updating/preparing_for_updates/preparing-manual-creds-update.adoc#cco-ccoctl-configuring_preparing-manual-creds-update[Configuring the Cloud Credential Operator utility for a cluster update] - -//Task part 1: Configuring the Cloud Credential Operator utility -include::modules/cco-ccoctl-configuring.adoc[leveloffset=+2] - -//Task part 2: Creating the required GCP resources all at once -include::modules/cco-ccoctl-creating-at-once.adoc[leveloffset=+2] - -//Task part 3: Run the OCP installer -include::modules/sts-mode-installing-manual-run-installer.adoc[leveloffset=+2] - -//Task part 4: Verify that the cluster is using short-lived credentials -include::modules/sts-mode-installing-verifying.adoc[leveloffset=+2] - -[role="_additional-resources"] -[id="additional-resources_{context}"] -== Additional resources - -* xref:../../updating/preparing_for_updates/preparing-manual-creds-update.adoc#preparing-manual-creds-update[Preparing to update a cluster with manually maintained credentials] diff --git a/_unused_topics/cco-mode-sts.adoc b/_unused_topics/cco-mode-sts.adoc deleted file mode 100644 index 7fd809f7702d..000000000000 --- a/_unused_topics/cco-mode-sts.adoc +++ /dev/null @@ -1,114 +0,0 @@ -:_mod-docs-content-type: ASSEMBLY -[id="cco-mode-sts"] -= Using manual mode with Amazon Web Services Security Token Service -include::_attributes/common-attributes.adoc[] -:context: cco-mode-sts - -toc::[] - -Manual mode with STS is supported for Amazon Web Services (AWS). - -[NOTE] -==== -This credentials strategy is supported for only new {product-title} clusters and must be configured during installation. You cannot reconfigure an existing cluster that uses a different credentials strategy to use this feature. -==== - -[id="sts-mode-about_{context}"] -== About manual mode with AWS Security Token Service - -In manual mode with STS, the individual {product-title} cluster components use AWS Security Token Service (STS) to assign components IAM roles that provide short-term, limited-privilege security credentials. These credentials are associated with IAM roles that are specific to each component that makes AWS API calls. - -Requests for new and refreshed credentials are automated by using an appropriately configured AWS IAM OpenID Connect (OIDC) identity provider, combined with AWS IAM roles. {product-title} signs service account tokens that are trusted by AWS IAM, and can be projected into a pod and used for authentication. Tokens are refreshed after one hour. - -.STS authentication flow -image::347_OpenShift_credentials_with_STS_updates_0623_AWS.png[Detailed authentication flow between AWS and the cluster when using AWS STS] - -Using manual mode with STS changes the content of the AWS credentials that are provided to individual {product-title} components. - -.AWS secret format using long-lived credentials - -[source,yaml] ----- -apiVersion: v1 -kind: Secret -metadata: - namespace: <component_namespace> <1> - name: <component_secret_name> <2> -data: - aws_access_key_id: <base64_encoded_access_key_id> - aws_secret_access_key: <base64_encoded_secret_access_key> ----- -<1> The namespace for the component. -<2> The name of the component secret. - -.AWS secret format with STS - -[source,yaml] ----- -apiVersion: v1 -kind: Secret -metadata: - namespace: <component_namespace> <1> - name: <component_secret_name> <2> -stringData: - credentials: |- - [default] - sts_regional_endpoints = regional - role_name: <component_role_name> <3> - web_identity_token_file: <path_to_token> <4> ----- -<1> The namespace for the component. -<2> The name of the component secret. -<3> The IAM role for the component. 
-<4> The path to the service account token inside the pod. By convention, this is `/var/run/secrets/openshift/serviceaccount/token` for {product-title} components. - -//Supertask: Installing an OCP cluster configured for manual mode with STS -[id="sts-mode-installing_{context}"] -== Installing an {product-title} cluster configured for manual mode with STS - -To install a cluster that is configured to use the Cloud Credential Operator (CCO) in manual mode with STS: - -//[pre-4.8]. xref:../../authentication/managing_cloud_provider_credentials/cco-mode-sts.adoc#sts-mode-installing-manual-config_cco-mode-sts[Create the required AWS resources] -. xref:../../authentication/managing_cloud_provider_credentials/cco-mode-sts.adoc#cco-ccoctl-configuring_cco-mode-sts[Configure the Cloud Credential Operator utility]. -. Create the required AWS resources xref:../../authentication/managing_cloud_provider_credentials/cco-mode-sts.adoc#cco-ccoctl-creating-individually_cco-mode-sts[individually], or xref:../../authentication/managing_cloud_provider_credentials/cco-mode-sts.adoc#cco-ccoctl-creating-at-once_cco-mode-sts[with a single command]. -. xref:../../authentication/managing_cloud_provider_credentials/cco-mode-sts.adoc#sts-mode-installing-manual-run-installer_cco-mode-sts[Run the {product-title} installer]. -. xref:../../authentication/managing_cloud_provider_credentials/cco-mode-sts.adoc#sts-mode-installing-verifying_cco-mode-sts[Verify that the cluster is using short-lived credentials]. - -[NOTE] -==== -Because the cluster is operating in manual mode when using STS, it is not able to create new credentials for components with the permissions that they require. When upgrading to a different minor version of {product-title}, there are often new AWS permission requirements. Before upgrading a cluster that is using STS, the cluster administrator must manually ensure that the AWS permissions are sufficient for existing components and available to any new components. -==== - -[role="_additional-resources"] -.Additional resources - -* xref:../../updating/preparing_for_updates/preparing-manual-creds-update.adoc#cco-ccoctl-configuring_preparing-manual-creds-update[Configuring the Cloud Credential Operator utility for a cluster update] - -//[pre-4.8]Task part 1: Creating AWS resources manually -//include::modules/sts-mode-installing-manual-config.adoc[leveloffset=+2] - -//Task part 1: Configuring the Cloud Credential Operator utility -include::modules/cco-ccoctl-configuring.adoc[leveloffset=+2] - -[id="sts-mode-create-aws-resources-ccoctl_{context}"] -=== Creating AWS resources with the Cloud Credential Operator utility - -You can use the CCO utility (`ccoctl`) to create the required AWS resources xref:../../authentication/managing_cloud_provider_credentials/cco-mode-sts.adoc#cco-ccoctl-creating-individually_cco-mode-sts[individually], or xref:../../authentication/managing_cloud_provider_credentials/cco-mode-sts.adoc#cco-ccoctl-creating-at-once_cco-mode-sts[with a single command]. 
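
For example, creating all of the required resources at once is a single `ccoctl` invocation. The following is a minimal sketch only; the cluster name, region, and the directory that contains the `CredentialsRequest` files are placeholders, and the modules included below describe the complete procedure and supported options:

[source,terminal]
----
$ ccoctl aws create-all \
    --name=<cluster_name> \
    --region=<aws_region> \
    --credentials-requests-dir=<path_to_credentials_requests_directory>
----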
- -//Task part 2a: Creating the required AWS resources individually -include::modules/cco-ccoctl-creating-individually.adoc[leveloffset=+3] - -//Task part 2b: Creating the required AWS resources all at once -include::modules/cco-ccoctl-creating-at-once.adoc[leveloffset=+3] - -//Task part 3: Run the OCP installer -include::modules/sts-mode-installing-manual-run-installer.adoc[leveloffset=+2] - -//Task part 4: Verify that the cluster is using short-lived credentials -include::modules/sts-mode-installing-verifying.adoc[leveloffset=+2] - -[role="_additional-resources"] -[id="additional-resources_{context}"] -== Additional resources - -* xref:../../updating/preparing_for_updates/preparing-manual-creds-update.adoc#preparing-manual-creds-update[Preparing to update a cluster with manually maintained credentials] diff --git a/_unused_topics/cco-short-term-creds-auth-flows.adoc b/_unused_topics/cco-short-term-creds-auth-flows.adoc deleted file mode 100644 index 52a1dca7cb54..000000000000 --- a/_unused_topics/cco-short-term-creds-auth-flows.adoc +++ /dev/null @@ -1,39 +0,0 @@ -// Module included in the following assemblies: -// -// * authentication/managing_cloud_provider_credentials/cco-short-term-creds.adoc - -:_mod-docs-content-type: REFERENCE -[id="cco-short-term-creds-auth-flows_{context}"] -= Provider authentication details - -The authentication flow for this authentication method has similarities across the supported cloud providers. - -[id="cco-short-term-creds-auth-flow-aws_{context}"] -== AWS Security Token Service - -In manual mode with STS, the individual {product-title} cluster components use the AWS Security Token Service (STS) to assign components IAM roles that provide short-term, limited-privilege security credentials. These credentials are associated with IAM roles that are specific to each component that makes AWS API calls. - -.AWS Security Token Service authentication flow -image::347_OpenShift_credentials_with_STS_updates_0623_AWS.png[Detailed authentication flow between AWS and the cluster when using AWS STS] - -[id="cco-short-term-creds-auth-flow-gcp_{context}"] -== GCP Workload Identity - -In manual mode with GCP Workload Identity, the individual {product-title} cluster components use the GCP workload identity provider to allow components to impersonate GCP service accounts using short-term, limited-privilege credentials. - -.GCP Workload Identity authentication flow -image::347_OpenShift_credentials_with_STS_updates_0623_GCP.png[Detailed authentication flow between GCP and the cluster when using GCP Workload Identity] - -//// -[id="cco-short-term-creds-auth-flow-azure_{context}"] -== Azure AD Workload Identity - -//todo: work with dev and diagrams team to get a diagram for Azure -.Azure AD Workload Identity authentication flow -image::Azure_diagram.png[Detailed authentication flow between Azure and the cluster when using Azure AD Workload Identity] -//// - -[id="cco-short-term-creds-auth-flow-refresh_{context}"] -== Automated credential refreshing - -Requests for new and refreshed credentials are automated by using an appropriately configured OpenID Connect (OIDC) identity provider combined with provider-specific service accounts or roles. {product-title} signs Kubernetes service account tokens that are trusted by the cloud provider. These tokens can be mounted into a pod and used for authentication. By default, tokens are refreshed after one hour. 
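
As an illustrative sketch of the mechanism, a bound service account token can be projected into a pod through a `projected` volume stanza such as the following. The `audience` value shown is an assumption for illustration; the mount path follows the conventional token location described above:

[source,yaml]
----
volumes:
- name: bound-sa-token
  projected:
    sources:
    - serviceAccountToken:
        audience: openshift # illustrative audience value
        expirationSeconds: 3600 # tokens are refreshed after one hour
        path: token # mounted under /var/run/secrets/openshift/serviceaccount/
----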
\ No newline at end of file diff --git a/_unused_topics/cnv-accessing-vmi-web.adoc b/_unused_topics/cnv-accessing-vmi-web.adoc deleted file mode 100644 index f733d2873fd5..000000000000 --- a/_unused_topics/cnv-accessing-vmi-web.adoc +++ /dev/null @@ -1,18 +0,0 @@ -// Module included in the following assemblies:
//

[id="virt-accessing-vmi-web_{context}"]
= Connecting to a virtual machine with the web console

You can connect to a virtual machine by using the web console.

.Procedure

. Ensure you are in the correct project. If not, click the *Project* list and select the appropriate project.
. Click *Workloads* -> *Virtual Machines* to display the virtual machines in the project.
. Select a virtual machine.
. In the *Overview* tab, click the `virt-launcher-<vm_name>` pod.
. Click the *Terminal* tab. If the terminal is blank, click the terminal and press any key to initiate connection. diff --git a/_unused_topics/completing-installation.adoc b/_unused_topics/completing-installation.adoc deleted file mode 100644 index a3d3235f7312..000000000000 --- a/_unused_topics/completing-installation.adoc +++ /dev/null @@ -1,33 +0,0 @@ -// Module included in the following assemblies:
//
// * TBD

[id="completing-installation_{context}"]
= Completing and verifying the {product-title} installation

When the bootstrap node is done with its work and has handed off control to the new {product-title} cluster, the bootstrap node is destroyed. The installation program waits for the cluster to initialize, creates a route to the {product-title} console, and presents the information and credentials you require to log in to the cluster. The following is an example:

----
INFO Install complete!

INFO Run 'export KUBECONFIG=/home/joe/ocp/auth/kubeconfig' to manage the cluster with 'oc', the {product-title} CLI.

INFO The cluster is ready when 'oc login -u kubeadmin -p <password>' succeeds (wait a few minutes).

INFO Access the {product-title} web-console here: https://console-openshift-console.apps.mycluster.devel.example.com

INFO Login to the console with user: kubeadmin, password: "password"
----

To access the {product-title} cluster from your web browser, log in as kubeadmin with the password, using the URL shown:

    https://console-openshift-console.apps.mycluster.devel.example.com

To access the {product-title} cluster from the command line, identify the location of the credentials file (export the KUBECONFIG variable) and log in as kubeadmin with the provided password:
----
$ export KUBECONFIG=/home/joe/ocp/auth/kubeconfig

$ oc login -u kubeadmin -p <password>
----

At this point, you can begin using the {product-title} cluster. To understand the management of your {product-title} cluster going forward, you should explore the {product-title} control plane. diff --git a/_unused_topics/con-pod-reset-policy.adoc b/_unused_topics/con-pod-reset-policy.adoc deleted file mode 100644 index b317a1df6495..000000000000 --- a/_unused_topics/con-pod-reset-policy.adoc +++ /dev/null @@ -1,54 +0,0 @@ -[[nodes-configuring-nodes]]
= Understanding Pod restart policy
{product-author}
{product-version}
:data-uri:
:icons:
:experimental:
:toc: macro
:toc-title:

//from https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy
A Pod restart policy determines how {product-title} responds when containers in that Pod exit. The policy applies to all containers in that Pod. 

The possible values are:

* `Always` - Tries restarting a successfully exited container on the Pod continuously, with an exponential back-off delay (10s, 20s, 40s) capped at 5 minutes. The default is `Always`.
* `OnFailure` - Tries restarting a failed container on the Pod with an exponential back-off delay (10s, 20s, 40s) capped at 5 minutes.
* `Never` - Does not try to restart exited or failed containers on the Pod. Pods immediately fail and exit.

//https://kubernetes-v1-4.github.io/docs/user-guide/pod-states/
Once bound to a node, a Pod will never be bound to another node. This means that a controller is necessary in order for a Pod to survive node failure:

[cols="3",options="header"]
|===

|Condition
|Controller Type
|Restart Policy

|Pods that are expected to terminate (such as batch computations)
|xref:../../architecture/core_concepts/deployments.adoc#jobs[Job]
|`OnFailure` or `Never`

|Pods that are expected to not terminate (such as web servers)
|xref:../../architecture/core_concepts/deployments.adoc#replication-controllers[Replication Controller]
|`Always`

|Pods that must run one-per-machine
|xref:../../dev_guide/daemonsets.adoc#dev-guide-daemonsets[DaemonSet]
|Any
|===

If a container on a Pod fails and the restart policy is set to `OnFailure`, the Pod stays on the node and the container is restarted. If you do not want the container to restart, use a restart policy of `Never`.

//https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/#handling-pod-and-container-failures
If an entire Pod fails, {product-title} starts a new Pod. Developers must address the possibility that applications might be restarted in a new Pod. In particular, applications must handle temporary files, locks, incomplete output, and so forth caused by previous runs.

For details on how {product-title} uses restart policy with failed containers, see the link:https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#example-states[Example States] in the Kubernetes documentation.

diff --git a/_unused_topics/configuration-resource-configure.adoc b/_unused_topics/configuration-resource-configure.adoc deleted file mode 100644 index a65a1d4bb2fc..000000000000 --- a/_unused_topics/configuration-resource-configure.adoc +++ /dev/null @@ -1,38 +0,0 @@ -// Module included in the following assemblies:
//
// * TBD

[id="configuration-resource-configure_{context}"]
= Configure the Configuration Resource

To configure the Configuration Resource, you customize the Custom Resource Definition (CRD) that controls its Operator and deploy it to your cluster.

.Prerequisites
* Deploy an {product-title} cluster.
* Review the CRD for the resource and provision any resources that your changes require.
* You have access to the cluster as a user with permissions to modify the resource.

.Procedure

. From a machine that has cluster access, modify the CRD for the resource to describe your intended configuration. Save the file in `whatever-the-location-is`.

. Run the following command to update the CRD in your cluster:
+
----
$ oc something or other -- <1> --<2>
----
<1> The CRD file that contains customizations for your resource.
<2> However you specify the cluster that you are changing.

. Confirm that the resource reflects your changes. Run the following command and review the output, as shown in the sketch after this procedure:
+
----
$ oc something or other

Output
Output
Output
----
+
If the output includes your changes, the resource was redeployed on your cluster. 
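
As a purely hypothetical sketch of this flow, with placeholder resource and file names standing in for the commands above, the update and verification steps might look like the following:

[source,terminal]
----
$ oc apply -f <configuration_resource_file>.yaml
$ oc get <configuration_resource> -o yaml
----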
diff --git a/_unused_topics/configuring-local-provisioner.adoc b/_unused_topics/configuring-local-provisioner.adoc deleted file mode 100644 index 1e46999679b0..000000000000 --- a/_unused_topics/configuring-local-provisioner.adoc +++ /dev/null @@ -1,54 +0,0 @@ -[id="configuring-local-provisioner_{context}"]
= Configuring the local provisioner

{product-title} depends on an external provisioner to create PVs for local devices and to clean up PVs when they are no longer in use so that they can be reused.

.Prerequisites

* All local volumes must be manually mounted before they can be consumed by {product-title} as PVs.

[NOTE]
====
The local volume provisioner is different from most provisioners and does not support dynamic provisioning.
====

[NOTE]
====
The local volume provisioner requires administrators to preconfigure the local volumes on each node and mount them under discovery directories. The provisioner then manages the volumes by creating and cleaning up PVs for each volume.
====

.Procedure
. Configure the external provisioner using a ConfigMap to relate directories with storage classes, for example:
+
----
apiVersion: v1
kind: ConfigMap
metadata:
  name: local-volume-config
data:
  storageClassMap: |
    local-ssd: <1>
      hostDir: /mnt/local-storage/ssd <2>
      mountDir: /mnt/local-storage/ssd <3>
    local-hdd:
      hostDir: /mnt/local-storage/hdd
      mountDir: /mnt/local-storage/hdd
----
<1> Name of the storage class.
<2> Path to the directory on the host. It must be a subdirectory of `*/mnt/local-storage*`.
<3> Path to the directory in the provisioner Pod. It is recommended to use the same directory structure as on the host, in which case `mountDir` can be omitted.

. Create a standalone namespace for the local volume provisioner and its configuration, for example:
+
----
$ oc new-project local-storage
----

With this configuration, the provisioner creates:

* One PV with storage class `local-ssd` for every subdirectory mounted in the `*/mnt/local-storage/ssd*` directory
* One PV with storage class `local-hdd` for every subdirectory mounted in the `*/mnt/local-storage/hdd*` directory

[WARNING]
====
The syntax of the ConfigMap has changed between {product-title} 3.9 and 3.10. Since this feature is in Technology Preview, the ConfigMap is not automatically converted during the update. 
-==== diff --git a/_unused_topics/configuring-user-agent.adoc b/_unused_topics/configuring-user-agent.adoc deleted file mode 100644 index dda5f717be47..000000000000 --- a/_unused_topics/configuring-user-agent.adoc +++ /dev/null @@ -1,10 +0,0 @@ -[id="configuring-user-agent"] -= Configuring the user agent -include::_attributes/common-attributes.adoc[] -:context: configuring-user-agent - -toc::[] - -include::modules/user-agent-overview.adoc[leveloffset=+1] - -include::modules/user-agent-configuring.adoc[leveloffset=+1] diff --git a/_unused_topics/container_storage_interface_microshift/_attributes b/_unused_topics/container_storage_interface_microshift/_attributes deleted file mode 120000 index 20cc1dcb77bf..000000000000 --- a/_unused_topics/container_storage_interface_microshift/_attributes +++ /dev/null @@ -1 +0,0 @@ -../../_attributes/ \ No newline at end of file diff --git a/_unused_topics/container_storage_interface_microshift/images b/_unused_topics/container_storage_interface_microshift/images deleted file mode 120000 index 5fa6987088da..000000000000 --- a/_unused_topics/container_storage_interface_microshift/images +++ /dev/null @@ -1 +0,0 @@ -../../images \ No newline at end of file diff --git a/_unused_topics/container_storage_interface_microshift/microshift-persistent-storage-csi.adoc b/_unused_topics/container_storage_interface_microshift/microshift-persistent-storage-csi.adoc deleted file mode 100644 index 4275a8ecbbdd..000000000000 --- a/_unused_topics/container_storage_interface_microshift/microshift-persistent-storage-csi.adoc +++ /dev/null @@ -1,25 +0,0 @@ -:_mod-docs-content-type: ASSEMBLY -[id="persistent-storage-csi-microshift"] -= Configuring CSI volumes for {product-title} -include::_attributes/attributes-microshift.adoc[] -:context: persistent-storage-csi-microshift - -toc::[] - -The Container Storage Interface (CSI) allows {product-title} to consume -storage from storage back ends that implement the -link:https://github.com/container-storage-interface/spec[CSI interface] -as persistent storage. - -[NOTE] -==== -{product-title} {product-version} supports version 1.5.0 of the link:https://github.com/container-storage-interface/spec[CSI specification]. 
====

include::modules/persistent-storage-csi-dynamic-provisioning.adoc[leveloffset=+1]
include::modules/persistent-storage-csi-mysql-example.adoc[leveloffset=+1]

[role="_additional-resources"]
.Additional resources

* link:https://access.redhat.com/documentation/en-us/openshift_container_platform/4.12/html/storage/using-container-storage-interface-csi#persistent-storage-csi[{ocp} CSI Overview] \ No newline at end of file diff --git a/_unused_topics/container_storage_interface_microshift/modules b/_unused_topics/container_storage_interface_microshift/modules deleted file mode 120000 index 36719b9de743..000000000000 --- a/_unused_topics/container_storage_interface_microshift/modules +++ /dev/null @@ -1 +0,0 @@ -../../modules/ \ No newline at end of file diff --git a/_unused_topics/container_storage_interface_microshift/snippets b/_unused_topics/container_storage_interface_microshift/snippets deleted file mode 120000 index 7bf6da9a51d0..000000000000 --- a/_unused_topics/container_storage_interface_microshift/snippets +++ /dev/null @@ -1 +0,0 @@ -../../snippets \ No newline at end of file diff --git a/_unused_topics/customize-certificates-api-add-default.adoc b/_unused_topics/customize-certificates-api-add-default.adoc deleted file mode 100644 index a70aeb11709a..000000000000 --- a/_unused_topics/customize-certificates-api-add-default.adoc +++ /dev/null @@ -1,56 +0,0 @@ -// Module included in the following assemblies:
//
// * security/certificates/api-server.adoc

[id="add-default-api-server_{context}"]
= Add an API server default certificate

To allow clients outside the cluster to validate the API server's certificate, you can replace the default certificate with one that is issued by a public or organizational CA.

.Prerequisites

* You must have a valid certificate and key in the PEM format.

.Procedure

. Create a secret that contains the certificate and key in the `openshift-config` namespace.
+
----
$ oc create secret tls <secret_name> \//<1>
  --cert=</path/to/cert.crt> \//<2>
  --key=</path/to/cert.key> \//<3>
  -n openshift-config
----
<1> `<secret_name>` is the name of the secret that will contain the certificate.
<2> `</path/to/cert.crt>` is the path to the certificate on your local file system.
<3> `</path/to/cert.key>` is the path to the private key associated with this certificate.

. Update the API server to reference the created secret.
+
----
$ oc patch apiserver cluster \
  --type=merge -p \
  '{"spec": {"servingCerts": {"defaultServingCertificate":
  {"name": "<secret_name>"}}}}' <1>
----
<1> Replace `<secret_name>` with the name used for the secret in the previous step.

. Examine the `apiserver/cluster` object and confirm the secret is now referenced.
+
----
$ oc get apiserver cluster -o yaml
...
spec:
  servingCerts:
    defaultServingCertificate:
      name: <secret_name>
...
---- diff --git a/_unused_topics/deploying-local-provisioner.adoc b/_unused_topics/deploying-local-provisioner.adoc deleted file mode 100644 index bfef02c41d1f..000000000000 --- a/_unused_topics/deploying-local-provisioner.adoc +++ /dev/null @@ -1,20 +0,0 @@ -[id="deploying-local-provisioner_{context}"]
= Deploying the local provisioner

Deploy the local provisioner so that it can create PVs for your preconfigured local volumes and clean them up for reuse.

.Prerequisites

* Before starting the provisioner, mount all local devices and create a ConfigMap with storage classes and their directories.

.Procedure

. Install the local provisioner from the `*local-storage-provisioner-template.yaml*` file.
. 
Create a service account that allows running Pods as a root user, using hostPath volumes, and using any SELinux context to monitor, manage, and clean local volumes, for example: -+ ----- -$ oc create serviceaccount local-storage-admin -$ oc adm policy add-scc-to-user privileged -z local-storage-admin ----- -+ -To allow the provisioner Pod to delete content on local volumes created by any Pod, root privileges and any SELinux context are required. hostPath is required to access the `*/mnt/local-storage*` path on the host. diff --git a/_unused_topics/exploring-cvo.adoc b/_unused_topics/exploring-cvo.adoc deleted file mode 100644 index 416394623c91..000000000000 --- a/_unused_topics/exploring-cvo.adoc +++ /dev/null @@ -1,70 +0,0 @@ -// Module included in the following assemblies: -// -// * - -[id="exploring-cvo_{context}"] -= Exploring the CVO - -To see the current version that your cluster is on, type: - ----- -$ oc get clusterversion - -NAME    VERSION   AVAILABLE PROGRESSING SINCE STATUS -version 4.5.4 True      False       10h   Cluster version is 4.5.4 ----- - -Each release version is represented by a set of images. To see basic release information and a list of those images, type: - ----- -$ oc adm release info - -Name:          4.0.0-0.7 -Digest:        sha256:641c0e4f550af59ec20349187a31751ae5108270f13332d1771935520ebf34c1 -Created:   2019-03-05 13:33:12 -0500 EST -OS/Arch:   linux/amd64 -Manifests: 248 -Release Metadata: -  Version:  4.0.0-0.7 -  Upgrades: 4.0.0-0.6 -  Metadata: -        description: Beta 2 -Component Versions: -  Kubernetes 1.13.4 -Images: -  NAME                        DIGEST -  aws-machine-controllers     sha256:630e8118038ee97b8b3bbfed7d9b63e06c1346c606e11908064ea3f57bd9ff8e -  cli                         sha256:93e16a8c56ec4031b5fa68683f75910aad57b54160a1e6054b3d3e96d9a4b376 -  cloud-credential-operator   sha256:bbc8d586b2210ac44de554558fd299555e72fb662b6751589d69b173b03aa821 -…​ ----- - -To see the Operators managed on the control plane by the Cluster Version Operator, type: - ----- -$ oc get clusteroperator -NAME                                 VERSION  AVAILABLE PROGRESSING DEGRADED SINCE -cluster-autoscaler                            True      False       False   10h -cluster-storage-operator                      True      False       False   10h -console                                       True      False       False   10h -dns                                           True      False       False   10h -image-registry                                True      False       False   10h -ingress                                       True      False       False   10h -kube-apiserver                                True      False       False   10h -kube-controller-manager                       True      False       False   10h -kube-scheduler                                True      False       False   10h -machine-api                                   True      False       False   10h -machine-config                                True      False       False   10h -marketplace-operator                          True      False       False   10h -monitoring                                    True      False       False   156m -network                                       True      False       False   139m -node-tuning                                   True      False       False   10h -openshift-apiserver                           True      False       False   19m -openshift-authentication                      True      False       False   10h 
-openshift-cloud-credential-operator           True      False       False   10h -openshift-controller-manager                  True      False       False   10h -openshift-samples                             True      False       False   10h -operator-lifecycle-manager                    True      False       False   10h ----- - -While most of the Cluster Operators listed provide services to the {product-title} cluster, the machine-config Operator in particular is tasked with managing the {op-system} operating systems in the nodes. diff --git a/_unused_topics/identity-provider-create-CR.adoc b/_unused_topics/identity-provider-create-CR.adoc deleted file mode 100644 index 8014c4ae6ab6..000000000000 --- a/_unused_topics/identity-provider-create-CR.adoc +++ /dev/null @@ -1,47 +0,0 @@ -// Module included in the following assemblies: -// -// * authentication/identity_providers/configuring-allow-all-identity-provider.adoc -// * authentication/identity_providers/configuring-deny-all-identity-provider.adoc -// * authentication/identity_providers/configuring-htpasswd-identity-provider.adoc -// * authentication/identity_providers/configuring-keystone-identity-provider.adoc -// * authentication/identity_providers/configuring-ldap-identity-provider.adoc -// * authentication/identity_providers/configuring-basic-authentication-identity-provider.adoc -// * authentication/identity_providers/configuring-request-header-identity-provider.adoc -// * authentication/identity_providers/configuring-github-identity-provider.adoc -// * authentication/identity_providers/configuring-gitlab-identity-provider.adoc -// * authentication/identity_providers/configuring-google-identity-provider.adoc -// * authentication/identity_providers/configuring-oidc-identity-provider.adoc - -[id="identity-provider-create-CR_{context}"] -= Creating the CR that describes an identity provider - -Before you can add an identity provider to your cluster, create a Custom -Resource (CR) that describes it. - -.Prerequisites - -* Create an {product-title} cluster. - -.Procedure - -Create a CR file to describe the identity provider. A generic file displaying -the structure is below. - ----- -apiVersion: config.openshift.io/v1 -kind: OAuth -metadata: - name: cluster -spec: - identityProviders: - - name: my_identity_provider <1> - mappingMethod: claim <2> - type: <3> - ... ----- -<1> A unique name defining the identity provider. This provider name is -prefixed to provider user names to form an identity name. -<2> Controls how mappings are established between this provider's identities and user objects. -<3> The type of identity provider to be configured. -+ -Provide the parameters that are required for your identity provider type. diff --git a/_unused_topics/images-s2i-java-pulling-images.adoc b/_unused_topics/images-s2i-java-pulling-images.adoc deleted file mode 100644 index dc9604744b07..000000000000 --- a/_unused_topics/images-s2i-java-pulling-images.adoc +++ /dev/null @@ -1,23 +0,0 @@ -// Module included in the following assemblies: -// -// * openshift_images/using_images/using-images-source-to-image.adoc -// * Unused. Can be removed by 4.9 if still unused. Request full peer review for the module if it’s used. - -[id="images-s2i-java-pulling-images_{context}"] -= Pulling images for Java - -The Red Hat Enterprise Linux (RHEL) 8 image is available through the Red Hat Registry. - -.Procedure - -. 
To pull the RHEL 8 image, enter the following command: -[source,terminal] ----- -$ podman pull registry.redhat.io/redhat-openjdk-18/openjdk18-openshift ----- - -To use this image on {product-title}, you can either access it directly from the Red Hat Registry or push it into your {product-title} container image registry. Additionally, you can create an image stream that points to the image, either in your container image registry or at the external location. - -//// -Your {product-title} resources can then reference the link:https://github.com/jboss-openshift/application-templates/blob/master/jboss-image-streams.json[image stream definition]. -//// diff --git a/_unused_topics/images-s2i-nodejs-pulling-images.adoc b/_unused_topics/images-s2i-nodejs-pulling-images.adoc deleted file mode 100644 index 32fab99ea8ce..000000000000 --- a/_unused_topics/images-s2i-nodejs-pulling-images.adoc +++ /dev/null @@ -1,50 +0,0 @@ -// Module included in the following assemblies: -// -// * openshift_images/using_images/using-images-source-to-image.adoc -// * Unused. Can be removed by 4.9 if still unused. Request full peer review for the module if it’s used. - -[id="images-s2i-nodejs-pulling-images_{context}"] -= Pulling images for Node.js - -//These images come in two options: - -//* RHEL 8 -//* CentOS 7 - -//*RHEL 8 Based Images* - -The RHEL 8 images are available through the Red Hat Registry. - -.Procedure - -* To pull the RHEL 8 image, enter the following command for the version of Node.js you want: -+ -.Node.js `12` -[source,terminal] ----- -$ podman pull registry.redhat.io/rhscl/nodejs-12-rhel7:latest ----- -+ -.Node.js `10` -[source,terminal] ----- -$ podman pull registry.redhat.io/rhscl/nodejs-10-rhel7:latest ----- - -//// -*CentOS 7 Based Image* - -This image is available on link:quay.io[Quay.io]. - -.Procedure - -* To pull the CentOS 7 image, enter the following command: -+ -[source,terminal] ----- -$ podman pull openshift/nodejs-010-centos7 ----- -//// - -To use these images, you can either access them directly from registry.redhat.io, or push them into your {product-registry}. Additionally, you can create an image stream that points to the image, either in your container image registry or at the external location. Your {product-title} resources can then reference the -image stream. diff --git a/_unused_topics/images-using-images-s2i-perl-configuration.adoc b/_unused_topics/images-using-images-s2i-perl-configuration.adoc deleted file mode 100644 index 563ba407e4be..000000000000 --- a/_unused_topics/images-using-images-s2i-perl-configuration.adoc +++ /dev/null @@ -1,44 +0,0 @@ -// Module included in the following assemblies: -// -// * openshift_images/using_images/using-images-source-to-image.adoc -// * Unused. Can be removed by 4.9 if still unused. Request full peer review for the module if it’s used. - -[id="images-using-images-s2i-perl-configuration_{context}"] -= Configuring source-to-image for Perl - -The Perl image supports a number of environment variables which can be set to control the configuration and behavior of the Perl runtime. - -To set these environment variables as part of your image, you can place them into -a `.s2i/environment` file inside your source code repository, or define them in -the environment section of the build configuration's `sourceStrategy` definition. - -You can also set environment variables to be used with an existing image when creating new applications, or by updating environment variables for existing objects such as deployment configurations. 
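
For example, a `.s2i/environment` file that turns on automatic reloading of modified Perl modules contains the following line. This is a sketch only; the variable is described in the table that follows:

[source,text]
----
PERL_APACHE2_RELOAD=true
----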
- -[NOTE] -==== -Environment variables that control build behavior must be set as part of the source-to-image (S2I) build configuration or in the `.s2i/environment` file to make them available to the build steps. -==== - -.Perl Environment Variables -[cols="4a,6a",options="header"] -|=== - -|Variable name |Description - -|`ENABLE_CPAN_TEST` -|When set to `true`, this variable installs all the cpan modules and runs their tests. By default, the testing of the modules is disabled. - -|`CPAN_MIRROR` -|This variable specifies a mirror URL which cpanminus uses to install dependencies. By default, this URL is not specified. - -|`PERL_APACHE2_RELOAD` -|Set this to `true` to enable automatic reloading of modified Perl modules. By default, automatic reloading is disabled. - -|`HTTPD_START_SERVERS` -|The https://httpd.apache.org/docs/2.4/mod/mpm_common.html#startservers[StartServers] directive sets the number of child server processes created on startup. Default is 8. - -|`HTTPD_MAX_REQUEST_WORKERS` -|Number of simultaneous requests that will be handled by Apache. The default is 256, but it will be automatically lowered if memory is limited. -|=== - -//Verify` oc log` is still valid. diff --git a/_unused_topics/images-using-images-s2i-perl-hot-deploying.adoc b/_unused_topics/images-using-images-s2i-perl-hot-deploying.adoc deleted file mode 100644 index de276ad98264..000000000000 --- a/_unused_topics/images-using-images-s2i-perl-hot-deploying.adoc +++ /dev/null @@ -1,28 +0,0 @@ -// Module included in the following assemblies: -// -// * openshift_images/using_images/using-images-source-to-image.adoc -// * Unused. Can be removed by 4.9 if still unused. Request full peer review for the module if it’s used. - -[id="images-using-images-s2i-perl-hot-deploying_{context}"] -= Hot deploying for Perl - -Hot deployment allows you to quickly make and deploy changes to your application -without having to generate a new S2I build. To enable hot deployment in this -image, you must set the `PERL_APACHE2_RELOAD` environment variable to `true`. You can use the `oc set env` command to update environment variables of existing objects. - -[WARNING] -==== -You should only use this option while developing or debugging. It is not recommended to turn this on in your production environment. -==== - -.Procedure - -. To change your source code in a running pod, use the `oc rsh` command to enter the container: -+ -[source,terminal] ----- -$ oc rsh ----- -+ -After you enter into the running container, your current directory is set to -`/opt/app-root/src`, where the source code is located. diff --git a/_unused_topics/images-using-images-s2i-perl-pulling-images.adoc b/_unused_topics/images-using-images-s2i-perl-pulling-images.adoc deleted file mode 100644 index 996f9b752d74..000000000000 --- a/_unused_topics/images-using-images-s2i-perl-pulling-images.adoc +++ /dev/null @@ -1,50 +0,0 @@ -// Module included in the following assemblies: -// -// * openshift_images/using_images/using-images-source-to-image.adoc -// * Unused. Can be removed by 4.9 if still unused. Request full peer review for the module if it’s used. - -[id="images-using-images-s2i-perl-pulling-images_{context}"] -= Pulling images for Perl - -//Images comes in two options: - -//* RHEL 8 -//* CentOS 7 - -// *RHEL 8 Based Images* - -The RHEL 8 images are available through the Red Hat Registry. 
- -.Procedure - -* To pull the RHEL 8 image, enter the following command for the version of Perl you want: -+ -.Perl `5.26` -[source,terminal] ----- -$ podman pull registry.redhat.io/rhscl/perl-526-rhel7:latest ----- -+ -.Perl `5.30` -[source,terminal] ----- -$ podman pull registry.redhat.io/rhscl/perl-530-rhel7:latest ----- - -//// -*CentOS 7 Based Image* - -A CentOS image for Perl 5.16 is available on link:quay.io[Quay.io]. - -.Procedure - -* To pull the CentOS 7 image, enter the following command: -+ -[source,terminal] ----- -$ podman pull openshift/perl-516-centos7 ----- -//// - -To use these images, you can either access them directly from registry.redhat.io, or push them into your {product-registry}. Additionally, you can create an image stream that points to the image, either in your container image registry or at the external location. Your {product-title} resources can then reference the -image stream. diff --git a/_unused_topics/images-using-images-s2i-perl.adoc b/_unused_topics/images-using-images-s2i-perl.adoc deleted file mode 100644 index 01277ff90a72..000000000000 --- a/_unused_topics/images-using-images-s2i-perl.adoc +++ /dev/null @@ -1,13 +0,0 @@ -// Module included in the following assemblies: -// -// * openshift_images/using_images/using-images-source-to-image.adoc -// * Unused. Can be removed by 4.9 if still unused. Request full peer review for the module if it’s used. - -[id="images-using-images-s2i-perl_{context}"] -= Perl overview - -{product-title} provides source-to-image (S2I) enabled Perl images for building and running Perl applications. The Perl S2I builder image assembles your application source with any required dependencies to create a new image containing your Perl application. This resulting image can be run either by {product-title} or by a container runtime. - -[id="images-using-images-s2i-perl-accessing-logs_{context}"] -== Accessing logs -Access logs are streamed to standard output and as such they can be viewed using the `oc logs` command. Error logs are stored in the `/tmp/error_log` file, which can be viewed using the `oc rsh` command to access the container. diff --git a/_unused_topics/images-using-images-s2i-php-configuration.adoc b/_unused_topics/images-using-images-s2i-php-configuration.adoc deleted file mode 100644 index 7e2ec6f6d7fd..000000000000 --- a/_unused_topics/images-using-images-s2i-php-configuration.adoc +++ /dev/null @@ -1,116 +0,0 @@ -// Module included in the following assemblies: -// -// * openshift_images/using_images/using-images-source-to-image.adoc -// * Unused. Can be removed by 4.9 if still unused. Request full peer review for the module if it’s used. - -[id="images-using-images-s2i-php-configuration_{context}"] -= Configuring source-to-image for PHP - -The PHP image supports a number of environment variables which can be set to control the configuration and behavior of the PHP runtime. - -To set these environment variables as part of your image, you can place them into a `.s2i/environment` file inside your source code repository, or define them in the environment section of the build configuration's `sourceStrategy` definition. - -You can also set environment variables to be used with an existing image when creating new applications, or by updating environment variables for existing objects such as deployment configurations. 
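
For example, a build configuration that sets one of these variables in its `sourceStrategy` definition might include the following stanza. This is a sketch only; the variable and value shown are illustrative:

[source,yaml]
----
strategy:
  sourceStrategy:
    env:
    - name: OPCACHE_REVALIDATE_FREQ
      value: "0"
----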
- -[NOTE] -==== -Environment variables that control build behavior must be set as part of the source-to-image (S2I) build configuration or in the `.s2i/environment` file to make them available to the build steps. -==== - -The following environment variables set their equivalent property value in the -`php.ini` file: - -.PHP Environment Variables -[cols="4a,6a,6a",options="header"] -|=== - -|Variable Name |Description |Default - -|`ERROR_REPORTING` -|Informs PHP of the errors, warnings, and notices for which you would like it to -take action. -|`E_ALL & ~E_NOTICE` - -|`DISPLAY_ERRORS` -|Controls if and where PHP outputs errors, notices, and warnings. -|`ON` - -|`DISPLAY_STARTUP_ERRORS` -|Causes any display errors that occur during PHP's startup sequence to be -handled separately from display errors. -|`OFF` - -|`TRACK_ERRORS` -|Stores the last error/warning message in `$php_errormsg` (boolean). -|`OFF` - -|`HTML_ERRORS` -|Links errors to documentation that is related to the error. -|`ON` - -|`INCLUDE_PATH` -|Path for PHP source files. -|`.:/opt/openshift/src:/opt/rh/php55/root/usr/share/pear` - -|`SESSION_PATH` -|Location for session data files. -|`/tmp/sessions` - -|`DOCUMENTROOT` -|Path that defines the document root for your application (for example, `/public`). -|`/` -|=== - -The following environment variable sets its equivalent property value in the -`opcache.ini` file: - -.Additional PHP settings -[cols="3a,6a,1a",options="header"] -|=== - -|Variable Name |Description |Default - -|`OPCACHE_MEMORY_CONSUMPTION` -|The link:http://php.net/manual/en/book.opcache.php[OPcache] shared memory -storage size. -|`16M` - -|`OPCACHE_REVALIDATE_FREQ` -|How often to check script time stamps for updates, in seconds. `0` results in -link:http://php.net/manual/en/book.opcache.php[OPcache] checking for updates on -every request. -|`2` -|=== - -You can also override the entire directory used to load the PHP configuration by setting: - -.Additional PHP settings -[cols="3a,6a",options="header"] -|=== - -| Variable Name | Description - -|`PHPRC` -|Sets the path to the `php.ini` file. - -|`*PHP_INI_SCAN_DIR*` -|Path to scan for additional `.ini` configuration files -|=== - -You can use a custom composer repository mirror URL to download packages instead of the default `packagist.org`: - -.Composer Environment Variables -[cols="4a,6a",options="header"] -|=== - -|Variable Name |Description - -|`COMPOSER_MIRROR` -|Set this variable to use a custom Composer repository mirror URL to download required packages during the build process. -Note: This only affects packages listed in `composer.json`. -|=== - -[id="images-using-images-s2i-php-apache-configuration_{context}"] -== Apache configuration - -If the `DocumentRoot` of the application is nested in the source directory `/opt/openshift/src`, you can provide your own `.htaccess` file to override the default Apache behavior and specify how application requests should be handled. The `.htaccess` file must be located at the root of the application source. diff --git a/_unused_topics/images-using-images-s2i-php-hot-deploying.adoc b/_unused_topics/images-using-images-s2i-php-hot-deploying.adoc deleted file mode 100644 index f8a852dd3447..000000000000 --- a/_unused_topics/images-using-images-s2i-php-hot-deploying.adoc +++ /dev/null @@ -1,27 +0,0 @@ -// Module included in the following assemblies: -// -// * openshift_images/using_images/using-images-source-to-image.adoc -// * Unused. Can be removed by 4.9 if still unused. 
Request full peer review for the module if it’s used. - -[id="images-using-images-s2i-hot-deploying_{context}"] -= Hot deploying for PHP - -Hot deployment allows you to quickly make and deploy changes to your application without having to generate a new source-to-image (S2I) build. In order to immediately pick up changes made in your application source code, you must run your built image with the `OPCACHE_REVALIDATE_FREQ=0` environment variable. - -You can use the `oc env` command to update environment variables of existing objects. - -[WARNING] -==== -You should only use this option while developing or debugging. It is not recommended to turn this on in your production environment. -==== - -.Procedure - -. To change your source code in a running pod, use the `oc rsh` command to enter the container: -+ -[source,terminal] ----- -$ oc rsh ----- - -After you enter into the running container, your current directory is set to `/opt/app-root/src`, where the source code is located. diff --git a/_unused_topics/images-using-images-s2i-php-pulling-images.adoc b/_unused_topics/images-using-images-s2i-php-pulling-images.adoc deleted file mode 100644 index 51691eb98a56..000000000000 --- a/_unused_topics/images-using-images-s2i-php-pulling-images.adoc +++ /dev/null @@ -1,55 +0,0 @@ -// Module included in the following assemblies: -// -// * openshift_images/using_images/using-images-source-to-image.adoc -// * Unused. Can be removed by 4.9 if still unused. Request full peer review for the module if it’s used. - -[id="images-using-images-s2i-php-pulling-images_{context}"] -= Pulling images for PHP - -//These images come in two options: - -//* RHEL 8 -//* CentOS 7 - -The RHEL 8 images are available through the Red Hat Registry. - -.Procedure - -* To pull the RHEL 8 image, enter the following command for the version of PHP you want: - -.PHP `8.1` -[source,terminal] ----- -$ podman pull registry.redhat.io/ubi9/php-81:latest ----- -+ -.PHP `7.3` -[source,terminal] ----- -$ podman pull registry.redhat.io/rhscl/php-73-rhel7:latest ----- - -//// -*CentOS 7 Based Images* - -CentOS images for PHP 5.5 and 5.6 are available on link:quay.io[Quay.io]. - -.Procedure - -* To pull the CentOS 7 image, enter the following command for the version of Node.js you want: -+ -.PHP `5.5` -[source,terminal] ----- -$ podman pull openshift/php-55-centos7 ----- -+ -.PHP `5.6` -[source,terminal] ----- -$ podman pull openshift/php-56-centos7 ----- -//// - -To use these images, you can either access them directly from registry.redhat.io, or push them into your {product-registry}. Additionally, you can create an image stream that points to the image, either in your container image registry or at the external location. Your {product-title} resources can then reference the -image stream. diff --git a/_unused_topics/images-using-images-s2i-php.adoc b/_unused_topics/images-using-images-s2i-php.adoc deleted file mode 100644 index 116276a93b06..000000000000 --- a/_unused_topics/images-using-images-s2i-php.adoc +++ /dev/null @@ -1,14 +0,0 @@ -// Module included in the following assemblies: -// -// * openshift_images/using_images/using-images-source-to-image.adoc -// * Unused. Can be removed by 4.9 if still unused. Request full peer review for the module if it’s used. 
- -[id="images-using-images-s2i-php_{context}"] -= PHP overview - -{product-title} provides source-to-image (S2I) enabled PHP images for building and running PHP applications.The PHP S2I builder image assembles your application source with any required dependencies to create a new image containing your PHP application. This resulting image can be run either by {product-title} or by a container runtime. - -[id="images-using-images-s2i-php-accessing-logs_{context}"] -== Accessing logs - -Access logs are streamed to standard out and as such they can be viewed using the `oc logs` command. Error logs are stored in the `/tmp/error_log` file, which can be viewed using the `oc rsh` command to access the container. diff --git a/_unused_topics/images-using-images-s2i-python-configuration.adoc b/_unused_topics/images-using-images-s2i-python-configuration.adoc deleted file mode 100644 index f2dfd34cbbb9..000000000000 --- a/_unused_topics/images-using-images-s2i-python-configuration.adoc +++ /dev/null @@ -1,48 +0,0 @@ -// Module included in the following assemblies: -// -// * openshift_images/using_images/using-images-source-to-image.adoc -// * Unused. Can be removed by 4.9 if still unused. Request full peer review for the module if it’s used. - -[id="images-using-images-s2i-python-configuration_{context}"] -= Configuring source-to-image for Python - -The Python image supports a number of environment variables which can be set to control the configuration and behavior of the Python runtime. - -To set these environment variables as part of your image, you can place them into a `.s2i/environment` file inside your source code repository, or define them in the environment section of the build configuration's `sourceStrategy` definition. - -You can also set environment variables to be used with an existing image when creating new applications, or by updating environment variables for existing objects such as deployment configurations. - -[NOTE] -==== -Environment variables that control build behavior must be set as part of the source-to-image (S2I) build configuration or in the `.s2i/environment` file to make them available to the build steps. -==== - -.Python Environment Variables -[cols="4a,6a",options="header"] -|=== - -|Variable name |Description - -|`APP_FILE` -|This variable specifies the file name passed to the Python interpreter which is responsible for launching the application. This variable is set to `app.py` by default. - -|`APP_MODULE` -|This variable specifies the WSGI callable. It follows the pattern `$(MODULE_NAME):$(VARIABLE_NAME)`, where the module name is a full dotted path and the variable name refers to a function inside the specified module. If you use `setup.py` for installing the application, then the module name can be read from that file and the variable defaults to `application`. - -|`APP_CONFIG` -|This variable indicates the path to a valid Python file with a http://docs.gunicorn.org/en/latest/configure.html[gunicorn configuration]. - -|`DISABLE_COLLECTSTATIC` -|Set it to a nonempty value to inhibit the execution of `manage.py collectstatic` during the build. Only affects Django projects. - -|`DISABLE_MIGRATE` -|Set it to a nonempty value to inhibit the execution of `manage.py migrate` when the produced image is run. Only affects Django projects. - -|`*PIP_INDEX_URL*` -| Set this variable to use a custom index URL or mirror to download required -packages during build process. This only affects packages listed in the -*_requirements.txt_* file. 

| `WEB_CONCURRENCY`
| Set this to change the default setting for the number of http://docs.gunicorn.org/en/stable/settings.html#workers[workers]. By default, this is set to the number of available cores times 4.
|=== diff --git a/_unused_topics/images-using-images-s2i-python-hot-deploying.adoc b/_unused_topics/images-using-images-s2i-python-hot-deploying.adoc deleted file mode 100644 index 03989935aebb..000000000000 --- a/_unused_topics/images-using-images-s2i-python-hot-deploying.adoc +++ /dev/null @@ -1,28 +0,0 @@ -// Module included in the following assemblies:
//
// * openshift_images/using_images/using-images-source-to-image.adoc
// * Unused. Can be removed by 4.9 if still unused. Request full peer review for the module if it’s used.

[id="images-using-images-s2i-python-hot-deploying_{context}"]
= Hot deploying

Hot deployment allows you to quickly make and deploy changes to your application without having to generate a new source-to-image (S2I) build. If you are using Django, hot deployment works out of the box.

To enable hot deployment while using Gunicorn, ensure you have a Gunicorn configuration file inside your repository with https://gunicorn-docs.readthedocs.org/en/latest/settings.html#reload[the `reload` option] set to `true`. Specify your configuration file using the `APP_CONFIG` environment variable. For example, see the `oc new-app` command. You can use the `oc set env` command to update environment variables of existing objects.

[WARNING]
====
You should only use this option while developing or debugging. It is not recommended to turn this on in your production environment.
====

.Procedure

. To change your source code in a running pod, use the `oc rsh` command to enter the container:
+
[source,terminal]
----
$ oc rsh
----

After you enter into the running container, your current directory is set to `/opt/app-root/src`, where the source code is located. diff --git a/_unused_topics/images-using-images-s2i-python-pulling-images.adoc b/_unused_topics/images-using-images-s2i-python-pulling-images.adoc deleted file mode 100644 index 0d90476cf0b7..000000000000 --- a/_unused_topics/images-using-images-s2i-python-pulling-images.adoc +++ /dev/null @@ -1,75 +0,0 @@ -// Module included in the following assemblies:
//
// * openshift_images/using_images/using-images-source-to-image.adoc
// * Unused. Can be removed by 4.9 if still unused. Request full peer review for the module if it’s used.

[id="images-using-images-s2i-python-pulling-images_{context}"]
= Pulling images for Python

//These images come in two options:

//* RHEL 8
//* CentOS 7

//*RHEL 8 Based Images*

These images are available through the Red Hat Registry.

.Procedure

* To pull an image, enter the following command for the version of Python you want:
+
.Python `2.7`
[source,terminal]
----
$ podman pull registry.redhat.io/rhscl/python-27-rhel7:latest
----
+
.Python `3.8`
[source,terminal]
----
$ podman pull registry.redhat.io/rhscl/python-38-rhel7:latest
----
+
.Python `3.9`
[source,terminal]
----
$ podman pull registry.redhat.io/ubi9/python-39:latest
----

////
*CentOS 7 Based Images*

These images are available on link:quay.io[Quay.io]. 
- -.Procedure - -* To pull the CentOS 7 image, enter the following command for the version of Python you want: -+ -.Python `2.7` -[source,terminal] ----- -$ podman pull centos/python-27-centos7 ----- -+ -.Python `3.3` -[source,terminal] ----- -$ podman pull openshift/python-33-centos7 ----- -+ -.Python `3.4` -[source,terminal] ----- -$ podman pull centos/python-34-centos7 ----- -+ -.Python `3.5` -[source,terminal] ----- -$ podman pull centos/python-35-centos7 ----- -//// - -To use these images, you can either access them directly from registry.redhat.io, or push them into your {product-registry}. Additionally, you can create an image stream that points to the image, either in your container image registry or at the external location. Your {product-title} resources can then reference the -image stream. diff --git a/_unused_topics/images-using-images-s2i-python.adoc b/_unused_topics/images-using-images-s2i-python.adoc deleted file mode 100644 index 92c996b56fa7..000000000000 --- a/_unused_topics/images-using-images-s2i-python.adoc +++ /dev/null @@ -1,9 +0,0 @@ -// Module included in the following assemblies: -// -// * openshift_images/using_images/using-images-source-to-image.adoc -// * Unused. Can be removed by 4.9 if still unused. Request full peer review for the module if it’s used. - -[id="images-using-images-s2i-python_{context}"] -= python overview - -{product-title} provides source-to-image (S2I) enabled Python images for building and running Python applications. The Python S2I builder image assembles your application source with any required dependencies to create a new image containing your Python application. This resulting image can be run either by {product-title} or by a container runtime. diff --git a/_unused_topics/images-using-images-s2i-ruby-configuration.adoc b/_unused_topics/images-using-images-s2i-ruby-configuration.adoc deleted file mode 100644 index 07841e122384..000000000000 --- a/_unused_topics/images-using-images-s2i-ruby-configuration.adoc +++ /dev/null @@ -1,43 +0,0 @@ -// Module included in the following assemblies: -// -// * openshift_images/using_images/using-images-source-to-image.adoc -// * Unused. Can be removed by 4.9 if still unused. Request full peer review for the module if it’s used. - -[id="images-using-images-s2i-ruby-configuration_{context}"] -= Configuring source-to-image for Ruby - -The Ruby image supports a number of environment variables which can be set to control the configuration and behavior of the Ruby runtime. - -To set these environment variables as part of your image, you can place them into a `_.s2i/environment` file inside your source code repository, or define them in the environment section of the build configuration's `sourceStrategy` definition. - -You can also set environment variables to be used with an existing image when creating new applications, or by updating environment variables for existing objects such as deployment configurations. - -[NOTE] -==== -Environment variables that control build behavior must be set as part of the source-to-image (S2I) build configuration or in the `.s2i/environment` file to make them available to the build steps. -==== - -.Ruby Environment Variables -[cols="4a,6a",options="header"] -|=== - -|Variable name |Description - -|`RACK_ENV` -|This variable specifies the environment within which the Ruby application is deployed, for example, `production`, `development`, or `test`. Each level has different behavior in terms of logging verbosity, error pages, and `ruby gem` installation. 
The application assets are only compiled if `RACK_ENV` is set to `production`. The default value is `production`. - -|`RAILS_ENV` -|This variable specifies the environment within which the Ruby on Rails application is deployed, for example, `production`, `development`, or `test`. Each level has different behavior in terms of logging verbosity, error pages, and `ruby gem` installation. The application assets are only compiled if `RAILS_ENV` is set to `production`. This variable is set to `${RACK_ENV}` by default. - -|`DISABLE_ASSET_COMPILATION` -|When set to `true`, this variable disables the process of asset compilation. Asset compilation only happens when the application runs in a production environment. Therefore, you can use this variable when assets have already been compiled. - -|`PUMA_MIN_THREADS`, `PUMA_MAX_THREADS` -|This variable indicates the minimum and maximum number of threads that will be available in Puma's thread pool. - -|`PUMA_WORKERS` -|This variable indicates the number of worker processes to be launched in Puma's clustered mode, when Puma runs more than two processes. If not explicitly set, the default behavior sets `PUMA_WORKERS` to a value that is appropriate for the memory available to the container and the number of cores on the host. - -|`RUBYGEM_MIRROR` -|Set this variable to use a custom RubyGems mirror URL to download required gem packages during the build process. This environment variable is only available for Ruby 2.2+ images. -|=== diff --git a/_unused_topics/images-using-images-s2i-ruby-hot-deploying.adoc b/_unused_topics/images-using-images-s2i-ruby-hot-deploying.adoc deleted file mode 100644 index 6463af2986fb..000000000000 --- a/_unused_topics/images-using-images-s2i-ruby-hot-deploying.adoc +++ /dev/null @@ -1,50 +0,0 @@ -// Module included in the following assemblies: -// -// * openshift_images/using_images/using-images-source-to-image.adoc -// * Unused. Can be removed by 4.9 if still unused. Request full peer review for the module if it’s used. - -[id="images-using-images-s2i-ruby-hot-deploying_{context}"] -== Hot deploying for Ruby - -Hot deployment allows you to quickly make and deploy changes to your application without having to generate a new source-to-image (S2I) build. The method for enabling hot deployment in this image differs based on the application type. - -*Ruby on Rails applications* - -.Procedure - -For Ruby on Rails application, run the built Rails application with the `RAILS_ENV=development` environment variable passed to the running pod. - -* For an existing deployment configuration, you can use the `oc set env` command: -+ -[source,terminal] ----- -$ oc set env dc/rails-app RAILS_ENV=development ----- - -*Other Types of Ruby applications such as Sinatra or Padrino* - -For other types of Ruby applications, your application must be built with a gem that can reload the server every time a change to the source code is made inside the running container. Those gems are: - -* Shotgun -* Rerun -* Rack-livereload - -To be able to run your application in development mode, you must modify the S2I `run` script so that the web server is launched by the chosen gem, which checks for changes in the source code. - -After you build your application image with your version of the S2I `run` script, run the image with the `RACK_ENV=development` environment variable. For example, you can use the `oc new-app` command. You can use the `oc set env` command to update environment variables of existing objects. 
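
For example, a sketch of both approaches might look like the following; the source repository and application name are placeholders:

[source,terminal]
----
$ oc new-app ruby~https://github.com/<your_user>/<your_app>.git -e RACK_ENV=development
$ oc set env dc/<your_app> RACK_ENV=development
----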
- -[WARNING] -==== -You should only use this option while developing or debugging. It is not recommended to turn this on in your production environment. -==== - -.Procedure - -. To change your source code in a running pod, use the `oc rsh` command to enter the container: -+ -[source,terminal] ----- -$ oc rsh ----- - -After you enter into the running container, your current directory is set to `/opt/app-root/src`, where the source code is located. diff --git a/_unused_topics/images-using-images-s2i-ruby-pulling-images.adoc b/_unused_topics/images-using-images-s2i-ruby-pulling-images.adoc deleted file mode 100644 index 9829367e28eb..000000000000 --- a/_unused_topics/images-using-images-s2i-ruby-pulling-images.adoc +++ /dev/null @@ -1,69 +0,0 @@ -// Module included in the following assemblies: -// -// * openshift_images/using_images/using-images-source-to-image.adoc -// * Unused. Can be removed by 4.9 if still unused. Request full peer review for the module if it’s used. - -[id="images-using-images-s2i-ruby-pulling-images_{context}"] -= Pulling images for Ruby - -//These images come in two options: - -//* RHEL 8 -//* CentOS 7 - -//*RHEL 8 Based Images* - -The RHEL 8 images are available through the Red Hat Registry. - -.Procedure - -* To pull the RHEL 8 image, enter the following command for the version of Ruby you want: -+ -.Ruby `2.5` -[source,terminal] ----- -$ podman pull registry.redhat.io/rhscl/ruby-25-rhel7:latest ----- -+ -.Ruby `2.6` -[source,terminal] ----- -$ podman pull registry.redhat.io/rhscl/ruby-26-rhel7:latest ----- -+ -.Ruby `2.7` -[source,terminal] ----- -$ podman pull registry.redhat.io/rhscl/ruby-27-rhel7:latest ----- - -//// -*CentOS 7 Based Images* - -These images are available on link:quay.io[Quay.io]. - -.Procedure - -* To pull the CentOS 7 image, enter the following command for the version of Ruby you want: -+ -.Ruby `2.0` -[source,terminal] ----- -$ podman pull openshift/ruby-20-centos7 ----- -+ -.Ruby `2.2` -[source,terminal] ----- -$ podman pull openshift/ruby-22-centos7 ----- -+ -.Ruby `2.3` -[source,terminal] ----- -$ podman pull centos/ruby-23-centos7 ----- -//// - -To use these images, you can either access them directly from registry.redhat.io, or push them into your {product-registry}. Additionally, you can create an image stream that points to the image, either in your container image registry or at the external location. Your {product-title} resources can then reference the -image stream. diff --git a/_unused_topics/images-using-images-s2i-ruby.adoc b/_unused_topics/images-using-images-s2i-ruby.adoc deleted file mode 100644 index feed3359d273..000000000000 --- a/_unused_topics/images-using-images-s2i-ruby.adoc +++ /dev/null @@ -1,9 +0,0 @@ -// Module included in the following assemblies: -// -// * openshift_images/using_images/using-images-source-to-image.adoc -// * Unused. Can be removed by 4.9 if still unused. Request full peer review for the module if it’s used. - -[id="images-using-images-s2i-ruby_{context}"] -= Ruby overview - -{product-title} provides source-to-image (S2I) enabled Ruby images for building and running Ruby applications. The Ruby S2I builder image assembles your application source with any required dependencies to create a new image containing your Ruby application. This resulting image can be run either by {product-title} or by a container runtime. 
diff --git a/_unused_topics/installation-about-custom.adoc b/_unused_topics/installation-about-custom.adoc deleted file mode 100644 index 8e26117c63b6..000000000000 --- a/_unused_topics/installation-about-custom.adoc +++ /dev/null @@ -1,52 +0,0 @@ -// Module included in the following assemblies: -// -// * orphaned - -[id="installation-about-custom_{context}"] -= About the custom installation - -You can use the {product-title} installation program to customize four levels -of the program: - -* {product-title} itself -* The cluster platform -* Kubernetes -* The cluster operating system - -Changes to {product-title} and its platform are managed and supported, but -changes to Kubernetes and the cluster operating system currently are not. If -you customize unsupported levels program levels, future installation and -upgrades might fail. - -When you select values for the prompts that the installation program presents, -you customize {product-title}. You can further modify the cluster platform -by modifying the `install-config.yaml` file that the installation program -uses to deploy your cluster. In this file, you can make changes like setting the -number of machines that the control plane uses, the type of virtual machine -that the cluster deploys, or the CIDR range for the Kubernetes service network. - -It is possible, but not supported, to modify the Kubernetes objects that are injected into the cluster. -A common modification is additional manifests in the initial installation. -No validation is available to confirm the validity of any modifications that -you make to these manifests, so if you modify these objects, you might render -your cluster non-functional. -[IMPORTANT] -==== -Modifying the Kubernetes objects is not supported. -==== - -Similarly it is possible, but not supported, to modify the -Ignition config files for the bootstrap and other machines. No validation is -available to confirm the validity of any modifications that -you make to these Ignition config files, so if you modify these objects, you might render -your cluster non-functional. - -[IMPORTANT] -==== -Modifying the Ignition config files is not supported. -==== - -To complete a custom installation, you use the installation program to generate -the installation files and then customize them. -The installation status is stored in a hidden -file in the asset directory and contains all of the installation files. diff --git a/_unused_topics/installation-creating-worker-machineset.adoc b/_unused_topics/installation-creating-worker-machineset.adoc deleted file mode 100644 index fab07717826c..000000000000 --- a/_unused_topics/installation-creating-worker-machineset.adoc +++ /dev/null @@ -1,144 +0,0 @@ -// Module included in the following assemblies: -// -// * none - -[id="installation-creating-worker-machineset_{context}"] -= Creating worker nodes that the cluster manages - -After your cluster initializes, you can create workers that are controlled by -a MachineSet in your Amazon Web Services (AWS) user-provisioned Infrastructure -cluster. - -.Prerequisites - -* Install a cluster on AWS using infrastructer that you provisioned. - -.Procedure - -. Optional: Launch worker nodes that are controlled by the machine API. -. 
View the list of MachineSets in the `openshift-machine-api` namespace: -+ ----- -$ oc get machinesets --namespace openshift-machine-api -NAME DESIRED CURRENT READY AVAILABLE AGE -test-tkh7l-worker-us-east-2a 1 1 11m -test-tkh7l-worker-us-east-2b 1 1 11m -test-tkh7l-worker-us-east-2c 1 1 11m ----- -+ -Note the `NAME` of each MachineSet. Because you use a different subnet than the -installation program expects, the worker MachineSets do not use the correct -network settings. You must edit each of these MachineSets. - -. Edit each worker MachineSet to provide the correct values for your cluster: -+ ----- -$ oc edit machineset --namespace openshift-machine-api test-tkh7l-worker-us-east-2a -o yaml -apiVersion: machine.openshift.io/v1beta1 -kind: MachineSet -metadata: - creationTimestamp: 2019-03-14T14:03:03Z - generation: 1 - labels: - machine.openshift.io/cluster-api-cluster: test-tkh7l - machine.openshift.io/cluster-api-machine-role: worker - machine.openshift.io/cluster-api-machine-type: worker - name: test-tkh7l-worker-us-east-2a - namespace: openshift-machine-api - resourceVersion: "2350" - selfLink: /apis/machine.openshift.io/v1beta1/namespaces/openshift-machine-api/machinesets/test-tkh7l-worker-us-east-2a - uid: e2a6c8a6-4661-11e9-a9b0-0296069fd3a2 -spec: - replicas: 1 - selector: - matchLabels: - machine.openshift.io/cluster-api-cluster: test-tkh7l - machine.openshift.io/cluster-api-machineset: test-tkh7l-worker-us-east-2a - template: - metadata: - creationTimestamp: null - labels: - machine.openshift.io/cluster-api-cluster: test-tkh7l - machine.openshift.io/cluster-api-machine-role: worker - machine.openshift.io/cluster-api-machine-type: worker - machine.openshift.io/cluster-api-machineset: test-tkh7l-worker-us-east-2a - spec: - metadata: - creationTimestamp: null - providerSpec: - value: - ami: - id: ami-07e0e0e0035b5a3fe <1> - apiVersion: awsproviderconfig.openshift.io/v1beta1 - blockDevices: - - ebs: - iops: 0 - volumeSize: 120 - volumeType: gp2 - credentialsSecret: - name: aws-cloud-credentials - deviceIndex: 0 - iamInstanceProfile: - id: test-tkh7l-worker-profile - instanceType: m4.large - kind: AWSMachineProviderConfig - metadata: - creationTimestamp: null - placement: - availabilityZone: us-east-2a - region: us-east-2 - publicIp: null - securityGroups: - - filters: - - name: tag:Name - values: - - test-tkh7l-worker-sg <2> - subnet: - filters: - - name: tag:Name - values: - - test-tkh7l-private-us-east-2a - tags: - - name: kubernetes.io/cluster/test-tkh7l - value: owned - userDataSecret: - name: worker-user-data - versions: - kubelet: "" -status: - fullyLabeledReplicas: 1 - observedGeneration: 1 - replicas: 1 ----- -<1> Specify the {op-system-first} AMI to use for your worker nodes. Use the same -value that you specified in the parameter values for your control plane and -bootstrap templates. -<2> Specify the name of the worker security group that you created in the form -`-worker-sg`. `` is the same -infrastructure name that you extracted from the Ignition config metadata, -which has the format `-`. - -//// -. Optional: Replace the `subnet` stanza with one that specifies the subnet -to deploy the machines on: -+ ----- -subnet: - filters: - - name: tag: <1> - values: - - test-tkh7l-private-us-east-2a <2> ----- -<1> Set the `` of the tag to `Name`, `ID`, or `ARN`. -<2> Specify the `Name`, `ID`, or `ARN` value for the subnet. This value must -match the `tag` type that you specify. -//// - -. 
View the machines in the `openshift-machine-api` namespace and confirm that -they are launching: -+ ----- -$ oc get machines --namespace openshift-machine-api -NAME INSTANCE STATE TYPE REGION ZONE AGE -test-tkh7l-worker-us-east-2a-hxlqn i-0e7f3a52b2919471e pending m4.4xlarge us-east-2 us-east-2a 3s ----- diff --git a/_unused_topics/installation-osp-troubleshooting.adoc b/_unused_topics/installation-osp-troubleshooting.adoc deleted file mode 100644 index 8b5bcff20bd9..000000000000 --- a/_unused_topics/installation-osp-troubleshooting.adoc +++ /dev/null @@ -1,40 +0,0 @@ -// Module included in the following assemblies: -// -// * n/a - -[id="installation-osp-customizing_{context}"] - -= Troubleshooting {product-title} on OpenStack installations - -// Structure as needed in the end. This is very much a WIP. -// A few more troubleshooting and/or known issues blurbs incoming - -Unfortunately, there will always be some cases where {product-title} fails to install properly. In these events, it is helpful to understand the likely failure modes as well as how to troubleshoot the failure. - -This document discusses some troubleshooting options for {rh-openstack}-based -deployments. For general tips on troubleshooting the installation program, see the [Installer Troubleshooting](../troubleshooting.md) guide. - -== View instance logs - -{rh-openstack} CLI tools must be installed, then: - ----- -$ openstack console log show ----- - -== Connect to instances via SSH - -Get the IP address of the machine on the private network: -``` -openstack server list | grep master -| 0dcd756b-ad80-42f1-987a-1451b1ae95ba | cluster-wbzrr-master-1 | ACTIVE | cluster-wbzrr-openshift=172.24.0.21 | rhcos | m1.s2.xlarge | -| 3b455e43-729b-4e64-b3bd-1d4da9996f27 | cluster-wbzrr-master-2 | ACTIVE | cluster-wbzrr-openshift=172.24.0.18 | rhcos | m1.s2.xlarge | -| 775898c3-ecc2-41a4-b98b-a4cd5ae56fd0 | cluster-wbzrr-master-0 | ACTIVE | cluster-wbzrr-openshift=172.24.0.12 | rhcos | m1.s2.xlarge | -``` - -And connect to it using the control plane machine currently holding the API as a jumpbox: - -``` -ssh -J core@${floating IP address}<1> core@ -``` -<1> The floating IP address assigned to the control plane machine. diff --git a/_unused_topics/looking-inside-nodes.adoc b/_unused_topics/looking-inside-nodes.adoc deleted file mode 100644 index 26ab2dccd6be..000000000000 --- a/_unused_topics/looking-inside-nodes.adoc +++ /dev/null @@ -1,50 +0,0 @@ -// Module included in the following assemblies: -// -// - -[id="looking-inside-openshift-nodes_{context}"] -= Looking inside {product-title} nodes - -Directly accessing a node is strongly discouraged. Nodes are meant to be managed entirely from the cluster and that are considered tainted if you log in to a node and change anything. That said, there might be times when you want to troubleshoot a problem on a node or simply go onto a node in a test environment to see how things work. - -For debugging purposes, the oc debug command lets you go inside any pod and look around. For nodes, in particular, you open a tools pod on the node, then chroot to the node’s host filesystem. At that point, you are effectively working on the node. 
Here’s how to do that: - ----- -$ oc get nodes - -NAME STATUS ROLES AGE VERSION - -ip-10-0-0-1.us-east-2.compute.internal Ready worker 3h19m v1.25.0 - -ip-10-0-0-39.us-east-2.compute.internal Ready master 3h37m v1.25.0 - -… - -$ oc debug nodes/ip-10-0-138-39.us-east-2.compute.internal - -Starting pod/ip-10-0-138-39us-east-2computeinternal-debug …​ ----- - ----- -$ oc debug nodes/ip-10-0-138-39.us-east-2.compute.internal - -Starting pod/ip-10-0-138-39us-east-2computeinternal-debug …​ - -To use host binaries, run chroot /host - -If you don’t see a command prompt, try pressing enter. - -sh-4.3# ----- - -As noted, you can change to the root of the node’s filesystem by typing chroot /host and running commands from the host on that filesystem as though you were logged in directly from the host. Here are some examples of commands you can run to see what is happening on the node: - -* crictl: This CRI-O client command provides many of the same operations for examining images images and containers that the docker CLI offers the Docker Container Engine. One difference is that crictl can also act on pods. If you are debugging issues with containers run the {product-title} users or the {product-title} control plane, crictl is the best tool to use. -* podman: Provides many of the same features as crictl and docker CLI tools, but requires no container engine. On a node, podman can be useful for debugging container issues if the CRI-O runtime isn’t working. -* skopeo: Copy, delete, and inspect container images with skopeo. -* rpm-ostree: Use e.g. rpm-ostree status to look at the operating system state. -* journalctl: The standard journalctl command can be very useful for querying the system journal for messages that provide information about applications running on the system. - -Because the nodes are {op-system} Linux-based systems, you can use standard Linux commands to explore the nodes as well. These include ps, netstat, ip, route, rpm, and many others. You can change to the /etc directory on the host and look into configuration files for services running directly on the host. For example, look at /etc/crio/crio.conf for CRI-O settings, /etc/resolv.conf for DNS server settings, and /etc/ssh for SSH service configuration and keys. - -If you are unable to reach the nodes with oc debug, because something is wrong with the {product-title} cluster, you might be able to debug the nodes by setting up a bastion host on the cluster. For information on setting up a bastion host for {product-title}, see https://github.com/eparis/ssh-bastion[ssh-bastion]. diff --git a/_unused_topics/machine-configs-and-pools.adoc b/_unused_topics/machine-configs-and-pools.adoc deleted file mode 100644 index d627624ec571..000000000000 --- a/_unused_topics/machine-configs-and-pools.adoc +++ /dev/null @@ -1,75 +0,0 @@ -// Module included in the following assemblies: -// -// * TBD - -[id="machine-configs-and-pools_{context}"] -= Machine Configs and Machine Config Pools -Machine Config Pools manage a cluster of nodes and their corresponding -Machine Configs. Machine Configs contain configuration information for a -cluster. 
- -To list all Machine Config Pools that are known: - ----- -$ oc get machineconfigpools -NAME CONFIG UPDATED UPDATING DEGRADED -master master-1638c1aea398413bb918e76632f20799 False False False -worker worker-2feef4f8288936489a5a832ca8efe953 False False False ----- - -To list all Machine Configs: ----- -$ oc get machineconfig -NAME GENERATEDBYCONTROLLER IGNITIONVERSION CREATED OSIMAGEURL -00-master 4.0.0-0.150.0.0-dirty 2.2.0 16m -00-master-ssh 4.0.0-0.150.0.0-dirty 16m -00-worker 4.0.0-0.150.0.0-dirty 2.2.0 16m -00-worker-ssh 4.0.0-0.150.0.0-dirty 16m -01-master-kubelet 4.0.0-0.150.0.0-dirty 2.2.0 16m -01-worker-kubelet 4.0.0-0.150.0.0-dirty 2.2.0 16m -master-1638c1aea398413bb918e76632f20799 4.0.0-0.150.0.0-dirty 2.2.0 16m -worker-2feef4f8288936489a5a832ca8efe953 4.0.0-0.150.0.0-dirty 2.2.0 16m ----- - -To list all KubeletConfigs: - ----- -$ oc get kubeletconfigs ----- - -To get more detailed information about a KubeletConfig, including the reason for -the current condition: - ----- -$ oc describe kubeletconfig ----- - -For example: - ----- -# oc describe kubeletconfig set-max-pods - -Name: set-max-pods <1> -Namespace: -Labels: -Annotations: -API Version: machineconfiguration.openshift.io/v1 -Kind: KubeletConfig -Metadata: - Creation Timestamp: 2019-02-05T16:27:20Z - Generation: 1 - Resource Version: 19694 - Self Link: /apis/machineconfiguration.openshift.io/v1/kubeletconfigs/set-max-pods - UID: e8ee6410-2962-11e9-9bcc-664f163f5f0f -Spec: - Kubelet Config: <2> - Max Pods: 100 - Machine Config Pool Selector: <3> - Match Labels: - Custom - Kubelet: small-pods -Events: ----- - -<1> The name of the KubeletConfig. -<2> The user defined configuration. -<3> The Machine Config Pool selector to apply the KubeletConfig to. \ No newline at end of file diff --git a/_unused_topics/managing-dedicated-readers-group.adoc b/_unused_topics/managing-dedicated-readers-group.adoc deleted file mode 100644 index 511dc8313ab6..000000000000 --- a/_unused_topics/managing-dedicated-readers-group.adoc +++ /dev/null @@ -1,30 +0,0 @@ -// Module included in the following assemblies: -// -// administering_a_cluster/dedicated-admin-role.adoc - -[id="dedicated-managing-dedicated-readers-group_{context}"] -= Managing the dedicated-readers group - -Users with a `dedicated-reader` role are granted edit and view access to the -`dedicated-reader` project and view-only access to the other projects. 
- -To view a list of current dedicated readers by user name, you can use the -following command: - ----- -$ oc describe group dedicated-readers ----- - -To add a new member to the `dedicated-readers` group, if you have -`dedicated-admin` access: - ----- -$ oc adm groups add-users dedicated-readers ----- - -To remove an existing user from the `dedicated-readers` group, if you have -`dedicated-admin` access: - ----- -$ oc adm groups remove-users dedicated-readers ----- diff --git a/_unused_topics/manually-creating-iam-azure.adoc b/_unused_topics/manually-creating-iam-azure.adoc deleted file mode 100644 index a38460ef6906..000000000000 --- a/_unused_topics/manually-creating-iam-azure.adoc +++ /dev/null @@ -1,29 +0,0 @@ -:_mod-docs-content-type: ASSEMBLY -[id="manually-creating-iam-azure"] -= Manually creating long-term credentials for Azure -include::_attributes/common-attributes.adoc[] -:context: manually-creating-iam-azure - -toc::[] - -In environments where the cloud identity and access management (IAM) APIs are not reachable, or the administrator prefers not to store an administrator-level credential secret in the cluster `kube-system` namespace, you can put the Cloud Credential Operator (CCO) into manual mode before you install the cluster. - -include::modules/alternatives-to-storing-admin-secrets-in-kube-system.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources -* For a detailed description of all available CCO credential modes and their supported platforms, see xref:../../authentication/managing_cloud_provider_credentials/about-cloud-credential-operator.adoc#about-cloud-credential-operator[About the Cloud Credential Operator]. - -include::modules/manually-create-identity-access-management.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources -* xref:../../updating/preparing_for_updates/preparing-manual-creds-update.adoc#preparing-manual-creds-update[Preparing to update a cluster with manually maintained credentials] - -[id="manually-creating-iam-azure-next-steps"] -== Next steps - -* Install an {product-title} cluster: -** xref:../../installing/installing_azure/ipi/installing-azure-default.adoc#installing-azure-default[Installing a cluster quickly on Azure] with default options on installer-provisioned infrastructure -** xref:../../installing/installing_azure/ipi/installing-azure-customizations.adoc#installing-azure-customizations[Install a cluster with cloud customizations on installer-provisioned infrastructure] -** xref:../../installing/installing_azure/ipi/installing-azure-network-customizations.adoc#installing-azure-network-customizations[Install a cluster with network customizations on installer-provisioned infrastructure] diff --git a/_unused_topics/manually-creating-iam-gcp.adoc b/_unused_topics/manually-creating-iam-gcp.adoc deleted file mode 100644 index b243181fe2c4..000000000000 --- a/_unused_topics/manually-creating-iam-gcp.adoc +++ /dev/null @@ -1,37 +0,0 @@ -:_mod-docs-content-type: ASSEMBLY -[id="manually-creating-iam-gcp"] -= Manually creating IAM for GCP -include::_attributes/common-attributes.adoc[] -:context: manually-creating-iam-gcp - -toc::[] - -In environments where the cloud identity and access management (IAM) APIs are not reachable, or the administrator prefers not to store an administrator-level credential secret in the cluster `kube-system` namespace, you can put the Cloud Credential Operator (CCO) into manual mode before you install the cluster. 
- -include::modules/alternatives-to-storing-admin-secrets-in-kube-system.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* xref:../../authentication/managing_cloud_provider_credentials/cco-mode-gcp-workload-identity.adoc#cco-mode-gcp-workload-identity[Using manual mode with GCP Workload Identity] -* xref:../../post_installation_configuration/cluster-tasks.adoc#post-install-remove-cloud-creds[Removing cloud provider credentials] - -For a detailed description of all available CCO credential modes and their supported platforms, see xref:../../authentication/managing_cloud_provider_credentials/about-cloud-credential-operator.adoc#about-cloud-credential-operator[About the Cloud Credential Operator]. - -include::modules/manually-create-identity-access-management.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources -* xref:../../updating/preparing_for_updates/preparing-manual-creds-update.adoc#preparing-manual-creds-update[Preparing to update a cluster with manually maintained credentials] - -include::modules/mint-mode.adoc[leveloffset=+1] - -include::modules/mint-mode-with-removal-of-admin-credential.adoc[leveloffset=+1] - -[id="manually-creating-iam-gcp-next-steps"] -== Next steps - -* Install an {product-title} cluster: -** xref:../../installing/installing_gcp/installing-gcp-default.adoc#installing-gcp-default[Installing a cluster quickly on GCP] with default options on installer-provisioned infrastructure -** xref:../../installing/installing_gcp/installing-gcp-customizations.adoc#installing-gcp-customizations[Install a cluster with cloud customizations on installer-provisioned infrastructure] -** xref:../../installing/installing_gcp/installing-gcp-network-customizations.adoc#installing-gcp-network-customizations[Install a cluster with network customizations on installer-provisioned infrastructure] diff --git a/_unused_topics/metering-resources.adoc b/_unused_topics/metering-resources.adoc deleted file mode 100644 index 7b0f67114a9a..000000000000 --- a/_unused_topics/metering-resources.adoc +++ /dev/null @@ -1,23 +0,0 @@ -// Module included in the following assemblies: -// -// * metering/metering-install-metering.adoc - -[id="metering-resources_{context}"] -= Metering resources - -Metering has many resources, which can be used to manage the deployment and installation of Metering, as well as the reporting functionality Metering provides. - -Metering is managed using the following CustomResourceDefinitions (CRDs): - -[cols="1,7"] -|=== - -|*MeteringConfig* |Configures the Metering stack for deployment. Contains customizations and configuration options to control each component that makes up the Metering stack. - -|*Reports* |Controls what query to use, when, and how often the query should be run, and where to store the results. - -|*ReportQueries* |Contains the SQL queries used to perform analysis on the data contained with in ReportDataSources. - -|*ReportDataSources* |Controls the data available to ReportQueries and Reports. Allows configuring access to different databases for use within Metering. 
- -|=== diff --git a/_unused_topics/microshift-man-config-ovs-bridge.adoc b/_unused_topics/microshift-man-config-ovs-bridge.adoc deleted file mode 100644 index 5cd9ef845376..000000000000 --- a/_unused_topics/microshift-man-config-ovs-bridge.adoc +++ /dev/null @@ -1,39 +0,0 @@ -//FIXME: need updated config procedure for customers that will persist across reboots -//this module content is unused as of 4.13 - -//=== Manually configuring OVS bridge br-ex -//.Procedure -//Manually configure the OVS bridge br-ex by running the following commands. - -//* Initiate OVS: -//+ -//[source,terminal] -//---- -//$ sudo systemctl enable openvswitch --now -//---- -//* Add the network bridge: -//+ -//[source,terminal] -//---- -//$ sudo ovs-vsctl add-br br-ex -//---- -//* Add the interface to the network bridge: -//+ -//[source,terminal] -//---- -//$ sudo ovs-vsctl add-port br-ex -//---- -//The `` is the network interface name where the node IP address is assigned. -//* Get the bridge up and running: -//+ -//[source,terminal] -//---- -//$ sudo ip link set br-ex up -//---- -//* After `br-ex up` is running, assign the node IP address to `br-ex` bridge: -//[source,terminal] -//---- -//$ sudo ... -//---- -//[NOTE] -//Adding a physical interface to `br-ex` bridge will disconnect the ssh connection on the node IP address. \ No newline at end of file diff --git a/_unused_topics/microshift-nodeport-unreachable-workaround.adoc b/_unused_topics/microshift-nodeport-unreachable-workaround.adoc deleted file mode 100644 index 39fc6c0db6f4..000000000000 --- a/_unused_topics/microshift-nodeport-unreachable-workaround.adoc +++ /dev/null @@ -1,45 +0,0 @@ -// Module included in the following assemblies: -// -// * module may be unused in 4.13 - -:_mod-docs-content-type: PROCEDURE -[id="microshift-nodeport-unreachable-workaround_{context}"] -= Manually restarting the `ovnkube-master` pod to resume node port traffic - -After you install {product-title}, NodePort service traffic might stop. To troubleshoot this issue, manually restart the `ovnkube-master` pod in the `openshift-ovn-kubernetes` namespace. - -.Prerequisites - -* The OpenShift CLI (`oc`) is installed. -* A cluster installed on infrastructure configured with the Open Virtual Network (OVN)-Kubernetes network plugin. -* Access to the `kubeconfig` file. -* The KUBECONFIG environment variable is set. - -.Procedure - -Run the commands listed in each step that follows to restore the `NodePort` service traffic after you install{product-title}: - -. Find the name of the ovn-master pod that you want to restart by running the following command: -+ -[source,terminal] ----- -$ pod=$(oc get pods -n openshift-ovn-kubernetes | grep ovnkube-master | awk -F " " '{print $1}') ----- - -. Force a restart of the of the ovnkube-master pod by running the following command: -+ -[source,terminal] ----- -$ oc -n openshift-ovn-kubernetes delete pod $pod ----- - -. Optional: To confirm that the ovnkube-master pod restarted, run the following command: -+ -[source,terminal] ----- -$ oc get pods -n openshift-ovn-kubernetes ----- -If the pod restarted, the listing of the running pods shows a different ovnkube-master pod name and age consistent with the procedure you just completed. - -. Verify that the `NodePort` service can now be reached. 
- diff --git a/_unused_topics/migrate-from-openshift-sdn.adoc b/_unused_topics/migrate-from-openshift-sdn.adoc deleted file mode 100644 index cee1a31540f4..000000000000 --- a/_unused_topics/migrate-from-openshift-sdn.adoc +++ /dev/null @@ -1,64 +0,0 @@ -:_mod-docs-content-type: ASSEMBLY -[id="migrate-from-openshift-sdn"] -= Migrating from the OpenShift SDN network plugin -include::_attributes/common-attributes.adoc[] -:context: migrate-from-openshift-sdn - -toc::[] - -As a cluster administrator, you can migrate to the OVN-Kubernetes network plugin from the OpenShift SDN network plugin using the _offline_ migration method or the limited live migration method. - -To learn more about OVN-Kubernetes, read xref:../../networking/ovn_kubernetes_network_provider/about-ovn-kubernetes#about-ovn-kubernetes[About the OVN-Kubernetes network plugin]. - -include::modules/nw-ovn-kubernetes-migration-about.adoc[leveloffset=+1] -include::modules/nw-network-plugin-migration-process.adoc[leveloffset=+2] -include::modules/nw-ovn-kubernetes-migration.adoc[leveloffset=+2] - -include::modules/nw-ovn-kubernetes-live-migration-about.adoc[leveloffset=+1] -include::modules/how-the-live-migration-process-works.adoc[leveloffset=+2] -include::modules/nw-ovn-kubernetes-live-migration.adoc[leveloffset=+2] -include::modules/checking-cluster-resources-before-initiating-limited-live-migration.adoc[leveloffset=+3] -include::modules/removing-egress-router-pods-before-initiating-limited-live-migration.adoc[leveloffset=+3] -include::modules/initiating-limited-live-migration.adoc[leveloffset=+3] -include::modules/patching-ovnk-address-ranges.adoc[leveloffset=+3] -include::modules/checking-cluster-resources-after-initiating-limited-live-migration.adoc[leveloffset=+3] -include::modules/nw-ovn-kubernetes-checking-live-migration-metrics.adoc[leveloffset=+2] -include::modules/live-migration-metrics-information.adoc[leveloffset=+3] - -[role="_additional-resources"] -[id="migrate-from-openshift-sdn-additional-resources"] -== Additional resources - -* link:https://access.redhat.com/labs/ocpnc/[Red Hat OpenShift Network Calculator] - -* xref:../networking/networking_operators/cluster-network-operator.adoc#nw-operator-cr_cluster-network-operator[Configuration for the OVN-Kubernetes network plugin] - -* xref:../../backup_and_restore/control_plane_backup_and_restore/backing-up-etcd.adoc#backup-etcd[Backing up etcd] - -* xref:../../networking/network_security/network_policy/about-network-policy.adoc#about-network-policy[About network policy] - -* xref:../../networking/changing-cluster-network-mtu.adoc#nw-cluster-mtu-change_changing-cluster-network-mtu[Changing the cluster MTU] - -* xref:../../networking/changing-cluster-network-mtu.adoc#mtu-value-selection_changing-cluster-network-mtu[MTU value selection] - -* xref:../../networking/network_security/network_policy/about-network-policy.adoc#nw-networkpolicy-optimize-ovn_about-network-policy[About network policy] -* OVN-Kubernetes capabilities - -- xref:../../networking/ovn_kubernetes_network_provider/configuring-egress-ips-ovn.adoc#configuring-egress-ips-ovn[Configuring an egress IP address] - -- xref:../../networking/network_security/egress_firewall/configuring-egress-firewall-ovn.adoc#configuring-egress-firewall-ovn[Configuring an egress firewall for a project] - -- link:https://access.redhat.com/solutions/7078619[OVN-Kubernetes egress firewall blocks process to deploy application as DeploymentConfig] - -- 
xref:../../networking/ovn_kubernetes_network_provider/enabling-multicast.adoc#nw-ovn-kubernetes-enabling-multicast[Enabling multicast for a project] -* OpenShift SDN capabilities - -- xref:../../networking/openshift_sdn/assigning-egress-ips.adoc#assigning-egress-ips[Configuring egress IPs for a project] - -- xref:../../networking/openshift_sdn/configuring-egress-firewall.adoc#configuring-egress-firewall[Configuring an egress firewall for a project] - -- xref:../../networking/openshift_sdn/enabling-multicast.adoc#enabling-multicast[Enabling multicast for a project] - -- xref:../../networking/openshift_sdn/deploying-egress-router-layer3-redirection.adoc#deploying-egress-router-layer3-redirection[Deploying an egress router pod in redirect mode] - -* xref:../../rest_api/operator_apis/network-operator-openshift-io-v1.adoc#network-operator-openshift-io-v1[Network [operator.openshift.io/v1] diff --git a/_unused_topics/mint-mode-with-removal-of-admin-credential.adoc b/_unused_topics/mint-mode-with-removal-of-admin-credential.adoc deleted file mode 100644 index 5eb9739f4b1b..000000000000 --- a/_unused_topics/mint-mode-with-removal-of-admin-credential.adoc +++ /dev/null @@ -1,21 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_aws/manually-creating-iam.adoc - -[id="mint-mode-with-removal-or-rotation-of-admin-credential_{context}"] -= Mint mode with removal or rotation of the administrator-level credential - -Currently, this mode is only supported on AWS and GCP. - -In this mode, a user installs {product-title} with an administrator-level credential just like the normal mint mode. However, this process removes the administrator-level credential secret from the cluster post-installation. - -The administrator can have the Cloud Credential Operator make its own request for a read-only credential that allows it to verify if all `CredentialsRequest` objects have their required permissions, thus the administrator-level credential is not required unless something needs to be changed. After the associated credential is removed, it can be deleted or deactivated on the underlying cloud, if desired. - -[NOTE] -==== -Prior to a non z-stream upgrade, you must reinstate the credential secret with the administrator-level credential. If the credential is not present, the upgrade might be blocked. -==== - -The administrator-level credential is not stored in the cluster permanently. - -Following these steps still requires the administrator-level credential in the cluster for brief periods of time. It also requires manually re-instating the secret with administrator-level credentials for each upgrade. diff --git a/_unused_topics/mint-mode.adoc b/_unused_topics/mint-mode.adoc deleted file mode 100644 index fbcc9675fd92..000000000000 --- a/_unused_topics/mint-mode.adoc +++ /dev/null @@ -1,19 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_aws/manually-creating-iam.adoc -// * installing/installing_gcp/manually-creating-iam-gcp.adoc - -:_mod-docs-content-type: CONCEPT -[id="mint-mode_{context}"] -= Mint mode - -Mint mode is the default Cloud Credential Operator (CCO) credentials mode for {product-title} on platforms that support it. In this mode, the CCO uses the provided administrator-level cloud credential to run the cluster. Mint mode is supported for AWS and GCP. 
- -In mint mode, the `admin` credential is stored in the `kube-system` namespace and then used by the CCO to process the `CredentialsRequest` objects in the cluster and create users for each with specific permissions. - -The benefits of mint mode include: - -* Each cluster component has only the permissions it requires -* Automatic, on-going reconciliation for cloud credentials, including additional credentials or permissions that might be required for upgrades - -One drawback is that mint mode requires `admin` credential storage in a cluster `kube-system` secret. diff --git a/_unused_topics/monitoring-configuring-etcd-monitoring.adoc b/_unused_topics/monitoring-configuring-etcd-monitoring.adoc deleted file mode 100644 index 66e1144babb9..000000000000 --- a/_unused_topics/monitoring-configuring-etcd-monitoring.adoc +++ /dev/null @@ -1,190 +0,0 @@ -// Module included in the following assemblies: -// -// * monitoring/monitoring.adoc - -[id="configuring-etcd-monitoring_{context}"] -= Configuring etcd monitoring - -If the `etcd` service does not run correctly, successful operation of the whole {product-title} cluster is in danger. Therefore, it is reasonable to configure monitoring of `etcd`. - -.Procedure - -. Verify that the monitoring stack is running: -+ -[subs="quotes"] ----- -$ oc -n openshift-monitoring get pods -NAME READY STATUS RESTARTS AGE -alertmanager-main-0 3/3 Running 0 34m -alertmanager-main-1 3/3 Running 0 33m -alertmanager-main-2 3/3 Running 0 33m -cluster-monitoring-operator-67b8797d79-sphxj 1/1 Running 0 36m -grafana-c66997f-pxrf7 2/2 Running 0 37s -kube-state-metrics-7449d589bc-rt4mq 3/3 Running 0 33m -node-exporter-5tt4f 2/2 Running 0 33m -node-exporter-b2mrp 2/2 Running 0 33m -node-exporter-fd52p 2/2 Running 0 33m -node-exporter-hfqgv 2/2 Running 0 33m -prometheus-k8s-0 4/4 Running 1 35m -prometheus-k8s-1 0/4 ContainerCreating 0 21s -prometheus-operator-6c9fddd47f-9jfgk 1/1 Running 0 36m ----- - -. Open the configuration file for the cluster monitoring stack: -+ -[subs="quotes"] ----- -$ oc -n openshift-monitoring edit configmap cluster-monitoring-config ----- - -. Under `config.yaml: |+`, add the `etcd` section. -+ -.. If you run `etcd` in static pods on your control plane nodes (also known as master nodes), you can specify the `etcd` nodes using the selector: -+ -[subs="quotes"] ----- -... -data: - config.yaml: |+ - ... - *etcd: - targets: - selector: - openshift.io/component: etcd - openshift.io/control-plane: "true"* ----- -+ -.. If you run `etcd` on separate hosts, you must specify the nodes using IP addresses: -+ -[subs="quotes"] ----- -... -data: - config.yaml: |+ - ... - *etcd: - targets: - ips: - - "127.0.0.1" - - "127.0.0.2" - - "127.0.0.3"* ----- -+ -If `etcd` nodes IP addresses change, you must update this list. - -. Verify that the `etcd` service monitor is now running: -+ -[subs="quotes"] ----- -$ oc -n openshift-monitoring get servicemonitor -NAME AGE -alertmanager 35m -*etcd 1m* -kube-apiserver 36m -kube-controllers 36m -kube-state-metrics 34m -kubelet 36m -node-exporter 34m -prometheus 36m -prometheus-operator 37m ----- -+ -It might take up to a minute for the `etcd` service monitor to start. - -. Now you can navigate to the Web interface to see more information about status of `etcd` monitoring: -+ -.. To get the URL, run: -+ -[subs="quotes"] ----- -$ oc -n openshift-monitoring get routes -NAME HOST/PORT PATH SERVICES PORT TERMINATION WILDCARD -... 
-prometheus-k8s prometheus-k8s-openshift-monitoring.apps.msvistun.origin-gce.dev.openshift.com prometheus-k8s web reencrypt None ----- -+ -.. Using `https`, navigate to the URL listed for `prometheus-k8s`. Log in. - -. Ensure the user belongs to the `cluster-monitoring-view` role. This role provides access to viewing cluster monitoring UIs. For example, to add user `developer` to `cluster-monitoring-view`, run: - - $ oc adm policy add-cluster-role-to-user cluster-monitoring-view developer -+ - -. In the Web interface, log in as the user belonging to the `cluster-monitoring-view` role. - -. Click *Status*, then *Targets*. If you see an `etcd` entry, `etcd` is being monitored. -+ -image::etcd-no-certificate.png[] - -While `etcd` is being monitored, Prometheus is not yet able to authenticate against `etcd`, and so cannot gather metrics. To configure Prometheus authentication against `etcd`: - -. Copy the `/etc/etcd/ca/ca.crt` and `/etc/etcd/ca/ca.key` credentials files from the control plane node to the local machine: -+ -[subs="quotes"] ----- -$ ssh -i gcp-dev/ssh-privatekey cloud-user@35.237.54.213 -... ----- - -. Create the `openssl.cnf` file with these contents: -+ ----- -[ req ] -req_extensions = v3_req -distinguished_name = req_distinguished_name -[ req_distinguished_name ] -[ v3_req ] -basicConstraints = CA:FALSE -keyUsage = nonRepudiation, keyEncipherment, digitalSignature -extendedKeyUsage=serverAuth, clientAuth ----- - -. Generate the `etcd.key` private key file: -+ -[subs="quotes"] ----- -$ openssl genrsa -out etcd.key 2048 ----- - -. Generate the `etcd.csr` certificate signing request file: -+ -[subs="quotes"] ----- -$ openssl req -new -key etcd.key -out etcd.csr -subj "/CN=etcd" -config openssl.cnf ----- - -. Generate the `etcd.crt` certificate file: -+ -[subs="quotes"] ----- -$ openssl x509 -req -in etcd.csr -CA ca.crt -CAkey ca.key -CAcreateserial -out etcd.crt -days 365 -extensions v3_req -extfile openssl.cnf ----- - -. Put the credentials into format used by {product-title}: -+ ----- -cat <<-EOF > etcd-cert-secret.yaml -apiVersion: v1 -data: - etcd-client-ca.crt: "$(cat ca.crt | base64 --wrap=0)" - etcd-client.crt: "$(cat etcd.crt | base64 --wrap=0)" - etcd-client.key: "$(cat etcd.key | base64 --wrap=0)" -kind: Secret -metadata: - name: kube-etcd-client-certs - namespace: openshift-monitoring -type: Opaque -EOF ----- -+ -This creates the *_etcd-cert-secret.yaml_* file - -. Apply the credentials file to the cluster: - ----- -$ oc apply -f etcd-cert-secret.yaml ----- - -. Visit the "Targets" page of the Web interface again. Verify that `etcd` is now being correctly monitored. It might take several minutes for changes to take effect. -+ -image::etcd-monitoring-working.png[] diff --git a/_unused_topics/monitoring-dead-mans-switch-pagerduty.adoc b/_unused_topics/monitoring-dead-mans-switch-pagerduty.adoc deleted file mode 100644 index 830af9f53b55..000000000000 --- a/_unused_topics/monitoring-dead-mans-switch-pagerduty.adoc +++ /dev/null @@ -1,20 +0,0 @@ -// Module included in the following assemblies: -// -// * monitoring/configuring-monitoring-stack.adoc - -[id="dead-mans-switch-pagerduty_{context}"] -== Dead man's switch PagerDuty - -https://www.pagerduty.com/[PagerDuty] supports "Dead man's switch" through an integration called https://deadmanssnitch.com/[Dead Man's Snitch]. You can enable it. - -.Procedure - -* Add a `PagerDuty` configuration to the default `deadmansswitch` receiver. 
-+ -For example, you can configure Dead Man's Snitch to page the operator if the "Dead man's switch" alert is silent for 15 minutes. With the default Alertmanager configuration, the Dead man's switch alert is repeated every five minutes. If Dead Man's Snitch triggers after 15 minutes, it indicates that the notification has been unsuccessful at least twice. - -[role="_additional-resources"] -.Additional resources - -// FIXME describe the procedure instead of linking * To learn how to add a `PagerDuty` configuration to the default `deadmansswitch` receiver, see LINK. -* To learn how to configure Dead Man's Snitch for PagerDuty, see https://www.pagerduty.com/docs/guides/dead-mans-snitch-integration-guide/[Dead Man’s Snitch Integration Guide]. diff --git a/_unused_topics/monitoring-dead-mans-switch.adoc b/_unused_topics/monitoring-dead-mans-switch.adoc deleted file mode 100644 index db473bd2202e..000000000000 --- a/_unused_topics/monitoring-dead-mans-switch.adoc +++ /dev/null @@ -1,13 +0,0 @@ -// Module included in the following assemblies: -// -// * monitoring/configuring-monitoring-stack.adoc - -[id="dead-mans-switch_{context}"] -== Dead man's switch - -{product-title} Monitoring ships with a "Dead man's switch" to ensure the availability of the monitoring infrastructure. - -The "Dead man's switch" is a simple Prometheus alerting rule that always triggers. The Alertmanager continuously sends notifications for the dead man's switch to the notification provider that supports this functionality. This also ensures that communication between the Alertmanager and the notification provider is working. - -This mechanism is supported by PagerDuty to issue alerts when the monitoring system itself is down. - diff --git a/_unused_topics/monitoring-enabling-dynamically-provisioned-storage.adoc b/_unused_topics/monitoring-enabling-dynamically-provisioned-storage.adoc deleted file mode 100644 index ea4c252779cf..000000000000 --- a/_unused_topics/monitoring-enabling-dynamically-provisioned-storage.adoc +++ /dev/null @@ -1,27 +0,0 @@ -// Module included in the following assemblies: -// -// * monitoring/configuring-monitoring-stack.adoc - -[id="enabling-dynamically-provisioned-storage_{context}"] -= Enabling dynamically-provisioned storage - -Instead of statically-provisioned storage, you can use dynamically-provisioned storage. - -.Procedure - -. To enable dynamic storage for Prometheus and Alertmanager, set the following parameters to `true` in the Ansible inventory file: -+ -* `openshift_cluster_monitoring_operator_prometheus_storage_enabled` (Default: false) -* `openshift_cluster_monitoring_operator_alertmanager_storage_enabled` (Default: false) -+ -. Optional: After you enable dynamic storage, you can also set the `storageclass` for the persistent volume claim for each component in the following parameters in the Ansible inventory file: -+ -* `openshift_cluster_monitoring_operator_prometheus_storage_class_name` (default: "") -* `openshift_cluster_monitoring_operator_alertmanager_storage_class_name` (default: "") -+ -Each of these variables applies only if its corresponding `storage_enabled` variable is set to `true`. - -[role="_additional-resources"] -.Additional resources - -* See https://kubernetes.io/docs/concepts/storage/dynamic-provisioning/[Dynamic Volume Provisioning] for details. 
diff --git a/_unused_topics/monitoring-enabling-persistent-storage.adoc b/_unused_topics/monitoring-enabling-persistent-storage.adoc deleted file mode 100644 index b9bd16207584..000000000000 --- a/_unused_topics/monitoring-enabling-persistent-storage.adoc +++ /dev/null @@ -1,19 +0,0 @@ -// Module included in the following assemblies: -// -// * monitoring/configuring-monitoring-stack.adoc - -[id="enabling-persistent-storage_{context}"] -= Enabling persistent storage - -By default, persistent storage is disabled for both Prometheus time-series data and for Alertmanager notifications and silences. You can configure the cluster to persistently store any one of them or both. - -.Procedure - -* To enable persistent storage of Prometheus time-series data, set this variable to `true` in the Ansible inventory file: -+ -`openshift_cluster_monitoring_operator_prometheus_storage_enabled` -+ -To enable persistent storage of Alertmanager notifications and silences, set this variable to `true` in the Ansible inventory file: -+ -`openshift_cluster_monitoring_operator_alertmanager_storage_enabled` - diff --git a/_unused_topics/monitoring-full-list-of-configuration-variables.adoc b/_unused_topics/monitoring-full-list-of-configuration-variables.adoc deleted file mode 100644 index 65fa04e17809..000000000000 --- a/_unused_topics/monitoring-full-list-of-configuration-variables.adoc +++ /dev/null @@ -1,48 +0,0 @@ -// Module included in the following assemblies: -// -// * monitoring/configuring-monitoring-stack.adoc - -[id="full-list-of-configuration-variables_{context}"] -= Full list of configuration variables - -This table contains the full list of the inventory file variables for configuring the Cluster Monitoring Operator: - -.Cluster Monitoring Operator Ansible variables -[options="header"] -|=== - -|Variable |Description - -|`openshift_cluster_monitoring_operator_install` -| Deploy the Cluster Monitoring Operator if `true`. Otherwise, undeploy. This variable is set to `true` by default. - -|`openshift_cluster_monitoring_operator_prometheus_storage_capacity` -| The persistent volume claim size for each of the Prometheus instances. This variable applies only if `openshift_cluster_monitoring_operator_prometheus_storage_enabled` is set to `true`. Defaults to `50Gi`. - -|`openshift_cluster_monitoring_operator_alertmanager_storage_capacity` -| The persistent volume claim size for each of the Alertmanager instances. This variable applies only if `openshift_cluster_monitoring_operator_alertmanager_storage_enabled` is set to `true`. Defaults to `2Gi`. - -|`openshift_cluster_monitoring_operator_node_selector` -| Set to the desired, existing [node selector] to ensure that pods are placed onto nodes with specific labels. Defaults to `node-role.kubernetes.io/infra=true`. - -|`openshift_cluster_monitoring_operator_alertmanager_config` -| Configures Alertmanager. - -|`openshift_cluster_monitoring_operator_prometheus_storage_enabled` -| Enable persistent storage of Prometheus' time-series data. This variable is set to `false` by default. - -|`openshift_cluster_monitoring_operator_alertmanager_storage_enabled` -| Enable persistent storage of Alertmanager notifications and silences. This variable is set to `false` by default. - -|`openshift_cluster_monitoring_operator_prometheus_storage_class_name` -| If you enabled the `openshift_cluster_monitoring_operator_prometheus_storage_enabled` option, set a specific StorageClass to ensure that pods are configured to use the `PVC` with that `storageclass`. 
Defaults to `none`, which applies the default storage class name. - -|`openshift_cluster_monitoring_operator_alertmanager_storage_class_name` -| If you enabled the `openshift_cluster_monitoring_operator_alertmanager_storage_enabled` option, set a specific StorageClass to ensure that pods are configured to use the `PVC` with that `storageclass`. Defaults to `none`, which applies the default storage class name. -|=== - -[role="_additional-resources"] -.Additional resources -// FIXME add link once doc is available -// Used to point to ../admin_guide/scheduling/node_selector.adoc[Advanced Scheduling and Node Selectors] -// * See LINK for more information on node selectors. diff --git a/_unused_topics/monitoring-grouping-alerts.adoc b/_unused_topics/monitoring-grouping-alerts.adoc deleted file mode 100644 index ad71f4988995..000000000000 --- a/_unused_topics/monitoring-grouping-alerts.adoc +++ /dev/null @@ -1,42 +0,0 @@ -// Module included in the following assemblies: -// -// * monitoring/configuring-monitoring-stack.adoc - -[id="grouping-alerts_{context}"] -== Grouping alerts - -Once alerts are firing against the Alertmanager, it must be configured to know how to logically group them. This procedure shows how to configure alert grouping: - -.Procedure - -_FIXME get missing info and complete the procedure_ - -For this example, a new route will be added to reflect alert routing of the "frontend" team. - -. Add new routes. Multiple routes may be added beneath the original route, typically to define the receiver for the notification. This example uses a matcher to ensure that only alerts coming from the service `example-app` are used: -+ - global: - resolve_timeout: 5m - route: - group_wait: 30s - group_interval: 5m - repeat_interval: 12h - receiver: default - routes: - - match: - alertname: DeadMansSwitch - repeat_interval: 5m - receiver: deadmansswitch - - match: - service: example-app - routes: - - match: - severity: critical - receiver: team-frontend-page - receivers: - - name: default - - name: deadmansswitch -+ -The sub-route matches only on alerts that have a severity of `critical`, and sends them via the receiver called `team-frontend-page`. As the name indicates, someone should be paged for alerts that are critical. - - diff --git a/_unused_topics/monitoring-monitoring-overview.adoc b/_unused_topics/monitoring-monitoring-overview.adoc deleted file mode 100644 index 05b34c256958..000000000000 --- a/_unused_topics/monitoring-monitoring-overview.adoc +++ /dev/null @@ -1,57 +0,0 @@ -// Module included in the following assemblies: -// -// * monitoring/monitoring.adoc - -[id="monitoring-overview_{context}"] -= Monitoring overview - -{product-title} ships with a pre-configured, pre-installed, and self-updating monitoring stack that is based on the link:https://prometheus.io/[Prometheus] open source project and its wider eco-system. It provides monitoring of cluster components and ships with a set of alerts to immediately notify the cluster administrator about any occurring problems and a set of link:https://grafana.com/[Grafana] dashboards. - -The monitoring stack includes these components: - -* Cluster Monitoring Operator -* Prometheus Operator -* Prometheus -* Prometheus Adapter -* Alertmanager -* kube-state-metrics -* node-exporter -* Grafana - -The {product-title} Cluster Monitoring Operator (CMO) is the central component of the stack. It watches over the deployed monitoring components and resources and ensures that they are always up to date. 
- -The Prometheus Operator (PO) creates, configures, and manages Prometheus and Alertmanager instances. It also automatically generates monitoring target configurations based on familiar Kubernetes label queries. - -The Prometheus Adapter exposes cluster resource metrics (CPU and memory utilization) API for horizontal pod autoscaling. - -Node-exporter is an agent deployed on every node to collect metrics about it. - -The kube-state-metrics exporter agent converts Kubernetes objects to metrics consumable by Prometheus. - -All the components of the monitoring stack are monitored by the stack. Additionally, the stack monitors: - -* cluster-version-operator -* image-registry -* kube-apiserver -* kube-apiserver-operator -* kube-controller-manager -* kube-controller-manager-operator -* kube-scheduler -* kubelet -* monitor-ovn-kubernetes -* openshift-apiserver -* openshift-apiserver-operator -* openshift-controller-manager -* openshift-controller-manager-operator -* openshift-svcat-controller-manager-operator -* telemeter-client - -All these components are automatically updated. - -Other {product-title} framework components might be exposing metrics as well. See their respective documentation. - -[NOTE] -==== -To be able to deliver updates with guaranteed compatibility, configurability of the {product-title} Monitoring stack is limited to the explicitly available options. -==== - diff --git a/_unused_topics/monitoring-setting-persistent-storage-size.adoc b/_unused_topics/monitoring-setting-persistent-storage-size.adoc deleted file mode 100644 index 619f6133c9bc..000000000000 --- a/_unused_topics/monitoring-setting-persistent-storage-size.adoc +++ /dev/null @@ -1,20 +0,0 @@ -// Module included in the following assemblies: -// -// * monitoring/configuring-monitoring-stack.adoc - -[id="setting-persistent-storage-size_{context}"] -= Setting persistent storage size - -You can specify the size of the persistent volume claim for Prometheus and Alertmanager. - -.Procedure - -* Change these Ansible variables: -+ --- -* `openshift_cluster_monitoring_operator_prometheus_storage_capacity` (default: 50Gi) -* `openshift_cluster_monitoring_operator_alertmanager_storage_capacity` (default: 2Gi) --- -+ -Each of these variables applies only if its corresponding `storage_enabled` variable is set to `true`. - diff --git a/_unused_topics/monitoring-update-and-compatibility-guarantees.adoc b/_unused_topics/monitoring-update-and-compatibility-guarantees.adoc deleted file mode 100644 index 4f42970573fc..000000000000 --- a/_unused_topics/monitoring-update-and-compatibility-guarantees.adoc +++ /dev/null @@ -1,20 +0,0 @@ -// Module included in the following assemblies: -// -// * monitoring/monitoring.adoc - -[id="update-and-compatibility-guarantees_{context}"] -= Update and compatibility guarantees - -To be able to deliver updates with guaranteed compatibility, configurability of the {product-title} Monitoring stack is limited to the explicitly available options. This document describes known pitfalls of which types of configuration and customization are unsupported, as well as misuse of resources provided by {product-title} Monitoring. All configuration options described in this topic are explicitly supported. - -*Modification of {product-title} monitoring resources* - -The {product-title} Monitoring stack ensures its resources are _always_ in the state it expects them to be. If they are modified, {product-title} Monitoring will ensure that this will be reset. 
Nonetheless it is possible to pause this behavior, by setting the `paused` field in the `AppVersion` called `openshift-monitoring`. Setting the {product-title} Monitoring stack to be paused, stops all future updates and will cause modification of the otherwise managed resources. If resources are modified in an uncontrolled manner, this will cause undefined behavior during updates. - -To ensure compatible and functioning updates, the `paused` field must be set to `false` on upgrades. - -*Usage of resources created by {product-title} monitoring* - -{product-title} Monitoring creates a number of resources. These resources are not meant to be used by any other resources, as there are no guarantees about their backward compatibility. For example, a `ClusterRole` called `prometheus-k8s` is created, and has very specific roles that exist solely for the cluster monitoring Prometheus pods to be able to access the resources it requires access to. All of these resources have no compatibility guarantees going forward. While some of these resources may incidentally have the necessary information for RBAC purposes for example, they can be subject to change in any upcoming release, with no backward compatibility. - -If the `Role` or `ClusterRole` objects that are similar are needed, we recommend creating a new object that has exactly the permissions required for the case at hand, rather than using the resources created and maintained by {product-title} Monitoring. diff --git a/_unused_topics/mounting-local-volumes.adoc b/_unused_topics/mounting-local-volumes.adoc deleted file mode 100644 index c0278465a113..000000000000 --- a/_unused_topics/mounting-local-volumes.adoc +++ /dev/null @@ -1,29 +0,0 @@ -[id="mounting-local-volumes_{context}"] -= Mounting local volumes - -This paragraph is the procedure module introduction: a short description of the procedure. - -.Prerequisites - -* All local volumes must be manually mounted before they can be consumed by {product-title} as PVs. - -.Procedure - -. Mount all volumes into the `*/mnt/local-storage//*` path: -+ ----- -# device name # mount point # FS # options # extra -/dev/sdb1 /mnt/local-storage/ssd/disk1 ext4 defaults 1 2 -/dev/sdb2 /mnt/local-storage/ssd/disk2 ext4 defaults 1 2 -/dev/sdb3 /mnt/local-storage/ssd/disk3 ext4 defaults 1 2 -/dev/sdc1 /mnt/local-storage/hdd/disk1 ext4 defaults 1 2 -/dev/sdc2 /mnt/local-storage/hdd/disk2 ext4 defaults 1 2 ----- -+ -Administrators must create local devices as needed using any method such as disk partition or LVM, create suitable file systems on these devices, and mount these devices using a script or /etc/fstab entries - -. 
Make all volumes accessible to the processes running within the Docker containers: -+ ----- -$ chcon -R unconfined_u:object_r:svirt_sandbox_file_t:s0 /mnt/local-storage/ ----- diff --git a/_unused_topics/nodes-cluster-disabling-features-list.adoc b/_unused_topics/nodes-cluster-disabling-features-list.adoc deleted file mode 100644 index ea94c7d6509d..000000000000 --- a/_unused_topics/nodes-cluster-disabling-features-list.adoc +++ /dev/null @@ -1,265 +0,0 @@ -// Module included in the following assemblies: -// -// * nodes/nodes-cluster-disabling-features.adoc - -[id="nodes-cluster-disabling-features-list_{context}"] -= List of feature gates - -Use the following list to determine the name of the feature you want to disable: - -[options="header"] -|=== -| Feature gate| Description | Default - -| *AdvancedAuditing* -| Enables a more general API auditing pipeline, which includes support for pluggable output backends and an audit policy specifying how different requests should be audited. -| True - -| *APIListChunking* -| Enables the API clients to retrieve LIST or GET resources from API server in chunks. -| True - -| *APIResponseCompression* -| Enables the compression of API responses for LIST or GET requests. -| False - -| *AppArmor* -| Enables AppArmor-based mandatory access control on Linux nodes when using Docker. For more information, see the link:https://kubernetes.io/docs/tutorials/clusters/apparmor/[Kubernetes AppArmor documentation]. -| True - -| *AttachVolumeLimit* -| Adds support for volume plugins to report node specific volume limits. -| True - -| *BalanceAttachedNodeVolumes* -| Includes volume count on node to be considered for balanced resource allocation while scheduling. A node which has closer CPU, memory utilization, and volume count is favored by scheduler while making decisions. -| False - -| *BlockVolume* -| Enables the definition and consumption of raw block devices in pods. For more information, see -the link:https://kubernetes.io/docs/concepts/storage/persistent-volumes/#raw-block-volume-support[Kubernetes Raw Block Volume Support]. -| False - -| *CPUManager* -| Enables Container-level CPU affinity support. -For more information, see Using CPU Manager. -| True - -| *CPUCFSQuotaPeriod* -| Enables nodes to change CPUCFSQuotaPeriod. -| False - -| *CRIcontainerLogRotation* -| Enables Container log rotation for the CRI Container runtime. -| True - -| *CSIBlockVolume* -| Enables CSI to use raw block storage volumes. -| False - -| *CSIDriverRegistry* -| Enables all logic related to the CSIDriver API object in csi.storage.k8s.io. -| False - -| *CSINodeInfo* -| Enables all logic related to the CSINodeInfo API object in csi.storage.k8s.io. -| False - -| *CSIPersistentVolume* -| Enables discovering and mounting volumes provisioned through a CSI (Container Storage Interface) compatible volume plugin. For more information, -see the link:https://github.com/kubernetes/community/blob/master/contributors/design-proposals/storage/container-storage-interface.md[CSI Volume Plugins in Kubernetes Design Documentation]. -| True - -| *CustomPodDNS* -| Enables customizing the DNS settings for a pod using the *dnsConfig* property. -| True - -| *Debugcontainers* -| Enables running a debugging Container in a pod namespace to troubleshoot a running Pod. -| False - -| *DevicePlugins* -| Enables device plug-in-based resource provisioning on nodes. -| True - -| *DryRun* -| Allows requests to be processed but not stored, so that validation, merging, mutation can be tested without committing. 
-| False - -| *DynamicKubeletConfig* -| Enables the dynamic configuration in a cluster. -| True - -| *EnableEquivalenceClassCache* -| Enables the scheduler to cache equivalence of nodes when scheduling Pods. -| False - -| *ExpandPersistentVolumes* -| Enables the ability to Expand persistent volumes. -| True - -| *ExpandInUsePersistentVolumes* -| Enables the ability to expand persistent volumes' file system without unmounting volumes. -| False - -| *ExperimentalHostUserNamespaceDefaultingGate* -| Enables the disabling of user namespaces. This is for Containers that are using other host projects, host mounts, or Containers that are privileged or using specific non-project capabilities, such as MKNODE, SYS_MODULE, and so forth. This should only be enabled if user project remapping is enabled in the Docker daemon. -| False - -| *GCERegionalPersistentDisk* -| Enables the GCE Persistent Disk feature. -| True - -| *HugePages* -| Enables the allocation and consumption of pre-allocated huge pages. -| True - -| *HyperVcontainer* -| Enables Hyper-V isolation for Windows Containers. -| False - -| *Intializers* -| Enables the dynamic admission control as an extension to the built-in admission controllers. -| False - -| *KubeletPluginsWatcher* -| Enables probe based plugin watcher utility for discovering Kubelet plugins. -| True - -| *LocalStorageCapacityIsolation* -| Enables the consumption of local ephemeral storage and the `sizeLimit` property of an *emptyDir* volume. -| False - -| *Mountcontainers* -| Enables using utility Containers on the host as the volume mount. -| False - -| *MountPropagation* -| Enables sharing a volume mounted by one Container to other Containers or pods. -| True - -| *NodeLease* -| Kubelet uses the new Lease API to report node heartbeats, (Kube) Node Lifecycle Controller uses these heartbeats as a node health signal. -| False - -| *PersistentLocalVolumes* -| Enables the usage of local volume pods. Pod affinity has to be specified if requesting a local volume. -| True - -| *PodPriority* -| Enables the descheduling and preemption of pods based on their priorities. -| True - -| *PodReadinessGates* -| Supports Pod Readiness. -| True - -| *PodShareProcessNamespace* -| Allows all containers in a pod to share a process namespace. -| True - -| *ProcMountType* -| Enables control over ProcMountType for containers. -| False - -| *QOSReserved* -| Allows resource reservations at the QoS level preventing pods at lower QoS levels from bursting into resources requested at higher QoS levels (memory only for now). -| False - -| *ResourceLimitsPriorityFunction* -| Enables a scheduler priority function that assigns a lowest possible score of `1` to a node that satisfies at least one of the input pod CPU and memory limits. The intent is to break ties between nodes with same scores. -| False - -| *ResourceQuotaScopeSelectors* -| Enables resource quota scope selectors. -| True - -| *RotateKubeletClientCertificate* -| Enables the rotation of the client TLS certificate on the cluster. -| True - -| *RotateKubeletServerCertificate* -| Enables the rotation of the server TLS certificate on the cluster. -| True - -| *RunAsGroup* -| Enables control over the primary group ID set on the init processes of Containers. -| False - -| *RuntimeClass* -| Enables RuntimeClass, for selecting between multiple runtimes to run a pod. -| False - -| *ScheduleDaemonSetPods* -| Enables DaemonSet pods to be scheduled by the default scheduler instead of the DaemonSet controller. 
-| True - -| *SCTPSupport* -| Enables SCTP as new protocol for Service ports, NetworkPolicy, and ContainerPort in Pod/Containers definition. -| False - -| *ServiceNodeExclusion* -| Enables the exclusion of nodes from load balancers created by a cloud provider. -| False - -| *StorageObjectInUseProtection* -| Enables postponing the deletion of persistent volume or persistent volume claim objects if they are still being used. -| True - -| *StreamingProxyRedirects* -| Instructs the API server to intercept and follow redirects from the backend kubelet for streaming requests. -| True - -| *SupportIPVSProxyMode* -| Enables providing in-cluster service load balancing using IP virtual servers. -| True - -| *SupportPodPidsLimit* -| Enables support for limiting the number of processes (PIDs) running in a pod. -| True - -| *Sysctls* -| Enables pods to set sysctls on a pod. -| True - -| *TaintBasedEvictions* -| Enables evicting pods from nodes based on taints on nodes and tolerations on pods. -| False - -| *TaintNodesByCondition* -| Enables automatic tainting nodes based on node conditions. -| True - -| *TokenRequest* -| Enables the TokenRequest endpoint on service account resources. -| True - -| *TokenRequestProjection* -| Enables ServiceAccountTokenVolumeProjection support in ProjectedVolumes. -| True - -| *TTLAfterFinished* -| Allows TTL controller to clean up Pods and Jobs after they finish. -| False - -| *ValidateProxyRedirects* -| Controls whether the apiserver should validate that redirects are only followed to the same host. Only used if StreamingProxyRedirects is enabled. -| False - -| *VolumeScheduling* -| Enables volume-topology-aware scheduling and make the persistent volume claim (PVC) binding aware of scheduling decisions. It also enables the usage of local volumes types when used together with the *PersistentLocalVolumes* feature gate. -| True - -| *VolumeSnapshotDataSource* -| Enables volume snapshot data source support. -| False - -| *VolumeSubpath* -| Allows mounting a subpath of a volume in a container. Do not remove this feature gate even though it's GA. -| True - -| *VolumeSubpathEnvExpansion* -| Allows subpath environment variable substitution. Only applicable if the VolumeSubpath feature is also enabled. -| False - -|=== diff --git a/_unused_topics/nodes-cluster-overcommit-node-memory.adoc b/_unused_topics/nodes-cluster-overcommit-node-memory.adoc deleted file mode 100644 index e1da11dcbb73..000000000000 --- a/_unused_topics/nodes-cluster-overcommit-node-memory.adoc +++ /dev/null @@ -1,102 +0,0 @@ -// Module included in the following assemblies: -// -// * nodes/nodes-cluster-overcommit.adoc - -[id="nodes-cluster-overcommit-node-memory_{context}"] - -= Reserving memory across quality of service tiers - -You can use the `qos-reserved` parameter to specify a percentage of memory to be reserved -by a pod in a particular QoS level. This feature attempts to reserve requested resources to exclude pods -from lower OoS classes from using resources requested by pods in higher QoS classes. - -By reserving resources for higher QOS levels, pods that do not have resource limits are prevented from encroaching on the resources -requested by pods at higher QoS levels. - -.Prerequisites - -. Obtain the label associated with the static Machine Config Pool CRD for the type of node you want to configure. -Perform one of the following steps: - -.. 
View the Machine Config Pool: -+ ----- -$ oc describe machineconfigpool ----- -+ -For example: -+ -[source,yaml] ----- -$ oc describe machineconfigpool worker - -apiVersion: machineconfiguration.openshift.io/v1 -kind: MachineConfigPool -metadata: - creationTimestamp: 2019-02-08T14:52:39Z - generation: 1 - labels: - custom-kubelet: small-pods <1> ----- -<1> If a label has been added it appears under `labels`. - -.. If the label is not present, add a key/value pair: -+ ----- -$ oc label machineconfigpool worker custom-kubelet=small-pods ----- -+ -[TIP] -==== -You can alternatively apply the following YAML to add the label: - -[source,yaml] ----- -apiVersion: machineconfiguration.openshift.io/v1 -kind: MachineConfigPool -metadata: - labels: - custom-kubelet: small-pods - name: worker ----- -==== - -.Procedure - -. Create a Custom Resource (CR) for your configuration change. -+ -.Sample configuration for a disabling CPU limits -[source,yaml] ----- -apiVersion: machineconfiguration.openshift.io/v1 -kind: KubeletConfig -metadata: - name: disable-cpu-units <1> -spec: - machineConfigPoolSelector: - matchLabels: - custom-kubelet: small-pods <2> - kubeletConfig: - cgroups-per-qos: - - true - cgroup-driver: - - 'systemd' - cgroup-root: - - '/' - qos-reserved: <3> - - 'memory=50%' ----- -<1> Assign a name to CR. -<2> Specify the label to apply the configuration change. -<3> Specifies how pod resource requests are reserved at the QoS level. -{product-title} uses the `qos-reserved` parameter as follows: -- A value of `qos-reserved=memory=100%` will prevent the `Burstable` and `BestEffort` QOS classes from consuming memory -that was requested by a higher QoS class. This increases the risk of inducing OOM -on `BestEffort` and `Burstable` workloads in favor of increasing memory resource guarantees -for `Guaranteed` and `Burstable` workloads. -- A value of `qos-reserved=memory=50%` will allow the `Burstable` and `BestEffort` QOS classes -to consume half of the memory requested by a higher QoS class. -- A value of `qos-reserved=memory=0%` -will allow a `Burstable` and `BestEffort` QoS classes to consume up to the full node -allocatable amount if available, but increases the risk that a `Guaranteed` workload -will not have access to requested memory. This condition effectively disables this feature. diff --git a/_unused_topics/nodes-containers-using-about.adoc b/_unused_topics/nodes-containers-using-about.adoc deleted file mode 100644 index 2f25d0cc342d..000000000000 --- a/_unused_topics/nodes-containers-using-about.adoc +++ /dev/null @@ -1,22 +0,0 @@ -// Module included in the following assemblies: -// -// * nodes/nodes-containers-using.adoc - -[id="nodes-containers-using-about_{context}"] -= Understanding Containers - -The basic units of {product-title} applications are called _containers_. -link:https://www.redhat.com/en/topics/containers#overview[Linux container technologies] -are lightweight mechanisms for isolating running processes so that they are -limited to interacting with only their designated resources. - -Many application instances can be running in containers on a single host without -visibility into each others' processes, files, network, and so on. Typically, -each container provides a single service (often called a "micro-service"), such -as a web server or a database, though containers can be used for arbitrary -workloads. - -The Linux kernel has been incorporating capabilities for container technologies -for years. 
{product-title} and
-Kubernetes add the ability to orchestrate containers across
-multi-host installations.
diff --git a/_unused_topics/nodes-containers-using-ssh.adoc b/_unused_topics/nodes-containers-using-ssh.adoc
deleted file mode 100644
index 868386626226..000000000000
--- a/_unused_topics/nodes-containers-using-ssh.adoc
+++ /dev/null
@@ -1,49 +0,0 @@
-// Module included in the following assemblies:
-//
-// * nodes/nodes-containers-using.adoc
-
-[id="nodes-containers-using-ssh_{context}"]
-= Opening a Remote Shell to Containers
-
-The `oc rsh` command allows you to locally access and manage tools that are on
-the system. The secure shell (SSH) is the underlying technology and industry
-standard that provides a secure connection to the application. Access to
-applications with the shell environment is protected and restricted with
-Security-Enhanced Linux (SELinux) policies.
-
-While in the remote shell, you can issue commands as if you are inside the
-container and perform local operations like monitoring, debugging, and using CLI
-commands specific to what is running in the container.
-
-For example, in a MySQL container, you can count the number of records in the
-database by invoking the `mysql` command and then entering a `SELECT` statement at the prompt. You can
-also use commands like `ps(1)` and `ls(1)` for validation.
-
-`BuildConfigs` and `DeploymentConfigs` map out how you want things to look and
-pods (with containers inside) are created and dismantled as needed. Your changes
-are not persistent. If you make changes directly within the container and that
-container is destroyed and rebuilt, your changes will no longer exist.
-
-[NOTE]
-====
-You can use the `oc exec` command to execute a command remotely. However, the `oc rsh` command provides an easier way
-to keep a remote shell open persistently.
-====
-
-.Procedure
-
-. Open a console on a system networked to connect to the node where your pod is located.
-
-. Open a remote shell session to a container:
-+
-----
-$ oc rsh
-----
-
-[NOTE]
-====
-For help with usage and options, and to see examples, run:
-----
-$ oc rsh -h
-----
-====
diff --git a/_unused_topics/nodes-nodes-audit-log-advanced.adoc b/_unused_topics/nodes-nodes-audit-log-advanced.adoc
deleted file mode 100644
index e790f3ec446c..000000000000
--- a/_unused_topics/nodes-nodes-audit-log-advanced.adoc
+++ /dev/null
@@ -1,139 +0,0 @@
-// Module included in the following assemblies:
-//
-// * nodes/nodes-audit-log.adoc
-
-[id="nodes-nodes-audit-log-advanced_{context}"]
-= Advanced Audit
-
-*DEPRECATED for the moment*
-
-The advanced audit feature provides several improvements over the
-basic audit functionality, including fine-grained event filtering and multiple output back ends.
-
-To enable the advanced audit feature, provide the following values in the
-`openshift_master_audit_config` parameter:
-
-----
-openshift_master_audit_config={"enabled": true, "auditFilePath": "/var/lib/origin/oscp-audit.log", "maximumFileRetentionDays": 14, "maximumFileSizeMegabytes": 500, "maximumRetainedFiles": 5, "policyFile": "/etc/origin/master/adv-audit.yaml", "logFormat":"json"}
-----
-
-[IMPORTANT]
-====
-The policy file *_/etc/origin/master/adv-audit.yaml_* must be available on each control plane node.
-====
-
-
-The following table contains additional options you can use.
-
-.Advanced Audit Configuration Parameters
-
-[cols="3a,6a",options="header"]
-|===
-| Parameter Name | Description
-
-|`policyFile`
-|Path to the file that defines the audit policy configuration.
- -|`policyConfiguration` -|An embedded audit policy configuration. - -|`logFormat` -|Specifies the format of the saved audit logs. Allowed values are `legacy` (the -format used in basic audit), and `json`. - -|`webHookKubeConfig` -|Path to a `.kubeconfig`-formatted file that defines the audit webhook -configuration, where the events are sent to. - -|`webHookMode` -|Specifies the strategy for sending audit events. Allowed values are `block` -(blocks processing another event until the previous has fully processed) and -`batch` (buffers events and delivers in batches). -|=== - -[IMPORTANT] -==== -To enable the advanced audit feature, you must provide either `policyFile` *or* -`policyConfiguration` describing the audit policy rules: -==== - -.Sample Audit Policy Configuration -[source,yaml] ----- -apiVersion: audit.k8s.io/v1beta1 -kind: Policy -rules: - - # Do not log watch requests by the "system:kube-proxy" on endpoints or services - - level: None <1> - users: ["system:kube-proxy"] <2> - verbs: ["watch"] <3> - resources: <4> - - group: "" - resources: ["endpoints", "services"] - - # Do not log authenticated requests to certain non-resource URL paths. - - level: None - userGroups: ["system:authenticated"] <5> - nonResourceURLs: <6> - - "/api*" # Wildcard matching. - - "/version" - - # Log the request body of configmap changes in kube-system. - - level: Request - resources: - - group: "" # core API group - resources: ["configmaps"] - # This rule only applies to resources in the "kube-system" namespace. - # The empty string "" can be used to select non-namespaced resources. - namespaces: ["kube-system"] <7> - - # Log configmap and secret changes in all other namespaces at the metadata level. - - level: Metadata - resources: - - group: "" # core API group - resources: ["secrets", "configmaps"] - - # Log all other resources in core and extensions at the request level. - - level: Request - resources: - - group: "" # core API group - - group: "extensions" # Version of group should NOT be included. - - # A catch-all rule to log all other requests at the Metadata level. - - level: Metadata <1> - - # Log login failures from the web console or CLI. Review the logs and refine your policies. - - level: Metadata - nonResourceURLs: - - /login* <8> - - /oauth* <9> ----- -<1> There are four possible levels every event can be logged at: -+ -* `None` - Do not log events that match this rule. -+ -* `Metadata` - Log request metadata (requesting user, time stamp, resource, verb, -etc.), but not request or response body. This is the same level as the one used -in basic audit. -+ -* `Request` - Log event metadata and request body, but not response body. -+ -* `RequestResponse` - Log event metadata, request, and response bodies. -<2> A list of users the rule applies to. An empty list implies every user. -<3> A list of verbs this rule applies to. An empty list implies every verb. This is - Kubernetes verb associated with API requests (including `get`, `list`, `watch`, - `create`, `update`, `patch`, `delete`, `deletecollection`, and `proxy`). -<4> A list of resources the rule applies to. An empty list implies every resource. -Each resource is specified as a group it is assigned to (for example, an empty for -Kubernetes core API, batch, build.openshift.io, etc.), and a resource list from -that group. -<5> A list of groups the rule applies to. An empty list implies every group. -<6> A list of non-resources URLs the rule applies to. -<7> A list of namespaces the rule applies to. An empty list implies every namespace. 
-<8> Endpoint used by the web console.
-<9> Endpoint used by the CLI.
-
-For more information on advanced audit, see the
-link:https://kubernetes.io/docs/tasks/debug-application-cluster/audit[Kubernetes
-documentation].
diff --git a/_unused_topics/nodes-nodes-resources-configuring-viewing.adoc b/_unused_topics/nodes-nodes-resources-configuring-viewing.adoc
deleted file mode 100644
index 8c8b62e45eeb..000000000000
--- a/_unused_topics/nodes-nodes-resources-configuring-viewing.adoc
+++ /dev/null
@@ -1,36 +0,0 @@
-// Module included in the following assemblies:
-//
-// * nodes/nodes-nodes-resources-configuring.adoc
-
-[id="nodes-nodes-resources-configuring-setting_{context}"]
-= Viewing Node Allocatable Resources and Capacity
-
-As an administrator, you can view the current capacity and allocatable resources of a specific node.
-
-.Procedure
-
-To see a node's current capacity and allocatable resources:
-
-. Run the following command:
-
-----
-$ oc get node/ -o yaml
-----
-
-. Locate the following section in the output:
-+
-[source,yaml]
-----
-...
-status:
-...
-  allocatable:
-    cpu: "4"
-    memory: 8010948Ki
-    pods: "110"
-  capacity:
-    cpu: "4"
-    memory: 8010948Ki
-    pods: "110"
-...
-----
diff --git a/_unused_topics/nodes-nodes-working-adding.adoc b/_unused_topics/nodes-nodes-working-adding.adoc
deleted file mode 100644
index 5efb65bc72ba..000000000000
--- a/_unused_topics/nodes-nodes-working-adding.adoc
+++ /dev/null
@@ -1,11 +0,0 @@
-// Module included in the following assemblies:
-//
-// * nodes/nodes-nodes-working.adoc
-
-[id="nodes-nodes-working-adding_{context}"]
-= Adding new nodes to your cluster
-
-////
-this entire section is obsolete for 4.0. nodes are added to the cluster using MachineSets in 4.0.
-https://github.com/openshift/openshift-docs/pull/12964#discussion_r242781872
-////
diff --git a/_unused_topics/nodes-pods-autoscaling-custom-metrics.adoc b/_unused_topics/nodes-pods-autoscaling-custom-metrics.adoc
deleted file mode 100644
index e2271d47ee3d..000000000000
--- a/_unused_topics/nodes-pods-autoscaling-custom-metrics.adoc
+++ /dev/null
@@ -1,83 +0,0 @@
-== Emitted events
-
-KEDA emits the following Kubernetes events:
-
-.Events
-[cols="3a,5a,5a",options="header"]
-|===
-
-|Event |Type |Description
-
-|ScaledObjectReady
-|Normal
-|On the first time a ScaledObject is ready, or if the previous ready condition status of the object was Unknown or False
-
-|ScaledJobReady
-|Normal
-|On the first time a ScaledJob is ready, or if the previous ready condition status of the object was Unknown or False
-
-|ScaledObjectCheckFailed
-|Warning
-|If the check validation for a ScaledObject fails
-
-|ScaledJobCheckFailed
-|Warning
-|If the check validation for a ScaledJob fails
-
-|ScaledObjectDeleted
-|Normal
-|When a ScaledObject is deleted and removed from KEDA watch
-
-|ScaledJobDeleted
-|Normal
-|When a ScaledJob is deleted and removed from KEDA watch
-
-|KEDAScalersStarted
-|Normal
-|When the Scalers watch loop has started for a ScaledObject or ScaledJob
-
-|KEDAScalersStopped
-|Normal
-|When the Scalers watch loop has stopped for a ScaledObject or a ScaledJob
-
-|KEDAScalerFailed
-|Warning
-|When a Scaler fails to create or check its event source
-
-|KEDAScaleTargetActivated
-|Normal
-|When the scale target (Deployment, StatefulSet, and so on) of a ScaledObject is scaled to 1
-
-|KEDAScaleTargetDeactivated
-|Normal
-|When the scale target (Deployment, StatefulSet, and so on) of a ScaledObject is scaled to 0
-
-|KEDAScaleTargetActivationFailed
-|Warning
-|When KEDA fails to scale the scale
target of a ScaledObject to 1 - -|KEDAScaleTargetDeactivationFailed -|Warning -|When KEDA fails to scale the scale target of a ScaledObject to 0 - -|KEDAJobsCreated -|Normal -|When KEDA creates jobs for a ScaledJob - -|TriggerAuthenticationAdded -|Normal -|When a new TriggerAuthentication is added - -|TriggerAuthenticationDeleted -|Normal -|When a TriggerAuthentication is deleted - -|ClusterTriggerAuthenticationAdded -|Normal -|When a new ClusterTriggerAuthentication is added - -|ClusterTriggerAuthenticationDeleted -|Normal -|When a ClusterTriggerAuthentication is deleted - -|=== diff --git a/_unused_topics/nodes-pods-daemonsets-pods.adoc b/_unused_topics/nodes-pods-daemonsets-pods.adoc deleted file mode 100644 index 74520493154b..000000000000 --- a/_unused_topics/nodes-pods-daemonsets-pods.adoc +++ /dev/null @@ -1,24 +0,0 @@ -// Module included in the following assemblies: -// -// * nodes/nodes-pods-daemonsets.adoc - -[id="nodes-pods-daemonsets-pods_{context}"] -= About Scheduling DaemonSets with the default scheduler - -In {product-title}, the scheduler selects the Node that a Pod runs on. However, in previous versions of {product-title}, DaemonSet pods were created and scheduled by the DaemonSet controller. - -The `ScheduleDaemonSetPods` feature, enabled by default, forces {product-title} to schedule DaemonSets using the default scheduler, instead of the DaemonSet controller. -The DaemonSet controller adds the `NodeAffinity` parameter to the DaemonSet pods, instead of the `.spec.nodeName` parameter. The default scheduler then binds the pod to the target host. If the DaemonSet pod is already configured for node affinity, the affinity is replaced. The DaemonSet controller only performs these operations when creating or modifying DaemonSet pods, and no changes are made to the `spec.template` parameter of the DaemonSet. - ----- -nodeAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - nodeSelectorTerms: - - matchFields: - - key: metadata.name - operator: In - values: - - target-host-name ----- - -In addition, the DaemonSet controller adds the `node.kubernetes.io/unschedulable:NoSchedule` toleration to DaemonSet Pods. The default scheduler ignores unschedulable Nodes when scheduling DaemonSet Pods. diff --git a/_unused_topics/nodes-pods-priority-examples.adoc b/_unused_topics/nodes-pods-priority-examples.adoc deleted file mode 100644 index 92d898eb2d3f..000000000000 --- a/_unused_topics/nodes-pods-priority-examples.adoc +++ /dev/null @@ -1,50 +0,0 @@ -// Module included in the following assemblies: -// -// * nodes/nodes-pods-priority.adoc - -[id="nodes-pods-priority-examples_{context}"] -= Pod priority example scenarios - -Pod priority and preemption assigns a priority to pods for scheduling. The scheduler will preempt (evict) lower-priority pods to schedule higher-priority pods. - -Typical preemption scenario:: -*Pod P* is a pending pod. - -. The scheduler locates *Node N*, where the removal of one or more pods enables *Pod P* to be scheduled on that node. - -. The scheduler deletes the lower-priority pods from the *Node N* and schedules *Pod P* on the node. - -. The `nominatedNodeName` field of *Pod P* is set to the name of *Node N*. - -[NOTE] -==== -*Pod P* is not necessarily scheduled to the nominated node. -==== - -Preemption and termination periods:: -The preempted pod has a long termination period. - -. The scheduler preempts a lower-priority pod on *Node N*. - -. The scheduler waits for the pod to gracefully terminate. - -. 
For other scheduling reasons, *Node M* becomes available. - -. The scheduler can then schedule *Pod P* on *Node M*. - -//// -Under consideration for future release -Pod priority and cross-node preemption:: -*Pod P* is being considered for *Node N*. - -. *Pod Q* is running on another node in the same zone as *Node N*. - -. *Pod P* has zone-wide anti-affinity with *Pod Q*, meaning *Pod P* cannot be scheduled in the same zone as *Pod Q*. -+ -There are no other cases of anti-affinity between *Pod P* and other pods in the zone. - -. To schedule *Pod P* on *Node N*, the scheduler must preempt *Pod Q* to remove the pod anti-affinity violation, allowing the scheduler to schedule *Pod P* on *Node N*. - -The scheduler can preempt *Pod Q*, but scheduler does not perform cross-node preemption. So, Pod P will be deemed unschedulable on Node N. -//// - diff --git a/_unused_topics/nodes-scheduler-node-antiaffinity-configuring.adoc b/_unused_topics/nodes-scheduler-node-antiaffinity-configuring.adoc deleted file mode 100644 index 083d94dfc6eb..000000000000 --- a/_unused_topics/nodes-scheduler-node-antiaffinity-configuring.adoc +++ /dev/null @@ -1,102 +0,0 @@ -// Module included in the following assemblies: -// -// * nodes/nodes-scheduler-node-affinity.adoc - -[id="nodes-scheduler-node-affinity-configuring_{context}"] -= Configuring node affinity rules - -You can configure two types of node affinity rules: required and preferred. - -== Configuring a required node affinity rule - -Required rules *must* be met before a pod can be scheduled on a node. - -.Procedure - -The following steps demonstrate a simple configuration that creates a node and a pod that the scheduler is required to place on the node. - -. Add a label to a node using the `oc label node` command: -+ ----- -$ oc label node node1 e2e-az-name=e2e-az1 ----- -+ -[TIP] -==== -You can alternatively apply the following YAML to add the label: - -[source,yaml] ----- -kind: Node -apiVersion: v1 -metadata: - name: - labels: - e2e-az-name: e2e-az1 ----- -==== - -. In the pod specification, use the `nodeAffinity` stanza to configure the `requiredDuringSchedulingIgnoredDuringExecution` parameter: -+ -.. Specify the key and values that must be met. If you want the new pod to be scheduled on the node you edited, use the same `key` and `value` parameters as the label in the node. -+ -.. Specify an `operator`. The operator can be `In`, `NotIn`, `Exists`, `DoesNotExist`, `Lt`, or `Gt`. For example, use the operator `In` to require the label to be in the node: -+ ----- -spec: - affinity: - nodeAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - nodeSelectorTerms: - - matchExpressions: - - key: e2e-az-name - operator: In - values: - - e2e-az1 - - e2e-az2 ----- - -. Create the pod: -+ ----- -$ oc create -f e2e-az2.yaml ----- - -== Configuring a Preferred Node Affinity Rule - -Preferred rules specify that, if the rule is met, the scheduler tries to enforce the rules, but does not guarantee enforcement. - -.Procedure - -The following steps demonstrate a simple configuration that creates a node and a pod that the scheduler tries to place on the node. - -. Add a label to a node using the `oc label node` command: -+ ----- -$ oc label node node1 e2e-az-name=e2e-az3 ----- - -. In the pod specification, use the `nodeAffinity` stanza to configure the `preferredDuringSchedulingIgnoredDuringExecution` parameter: -+ -.. Specify a weight for the node, as a number 1-100. The node with highest weight is preferred. -+ -.. Specify the key and values that must be met. 
If you want the new pod to be scheduled on the node you edited, use the same `key` and `value` parameters as the label in the node: -+ ----- - preferredDuringSchedulingIgnoredDuringExecution: - - weight: 1 - preference: - matchExpressions: - - key: e2e-az-name - operator: In - values: - - e2e-az3 ----- - -. Specify an `operator`. The operator can be `In`, `NotIn`, `Exists`, `DoesNotExist`, `Lt`, or `Gt`. For example, use the operator `In` to require the label to be in the node. - -. Create the pod. -+ ----- -$ oc create -f e2e-az3.yaml ----- diff --git a/_unused_topics/nodes-scheduler-taints-tolerations-examples.adoc b/_unused_topics/nodes-scheduler-taints-tolerations-examples.adoc deleted file mode 100644 index 43a1eb49af61..000000000000 --- a/_unused_topics/nodes-scheduler-taints-tolerations-examples.adoc +++ /dev/null @@ -1,141 +0,0 @@ -// Module included in the following assemblies: -// -// * nodes/nodes-scheduler-taints-tolerations.adoc - -[id="nodes-scheduler-taints-tolerations-examples_{context}"] -= Example taint and toleration scenarios - -Taints and tolerations are a flexible way to steer pods away from nodes or evict pods that should not be running on a node. A few of typical scenarios are: - -* Dedicating a node for a user -* Binding a user to a node -* Dedicating nodes with special hardware - -[id="nodes-scheduler-taints-tolerations-examples-user_{context}"] -== Dedicating a Node for a User - -You can specify a set of nodes for exclusive use by a particular set of users. - -.Procedure - -To specify dedicated nodes: - -. Add a taint to those nodes: -+ -For example: -+ ----- -$ oc adm taint nodes node1 dedicated=groupName:NoSchedule ----- -+ -[TIP] -==== -You can alternatively apply the following YAML to add the taint: - -[source,yaml] ----- -kind: Node -apiVersion: v1 -metadata: - name: - labels: - ... -spec: - taints: - - key: dedicated - value: groupName - effect: NoSchedule - ... ----- -==== - -. Add a corresponding toleration to the pods by writing a custom admission controller. -+ -Only the pods with the tolerations are allowed to use the dedicated nodes. - -[id="nodes-scheduler-taints-tolerations-examples-binding_{context}"] -== Binding a User to a Node - -You can configure a node so that particular users can use only the dedicated nodes. - -.Procedure - -To configure a node so that users can use only that node: - -. Add a taint to those nodes: -+ -For example: -+ ----- -$ oc adm taint nodes node1 dedicated=groupName:NoSchedule ----- -+ -[TIP] -==== -You can alternatively apply the following YAML to add the taint: - -[source,yaml] ----- -kind: Node -apiVersion: v1 -metadata: - name: - labels: - ... -spec: - taints: - - key: dedicated - value: groupName - effect: NoSchedule - ... ----- -==== - -. Add a corresponding toleration to the pods by writing a custom admission controller. -+ -The admission controller should add a node affinity to require that the pods can only schedule onto nodes labeled with the `key:value` label (`dedicated=groupName`). - -. Add a label similar to the taint (such as the `key:value` label) to the dedicated nodes. - -[id="nodes-scheduler-taints-tolerations-examples-special_{context}"] -== Nodes with Special Hardware - -In a cluster where a small subset of nodes have specialized hardware (for example GPUs), you can use taints and tolerations to keep pods that do not need the specialized hardware off of those nodes, leaving the nodes for pods that do need the specialized hardware. 
You can also require pods that need specialized hardware to use specific nodes.
-
-.Procedure
-
-To ensure pods are blocked from the specialized hardware:
-
-. Taint the nodes that have the specialized hardware using one of the following commands:
-+
-----
-$ oc adm taint nodes disktype=ssd:NoSchedule
-$ oc adm taint nodes disktype=ssd:PreferNoSchedule
-----
-+
-[TIP]
-====
-You can alternatively apply the following YAML to add the taint:
-
-[source,yaml]
-----
-kind: Node
-apiVersion: v1
-metadata:
-  name:
-  labels:
-    ...
-spec:
-  taints:
-    - key: disktype
-      value: ssd
-      effect: PreferNoSchedule
-  ...
-----
-====
-
-. Add a corresponding toleration to pods that use the special hardware by using an admission controller.
-
-For example, the admission controller could use characteristics of the pod to determine that the pod should be allowed to use the special nodes by adding a toleration.
-
-To ensure pods can only use the specialized hardware, you need some additional mechanism. For example, you could label the nodes that have the special hardware and use node affinity on the pods that need the hardware.
diff --git a/_unused_topics/nodes-scheduler-taints-tolerations-seconds.adoc b/_unused_topics/nodes-scheduler-taints-tolerations-seconds.adoc
deleted file mode 100644
index a8a788cb0aa8..000000000000
--- a/_unused_topics/nodes-scheduler-taints-tolerations-seconds.adoc
+++ /dev/null
@@ -1,21 +0,0 @@
-// Module included in the following assemblies:
-//
-// * nodes/nodes-scheduler-taints-tolerations.adoc
-
-[id="nodes-scheduler-taints-tolerations-seconds_{context}"]
-= Setting a default value for toleration seconds
-
-When using taints and tolerations, if taints are added to an existing node, non-matching pods on that node will be evicted. You can modify the time allowed before pods are evicted by using the toleration seconds plug-in, which sets the eviction period to five minutes by default.
-
-.Procedure
-
-To enable Default Toleration Seconds:
-
-Create an *AdmissionConfiguration* object:
-+
-----
-kind: AdmissionConfiguration
-apiVersion: apiserver.k8s.io/v1alpha1
-plugins:
-- name: DefaultTolerationSeconds
-...
-----
diff --git a/_unused_topics/olm-installing-po-after.adoc b/_unused_topics/olm-installing-po-after.adoc
deleted file mode 100644
index 1c8aeba9e35e..000000000000
--- a/_unused_topics/olm-installing-po-after.adoc
+++ /dev/null
@@ -1,98 +0,0 @@
-// Module included in the following assemblies:
-//
-// * operators/admin/olm-managing-po.adoc
-
-:_mod-docs-content-type: PROCEDURE
-[id="olm-installing-po-after_{context}"]
-= Installing platform Operators after cluster creation
-
-As a cluster administrator, you can install platform Operators after cluster creation on clusters that have enabled the `TechPreviewNoUpgrade` feature set by using the cluster-wide `PlatformOperator` API.
-
-.Procedure
-
-. Choose a platform Operator from the supported set of OLM-based Operators. For the list of this set and details on current limitations, see "Technology Preview restrictions for platform Operators".
-
-. Create a `PlatformOperator` object YAML file for your chosen platform Operator, for example a `service-mesh-po.yaml` file for the {SMProductName} Operator:
-+
-.Example `service-mesh-po.yaml` file
-[source,yaml]
-----
-apiVersion: platform.openshift.io/v1alpha1
-kind: PlatformOperator
-metadata:
-  name: service-mesh-po
-spec:
-  package:
-    name: servicemeshoperator
-----
-
-.
Create the `PlatformOperator` object by running the following command: -+ -[source,terminal] ----- -$ oc apply -f service-mesh-po.yaml ----- -+ -[NOTE] -==== -If your cluster does not have the `TechPreviewNoUpgrade` feature set enabled, the object creation fails with the following message: - -[source,terminal] ----- -error: resource mapping not found for name: "service-mesh-po" namespace: "" from "service-mesh-po.yaml": no matches for kind "PlatformOperator" in version "platform.openshift.io/v1alpha1" -ensure CRDs are installed first ----- -==== - -.Verification - -. Check the status of the `service-mesh-po` platform Operator by running the following command: -+ -[source,terminal] ----- -$ oc get platformoperator service-mesh-po -o yaml ----- -+ -.Example output -[source,yaml] ----- -... -status: - activeBundleDeployment: - name: service-mesh-po - conditions: - - lastTransitionTime: "2022-10-24T17:24:40Z" - message: Successfully applied the service-mesh-po BundleDeployment resource - reason: InstallSuccessful - status: "True" <1> - type: Installed ----- -<1> Wait until the `Installed` status condition reports `True`. - -. Verify that the `platform-operators-aggregated` cluster Operator is reporting an `Available=True` status: -+ -[source,terminal] ----- -$ oc get clusteroperator platform-operators-aggregated -o yaml ----- -+ -.Example output -[source,yaml] ----- -... -status: - conditions: - - lastTransitionTime: "2022-10-24T17:43:26Z" - message: All platform operators are in a successful state - reason: AsExpected - status: "False" - type: Progressing - - lastTransitionTime: "2022-10-24T17:43:26Z" - status: "False" - type: Degraded - - lastTransitionTime: "2022-10-24T17:43:26Z" - message: All platform operators are in a successful state - reason: AsExpected - status: "True" - type: Available ----- diff --git a/_unused_topics/olm-rukpak-about.adoc b/_unused_topics/olm-rukpak-about.adoc deleted file mode 100644 index d22c41c26071..000000000000 --- a/_unused_topics/olm-rukpak-about.adoc +++ /dev/null @@ -1,39 +0,0 @@ -// Module included in the following assemblies: -// -// * operators/understanding/olm-packaging-format.adoc -// * operators/olm_v1/olmv1_rukpak.adoc - -:_mod-docs-content-type: CONCEPT -[id="olm-rukpak-about_{context}"] -ifeval::["{context}" == "olm-packaging-format"] -= RukPak (Technology Preview) - -:FeatureName: RukPak -include::snippets/technology-preview.adoc[] - -{product-title} 4.14 introduces {olmv1-first} as a Technology Preview feature, which relies on the RukPak component. -endif::[] -ifeval::["{context}" == "olmv1-rukpak"] -= About RukPak -endif::[] - -RukPak is a pluggable solution for packaging and distributing cloud-native content. It supports advanced strategies for installation, updates, and policy. - -RukPak provides a content ecosystem for installing artifacts on a Kubernetes cluster. In {product-title} 4.16, RukPak supports {olmv0-first} bundles as artifacts. RukPak can then manage, scale, and upgrade these artifacts in a safe way to enable powerful cluster extensions. - -include::snippets/olmv1-rukpak-does-not-support-fips.adoc[] - -At its core, RukPak is a set of controllers and the `BundleDeployment` API. The API is packaged as a custom resource definition (CRD) that expresses what content to install on a cluster and how to create a running deployment of the content. The controllers watch for the API. 
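-
-For illustration only, a minimal `BundleDeployment` object might look like the following sketch. The API version, provisioner class name, and image reference are assumptions drawn from the upstream RukPak project, not values defined in this document:
-
-[source,yaml]
-----
-apiVersion: core.rukpak.io/v1alpha1
-kind: BundleDeployment
-metadata:
-  name: example-bundle-deployment
-spec:
-  provisionerClassName: core-rukpak-io-plain <1>
-  template:
-    metadata:
-      labels:
-        app: example-bundle
-    spec:
-      provisionerClassName: core-rukpak-io-plain
-      source:
-        type: image
-        image:
-          ref: quay.io/example-org/example-bundle:v0.0.1 <2>
-----
-<1> Provisioner name assumed from the upstream RukPak project; it installs and manages the bundle content.
-<2> Hypothetical bundle image reference.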
- -.Common terminology - -Bundle:: -A collection of Kubernetes manifests that define content to be deployed to a cluster -Bundle image:: -A container image that contains a bundle within its filesystem -Bundle Git repository:: -A Git repository that contains a bundle within a directory -Provisioner:: -Controllers that install and manage content on a Kubernetes cluster -Bundle deployment:: -Generates deployed instances of a bundle diff --git a/_unused_topics/olm-rukpak-bd.adoc b/_unused_topics/olm-rukpak-bd.adoc deleted file mode 100644 index dc02b73d78f8..000000000000 --- a/_unused_topics/olm-rukpak-bd.adoc +++ /dev/null @@ -1,18 +0,0 @@ -// Module included in the following assemblies: -// -// * operators/understanding/olm-packaging-format.adoc - -:_mod-docs-content-type: CONCEPT -[id="olm-rukpak-bd_{context}"] -= BundleDeployment - -In {product-title} 4.16, the RukPak `BundleDeployment` indicates when a bundle should be active. This includes pivoting from older versions of an active bundle. - -[WARNING] -==== -A `BundleDeployment` object changes the state of a Kubernetes cluster by installing and removing objects. It is important to verify and trust the content that is being installed and limit access, by using RBAC, to the `BundleDeployment` API to only those who require those permissions. -==== - -Much like pods generate instances of container images, a bundle deployment generates a deployed version of a bundle. A bundle deployment can be seen as a generalization of the pod concept. - -The specifics of how a bundle deployment makes changes to a cluster based on a referenced bundle is defined by the provisioner that is configured to watch that bundle deployment. diff --git a/_unused_topics/olm-rukpak-bundle-immutability.adoc b/_unused_topics/olm-rukpak-bundle-immutability.adoc deleted file mode 100644 index 628bba5ae5fc..000000000000 --- a/_unused_topics/olm-rukpak-bundle-immutability.adoc +++ /dev/null @@ -1,68 +0,0 @@ -// Module included in the following assemblies: -// -// * operators/understanding/olm-packaging-format.adoc - -:_mod-docs-content-type: CONCEPT -[id="olm-rukpak-bundle-immutability_{context}"] -= Bundle immutability - -After a `Bundle` object is accepted by the API server, the bundle is considered an immutable artifact by the rest of the RukPak system. This behavior enforces the notion that a bundle represents some unique, static piece of content to source onto the cluster. A user can have confidence that a particular bundle is pointing to a specific set of manifests and cannot be updated without creating a new bundle. This property is true for both standalone bundles and dynamic bundles created by an embedded `BundleTemplate` object. - -Bundle immutability is enforced by the core RukPak webhook. This webhook watches `Bundle` object events and, for any update to a bundle, checks whether the `spec` field of the existing bundle is semantically equal to that in the proposed updated bundle. If they are not equal, the update is rejected by the webhook. Other `Bundle` object fields, such as `metadata` or `status`, are updated during the bundle's lifecycle; it is only the `spec` field that is considered immutable. - -Applying a `Bundle` object and then attempting to update its spec should fail. For example, the following example creates a bundle: - -[source,terminal] ----- -$ oc apply -f -</index.json` file -[source,json] ----- -{ - { - "schema": "olm.package", - "name": "", - "defaultChannel": "" - } -} ----- - -. 
To create an `olm.bundle` blob, edit your `index.json` or `index.yaml` file, similar to the following example: -+ -.Example `/index.json` file with `olm.bundle` blob -[source,json] ----- -{ - "schema": "olm.bundle", - "name": ".v", - "package": "", - "image": "quay.io//:", - "properties": [ - { - "type": "olm.package", - "value": { - "packageName": "", - "version": "" - } - }, - { - "type": "olm.bundle.mediatype", - "value": "plain+v0" - } - ] -} ----- - -. To create an `olm.channel` blob, edit your `index.json` or `index.yaml` file, similar to the following example: -+ -.Example `/index.json` file with `olm.channel` blob -[source,json] ----- -{ - "schema": "olm.channel", - "name": "", - "package": "", - "entries": [ - { - "name": ".v" - } - ] -} ----- - -// Please refer to [channel naming conventions](https://olm.operatorframework.io/docs/best-practices/channel-naming/) for choosing the . An example of the is `candidate-v0`. - -.Verification - -. Open your `index.json` or `index.yaml` file and ensure it is similar to the following example: -+ -.Example `/index.json` file -[source,json] ----- -{ - "schema": "olm.package", - "name": "example-extension", - "defaultChannel": "preview" -} -{ - "schema": "olm.bundle", - "name": "example-extension.v0.0.1", - "package": "example-extension", - "image": "quay.io/example-org/example-extension-bundle:v0.0.1", - "properties": [ - { - "type": "olm.package", - "value": { - "packageName": "example-extension", - "version": "0.0.1" - } - }, - { - "type": "olm.bundle.mediatype", - "value": "plain+v0" - } - ] -} -{ - "schema": "olm.channel", - "name": "preview", - "package": "example-extension", - "entries": [ - { - "name": "example-extension.v0.0.1" - } - ] -} ----- - -. Validate your catalog by running the following command: -+ -[source,terminal] ----- -$ opm validate ----- diff --git a/_unused_topics/olmv1-building-plain-image.adoc b/_unused_topics/olmv1-building-plain-image.adoc deleted file mode 100644 index e1e7e40a4eed..000000000000 --- a/_unused_topics/olmv1-building-plain-image.adoc +++ /dev/null @@ -1,38 +0,0 @@ -// Module included in the following assemblies: -// -// * operators/olm_v1/olmv1-plain-bundles.adoc - -:_mod-docs-content-type: PROCEDURE - -[id="olmv1-building-plain-bundle-image-source_{context}"] -= Building a plain bundle image from an image source - -The Operator Controller currently supports installing plain bundles created only from a _plain bundle image_. - -.Procedure - -. At the root of your project, create a Dockerfile that can build a bundle image: -+ -.Example `plainbundle.Dockerfile` -[source,docker] ----- -FROM scratch <1> -ADD manifests /manifests ----- -<1> Use the `FROM scratch` directive to make the size of the image smaller. No other files or directories are required in the bundle image. - -. Build an Open Container Initiative (OCI)-compliant image by using your preferred build tool, similar to the following example: -+ -[source,terminal] ----- -$ podman build -f plainbundle.Dockerfile -t \ - quay.io//: . <1> ----- -<1> Use an image tag that references a repository where you have push access privileges. - -. 
Push the image to your remote registry by running the following command: -+ -[source,terminal] ----- -$ podman push quay.io//: ----- diff --git a/_unused_topics/olmv1-creating-a-pull-secret-for-catalogd.adoc b/_unused_topics/olmv1-creating-a-pull-secret-for-catalogd.adoc deleted file mode 100644 index 60b590cd04a1..000000000000 --- a/_unused_topics/olmv1-creating-a-pull-secret-for-catalogd.adoc +++ /dev/null @@ -1,99 +0,0 @@ -// Module included in the following assemblies: -// -// * operators/olm_v1/olmv1-installing-an-operator-from-a-catalog.adoc - -ifeval::["{context}" == "managing-catalogs"] -:olmv1-pullsecret-proc: -endif::[] - -:_mod-docs-content-type: PROCEDURE - -[id="olmv1-creating-a-pull-secret-for-catalogs-secure-registry_{context}"] -= Creating a pull secret for catalogs hosted on a private registry - -include::snippets/olmv1-secure-registry-pull-secret.adoc[] - -[IMPORTANT] -==== -include::snippets/olmv1-known-issue-private-registries.adoc[] -==== - -.Prerequisites - -* Login credentials for the secure registry -* Docker or Podman installed on your workstation - -.Procedure - -* If you already have a `.dockercfg` file with login credentials for the secure registry, create a pull secret by running the following command: -+ -[source,terminal] ----- -$ oc create secret generic \ - --from-file=.dockercfg=/.dockercfg \ - --type=kubernetes.io/dockercfg \ - --namespace=openshift-catalogd ----- -+ -.Example command -[%collapsible] -==== -[source,terminal] ----- -$ oc create secret generic redhat-cred \ - --from-file=.dockercfg=/home//.dockercfg \ - --type=kubernetes.io/dockercfg \ - --namespace=openshift-catalogd ----- -==== - -* If you already have a `$HOME/.docker/config.json` file with login credentials for the secured registry, create a pull secret by running the following command: -+ -[source,terminal] ----- -$ oc create secret generic \ - --from-file=.dockerconfigjson=/.docker/config.json \ - --type=kubernetes.io/dockerconfigjson \ - --namespace=openshift-catalogd ----- -+ -.Example command -[%collapsible] -==== -[source,terminal] ----- -$ oc create secret generic redhat-cred \ - --from-file=.dockerconfigjson=/home//.docker/config.json \ - --type=kubernetes.io/dockerconfigjson \ - --namespace=openshift-catalogd ----- -==== -* If you do not have a Docker configuration file with login credentials for the secure registry, create a pull secret by running the following command: -+ -[source,terminal] ----- -$ oc create secret docker-registry \ - --docker-server= \ - --docker-username= \ - --docker-password= \ - --docker-email= \ - --namespace=openshift-catalogd ----- -+ -.Example command -[%collapsible] -==== -[source,terminal] ----- -$ oc create secret docker-registry redhat-cred \ - --docker-server=registry.redhat.io \ - --docker-username=username \ - --docker-password=password \ - --docker-email=user@example.com \ - --namespace=openshift-catalogd ----- -==== - -ifeval::["{context}" == "olmv1-installing-operator"] -:!olmv1-pullsecret-proc: -endif::[] diff --git a/_unused_topics/olmv1-dependency-concepts.adoc b/_unused_topics/olmv1-dependency-concepts.adoc deleted file mode 100644 index 8444809dc60a..000000000000 --- a/_unused_topics/olmv1-dependency-concepts.adoc +++ /dev/null @@ -1,73 +0,0 @@ -// Module included in the following assemblies: -// -// * operators/olm_v1/olmv1-dependency.adoc - -:_mod-docs-content-type: CONCEPT - -[id="olmv1-dependency-concepts_{context}"] -= Concepts - -There are a set of expectations from the user that the package manager should never do the 
following: - -* Install a package whose dependencies can not be fulfilled or that conflict with the dependencies of another package -* Install a package whose constraints can not be met by the current set of installable packages -* Update a package in a way that breaks another that depends on it - -[id="olmv1-dependency-example-successful_{context}"] -== Example: Successful resolution - -A user wants to install packages A and B that have the following dependencies: - -|=== -|Package A `v0.1.0` |Package B `latest` -|↓ (depends on) |↓ (depends on) -|Package C `v0.1.0` |Package D `latest` -|=== - -Additionally, the user wants to pin the version of A to `v0.1.0`. - -*Packages and constraints passed to {olmv1}* - -.Packages -* A -* B - -.Constraints -* A `v0.1.0` depends on C `v0.1.0` -* A pinned to `v0.1.0` -* B depends on D - -.Output -* Resolution set: -** A `v0.1.0` -** B `latest` -** C `v0.1.0` -** D `latest` - -[id="olmv1-dependency-example-unsuccessful_{context}"] -== Example: Unsuccessful resolution - -A user wants to install packages A and B that have the following dependencies: - -|=== -|Package A `v0.1.0` |Package B `latest` -|↓ (depends on) |↓ (depends on) -|Package C `v0.1.0` |Package C `v0.2.0` -|=== - -Additionally, the user wants to pin the version of A to `v0.1.0`. - -*Packages and constraints passed to {olmv1}* - -.Packages -* A -* B - -.Constraints -* A `v0.1.0` depends on C `v0.1.0` -* A pinned to `v0.1.0` -* B `latest` depends on C `v0.2.0` - -.Output -* Resolution set: -** Unable to resolve because A `v0.1.0` requires C `v0.1.0`, which conflicts with B `latest` requiring C `v0.2.0` diff --git a/_unused_topics/olmv1-dependency.adoc b/_unused_topics/olmv1-dependency.adoc deleted file mode 100644 index bfd803a4b62c..000000000000 --- a/_unused_topics/olmv1-dependency.adoc +++ /dev/null @@ -1,14 +0,0 @@ -:_mod-docs-content-type: ASSEMBLY -[id="olmv1-dependency"] -= Dependency resolution in {olmv1} (Technology Preview) -include::_attributes/common-attributes.adoc[] -:context: olmv1-dependency - -toc::[] - -{olmv1-first} uses a dependency manager for resolving constraints over catalogs of bundles. - -:FeatureName: {olmv1} -include::snippets/technology-preview.adoc[] - -include::modules/olmv1-dependency-concepts.adoc[leveloffset=+1] diff --git a/_unused_topics/olmv1-major-version-zero.adoc b/_unused_topics/olmv1-major-version-zero.adoc deleted file mode 100644 index dfaded380ad3..000000000000 --- a/_unused_topics/olmv1-major-version-zero.adoc +++ /dev/null @@ -1,20 +0,0 @@ -// Module included in the following assemblies: -// -// * operators/olm_v1/olmv1-installing-an-operator-from-a-catalog.adoc - -// Moved to _unused_topics dir with 4.16 in case useful in a later release of OLMv1. - -:_mod-docs-content-type: CONCEPT - -[id="olmv1-major-version-zero_{context}"] -= Major version zero releases - -The semver standard specifies that major version zero releases (`O.y.z`) are reserved for initial development. During the initial development stage, the API is not stable and breaking changes might be introduced in any published version. As a result, major version zero releases apply a special set of update conditions. - -.Update conditions for major version zero releases - -* You cannot apply automatic updates when the major and minor versions are both zero, such as `0.0.*`. For example, automatic updates with the version range of `>=0.0.1 <0.1.0` are not allowed. -* You cannot apply automatic updates from one minor version to another within a major version zero release. 
For example, {olmv1} does not automatically apply an update from `0.1.0` to `0.2.0`. -* You can apply automatic updates from patch versions, such as `>=0.1.0 <0.2.0` or `>=0.2.0 <0.3.0`. - -When an automatic update is blocked by {olmv1}, you must manually verify and force the update by editing the Operator or extension's custom resource (CR). diff --git a/_unused_topics/olmv1-managing-plain-bundles.adoc b/_unused_topics/olmv1-managing-plain-bundles.adoc deleted file mode 100644 index 7a90a66e4635..000000000000 --- a/_unused_topics/olmv1-managing-plain-bundles.adoc +++ /dev/null @@ -1,72 +0,0 @@ -:_mod-docs-content-type: ASSEMBLY -include::_attributes/common-attributes.adoc[] -[id="olmv1-managing-plain-bundles"] -= Managing plain bundles in {olmv1} (Technology Preview) - -:context: olmv1-managing-catalogs - -toc::[] - -In {olmv1-first}, a _plain bundle_ is a static collection of arbitrary Kubernetes manifests in YAML format. The experimental `olm.bundle.mediatype` property of the `olm.bundle` schema object differentiates a plain bundle (`plain+v0`) from a regular (`registry+v1`) bundle. - -:FeatureName: {olmv1} -include::snippets/technology-preview.adoc[] - -// For more information, see the [Plain Bundle Specification](https://github.com/operator-framework/rukpak/blob/main/docs/bundles/plain.md) in the RukPak repository. - -As a cluster administrator, you can build and publish a file-based catalog that includes a plain bundle image by completing the following procedures: - -. Build a plain bundle image. -. Create a file-based catalog. -. Add the plain bundle image to your file-based catalog. -. Build your catalog as an image. -. Publish your catalog image. - -[role="_additional-resources"] -.Additional resources - -* xref:../../operators/olm_v1/arch/olmv1-rukpak.adoc#olmv1-rukpak[RukPak component and packaging format] - -[id="prerequisites_olmv1-plain-bundles"] -== Prerequisites - -* Access to an {product-title} cluster using an account with `cluster-admin` permissions -+ --- -include::snippets/olmv1-cli-only.adoc[] --- -* The `TechPreviewNoUpgrade` feature set enabled on the cluster -+ -[WARNING] -==== -Enabling the `TechPreviewNoUpgrade` feature set cannot be undone and prevents minor version updates. These feature sets are not recommended on production clusters. -==== -* The OpenShift CLI (`oc`) installed on your workstation -* The `opm` CLI installed on your workstation -* Docker or Podman installed on your workstation -* Push access to a container registry, such as link:https://quay.io[Quay] -* Kubernetes manifests for your bundle in a flat directory at the root of your project similar to the following structure: -+ -.Example directory structure -[source,terminal] ----- -manifests -├── namespace.yaml -├── service_account.yaml -├── cluster_role.yaml -├── cluster_role_binding.yaml -└── deployment.yaml ----- - - -[role="_additional-resources"] -.Additional resources - -* xref:../../nodes/clusters/nodes-cluster-enabling-features.adoc#nodes-cluster-enabling[Enabling features using feature gates] - -// - Only the `redhat-operators` catalog source enabled on the cluster. This is a restriction during the Technology Preview release. 
-
-include::modules/olmv1-building-plain-image.adoc[leveloffset=+1]
-include::modules/olmv1-creating-fbc.adoc[leveloffset=+1]
-include::modules/olmv1-adding-plain-to-fbc.adoc[leveloffset=+1]
-include::modules/olmv1-publishing-fbc.adoc[leveloffset=+1]
diff --git a/_unused_topics/olmv1-semver-support.adoc b/_unused_topics/olmv1-semver-support.adoc
deleted file mode 100644
index 9bc77d47544d..000000000000
--- a/_unused_topics/olmv1-semver-support.adoc
+++ /dev/null
@@ -1,26 +0,0 @@
-// Module included in the following assemblies:
-//
-// * operators/olm_v1/olmv1-installing-an-operator-from-a-catalog.adoc
-
-// Moved to _unused_topics dir with 4.16 in case useful in a later release of OLMv1.
-
-:_mod-docs-content-type: CONCEPT
-
-[id="olmv1-semver-support_{context}"]
-= Support for semantic versioning
-
-{olmv1-first} supports link:https://semver.org/[semantic versioning (semver)] when explicitly enabled. Cluster extension authors can use the semver standard to define compatible updates.
-
-[NOTE]
-====
-In {product-title} 4.16, {olmv1} uses {olmv0} semantics by default.
-====
-
-{olmv1} can use an extension's version number to determine if an update can be resolved successfully.
-
-Cluster administrators can define a range of acceptable versions to install and automatically update. For extensions that follow the semver standard, you can use comparison strings to specify a desired version range.
-
-[NOTE]
-====
-{olmv1} does not support automatic updates to the next major version. If you want to perform a major version update, you must verify and apply the update manually. For more information, see "Forcing an update or rollback".
-====
\ No newline at end of file
diff --git a/_unused_topics/osd-architecture.adoc b/_unused_topics/osd-architecture.adoc
deleted file mode 100644
index 301b58c0ff87..000000000000
--- a/_unused_topics/osd-architecture.adoc
+++ /dev/null
@@ -1,15 +0,0 @@
-:_mod-docs-content-type: ASSEMBLY
-[id="osd-architecture"]
-= Architecture concepts
-include::_attributes/attributes-openshift-dedicated.adoc[]
-:context: osd-architecture
-
-toc::[]
-
-Learn about OpenShift and basic container concepts used in the {product-title} architecture.
-
-include::modules/kubernetes-about.adoc[leveloffset=+1]
-
-include::modules/container-benefits.adoc[leveloffset=+1]
-
-include::modules/sd-vs-ocp.adoc[leveloffset=+1]
diff --git a/_unused_topics/osdk-updating-v1101-to-v1160.adoc b/_unused_topics/osdk-updating-v1101-to-v1160.adoc
deleted file mode 100644
index 61484e0e9430..000000000000
--- a/_unused_topics/osdk-updating-v1101-to-v1160.adoc
+++ /dev/null
@@ -1,195 +0,0 @@
-// Module included in the following assemblies:
-//
-// * operators/operator_sdk/osdk-upgrading-projects.adoc
-
-:osdk_ver: v1.16.0
-:osdk_ver_n1: v1.10.1
-
-:_mod-docs-content-type: PROCEDURE
-[id="osdk-upgrading-v1101-to-v1160_{context}"]
-= Updating projects for Operator SDK {osdk_ver}
-
-The following procedure updates an existing Operator project for compatibility with {osdk_ver}.
-
-[IMPORTANT]
-====
-* Operator SDK v1.16.0 supports Kubernetes 1.22.
-
-* Many deprecated `v1beta1` APIs were removed in Kubernetes 1.22, including `sigs.k8s.io/controller-runtime v0.10.0` and `controller-gen v0.7`.
-
-* Updating projects to Kubernetes 1.22 is a breaking change if you need to scaffold `v1beta1` APIs for custom resource definitions (CRDs) or webhooks to publish your project into older cluster versions.
-
-See link:https://docs.openshift.com/container-platform/4.9/release_notes/ocp-4-9-release-notes.html#ocp-4-9-osdk-k8s-api-bundle-validate[Validating bundle manifests for APIs removed from Kubernetes 1.22] and link:https://docs.openshift.com/container-platform/4.9/release_notes/ocp-4-9-release-notes.html#ocp-4-9-removed-kube-1-22-apis[Beta APIs removed from Kubernetes 1.22] for more information about changes introduced in Kubernetes 1.22.
-====
-
-.Prerequisites
-
-* Operator SDK {osdk_ver} installed.
-* An Operator project created or maintained with Operator SDK {osdk_ver_n1}.
-
-.Procedure
-
-. Add the `protocol` field in the `config/default/manager_auth_proxy_patch.yaml` and `config/rbac/auth_proxy_service.yaml` files:
-+
-[source,diff]
-----
-...
- ports:
- - containerPort: 8443
-+ protocol: TCP
- name: https
-----
-
-. Make the following changes to the `config/manager/manager.yaml` file:
-
-.. Increase the CPU and memory resource limits:
-+
-[source,diff]
-----
-resources:
- limits:
-- cpu: 100m
-- memory: 30Mi
-+ cpu: 200m
-+ memory: 100Mi
-----
-
-.. Add an annotation to specify the default container manager:
-+
-[source,yaml]
-----
-...
-template:
- metadata:
- annotations:
- kubectl.kubernetes.io/default-container: manager
-...
-----
-
-. Add `.PHONY` targets to all of the targets in your `Makefile` file.
-
-. For Go-based Operator projects, make the following changes:
-
-.. Install the `setup-envtest` binary.
-
-.. Change your `go.mod` file to update the dependencies:
-+
-[source,golang]
-----
-k8s.io/api v0.22.1
-k8s.io/apimachinery v0.22.1
-k8s.io/client-go v0.22.1
-sigs.k8s.io/controller-runtime v0.10.0
-----
-
-.. Run the `go mod tidy` command to download the dependencies:
-+
-[source,terminal]
-----
-$ go mod tidy
-----
-
-.. Make the following changes to your `Makefile` file:
-+
-[source,diff]
-----
-...
-
-+ ENVTEST_K8S_VERSION = 1.22
-
- test: manifests generate fmt vet envtest ## Run tests.
-- go test ./... -coverprofile cover.out
-+ KUBEBUILDER_ASSETS="$(shell $(ENVTEST) use $(ENVTEST_K8S_VERSION) -p path)" go test ./... -coverprofile cover.out
-...
-
-- $(CONTROLLER_GEN) $(CRD_OPTIONS) rbac:roleName=manager-role webhook paths="./..." output:crd:artifacts:config=config/crd/bases
-+ $(CONTROLLER_GEN) rbac:roleName=manager-role crd webhook paths="./..." output:crd:artifacts:config=config/crd/bases
-...
-
-# Produce CRDs that work back to Kubernetes 1.11 (no version conversion)
-- CRD_OPTIONS ?= "crd:trivialVersions=true,preserveUnknownFields=false"
-...
-- admissionReviewVersions={v1,v1beta1}
-+ admissionReviewVersions=v1
-...
-
-+ ifndef ignore-not-found
-+ ignore-not-found = false
-+ endif
-
-##@ Deployment
-...
-- sh kubectl delete -f -
-+ sh kubectl delete --ignore-not-found=$(ignore-not-found) -f -
-----
-
-.. Run the `make manifests` command to generate your manifests with the updated version of Kubernetes:
-+
-[source,terminal]
-----
-$ make manifests
-----
-
-. For Ansible-based Operator projects, make the following changes:
-+
-.. Change your `requirements.yml` file to include the following:
-
-... Replace the `community.kubernetes` collection with the `kubernetes.core` collection:
-+
-[source,yaml]
-----
-...
-- name: kubernetes.core
- version: "2.2.0"
-...
-----
-
-... Update the `operator_sdk.util` utility from version `0.2.0` to `0.3.1`:
-+
-[source,yaml]
-----
-...
-- name: operator_sdk.util
- version: "0.3.1"
-----
-
-.. Verify the default resource limits in the `config/manager/manager.yaml` file:
-+
-[source,yaml]
-----
-...
- # TODO(user): Configure the resources accordingly based on the project requirements.
- # More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
-
-resources:
- limits:
- cpu: 500m
- memory: 768Mi
- requests:
- cpu: 10m
- memory: 256Mi
-----
-+
-[IMPORTANT]
-====
-Operator SDK scaffolds these values as a reasonable default setting. Operator authors should set and optimize resource limits based on the requirements of their project.
-====
-
-.. Optional: Make the following changes if you want to run your Ansible-based Operator locally by using the `make run` command:
-
-... Change the run target in the `Makefile` file:
-+
-[source,terminal]
-----
-ANSIBLE_ROLES_PATH="$(ANSIBLE_ROLES_PATH):$(shell pwd)/roles" $(ANSIBLE_OPERATOR) run
-----
-
-... Update the local version of `ansible-runner` to 2.0.2 or later.
-+
-[IMPORTANT]
-====
-As of version 2.0, the `ansible-runner` tool includes changes in the command signature that are not compatible with earlier versions.
-====
-
-:!osdk_ver:
-:!osdk_ver_n1:
diff --git a/_unused_topics/osdk-updating-v125-to-v128.adoc b/_unused_topics/osdk-updating-v125-to-v128.adoc
deleted file mode 100644
index 1bbbe0a635ab..000000000000
--- a/_unused_topics/osdk-updating-v125-to-v128.adoc
+++ /dev/null
@@ -1,130 +0,0 @@
-// Module included in the following assemblies:
-//
-// * operators/operator_sdk/golang/osdk-golang-updating-projects.adoc
-// * operators/operator_sdk/ansible/osdk-ansible-updating-projects.adoc
-// * operators/operator_sdk/helm/osdk-helm-updating-projects.adoc
-// * operators/operator_sdk/helm/
-
-ifeval::["{context}" == "osdk-golang-updating-projects"]
-:golang:
-:type: Go
-endif::[]
-ifeval::["{context}" == "osdk-ansible-updating-projects"]
-:ansible:
-:type: Ansible
-endif::[]
-ifeval::["{context}" == "osdk-helm-updating-projects"]
-:helm:
-:type: Helm
-endif::[]
-
-:_mod-docs-content-type: PROCEDURE
-[id="osdk-upgrading-projects_{context}"]
-= Updating {type}-based Operator projects for Operator SDK {osdk_ver}
-
-The following procedure updates an existing {type}-based Operator project for compatibility with {osdk_ver}.
-
-.Prerequisites
-
-* Operator SDK {osdk_ver} installed
-* An Operator project created or maintained with Operator SDK {osdk_ver_n1}
-
-.Procedure
-
-ifdef::helm[]
-* Find the `ose-kube-rbac-proxy` pull spec in the following files, and update the image tag to `v4.14`:
-endif::[]
-ifdef::ansible,golang[]
-. Find the `ose-kube-rbac-proxy` pull spec in the following files, and update the image tag to `v4.14`:
-endif::[]
-+
---
-* `config/default/manager_auth_proxy_patch.yaml`
-* `bundle/manifests/memcached-operator.clusterserviceversion.yaml`
---
-+
-[source,yaml]
-----
-…
- containers:
- - name: kube-rbac-proxy
- image: registry.redhat.io/openshift4/ose-kube-rbac-proxy:v4.14 <1>
-…
-----
-<1> Update the tag version from `v4.13` to `v4.14`.
-
-ifdef::ansible[]
-. Update your Makefile's `run` target to the following:
-+
-[source,make]
-----
-.PHONY: run
-ANSIBLE_ROLES_PATH?="$(shell pwd)/roles"
-run: ansible-operator ## Run against the configured Kubernetes cluster in ~/.kube/config
- $(ANSIBLE_OPERATOR) run
-----
-
-. To upgrade the `kubernetes.core` collection to v2.4.0, replace the following in your project's `requirements.yaml` file:
-+
-[source,yaml]
-----
- - name: kubernetes.core
- version: "2.3.1"
-----
-+
-with:
-+
-[source,yaml]
-----
- - name: kubernetes.core
- version: "2.4.0"
-----
-endif::[]
-
-ifdef::golang[]
-. Modify your `go.mod` file to include the following dependencies and updated versions:
-+
-[source,go]
-----
-k8s.io/api v0.26.2
-k8s.io/apiextensions-apiserver v0.26.2
-k8s.io/apimachinery v0.26.2
-k8s.io/cli-runtime v0.26.2
-k8s.io/client-go v0.26.2
-k8s.io/kubectl v0.26.2
-sigs.k8s.io/controller-runtime v0.14.5
-sigs.k8s.io/controller-tools v0.11.3
-sigs.k8s.io/kubebuilder/v3 v3.9.1
-----
-
-. Download the latest dependencies by running the following command:
-+
-[source,terminal]
-----
-$ go mod tidy
-----
-
-. Modify your Makefile with the following changes:
-
-.. Change the `ENVTEST_K8S_VERSION` field from `1.26` to `1.27`.
-.. Change the `build` target from `generate fmt vet` to `manifests generate fmt vet`:
-+
-[source,diff]
-----
- - build: generate fmt vet ## Build manager binary.
- + build: manifests generate fmt vet ## Build manager binary.
-----
-endif::[]
-
-ifeval::["{context}" == "osdk-golang-updating-projects"]
-:!golang:
-:!type:
-endif::[]
-ifeval::["{context}" == "osdk-ansible-updating-projects"]
-:!ansible:
-:!type:
-endif::[]
-ifeval::["{context}" == "osdk-helm-updating-projects"]
-:!helm:
-:!type:
-endif::[]
\ No newline at end of file
diff --git a/_unused_topics/osdk-upgrading-v180-to-v1101.adoc b/_unused_topics/osdk-upgrading-v180-to-v1101.adoc
deleted file mode 100644
index 89e8643443de..000000000000
--- a/_unused_topics/osdk-upgrading-v180-to-v1101.adoc
+++ /dev/null
@@ -1,39 +0,0 @@
-// Module included in the following assemblies:
-//
-// * operators/operator_sdk/osdk-upgrading-projects.adoc
-
-:osdk_ver: v1.10.1
-:osdk_ver_n1: v1.8.0
-
-:_mod-docs-content-type: PROCEDURE
-[id="osdk-upgrading-v180-to-v1101_{context}"]
-= Upgrading projects for Operator SDK {osdk_ver}
-
-Perform the following steps to upgrade an existing Operator project for compatibility with {osdk_ver}.
-
-.Prerequisites
-
-- Operator SDK {osdk_ver} installed
-- Operator project that was previously created or maintained with Operator SDK {osdk_ver_n1}
-
-.Procedure
-
-* For Ansible-based Operator projects, update the command in the `Set pull policy` section of the `molecule/default/prepare.yml` file:
-+
-.`molecule/default/prepare.yml` file diff
-[%collapsible]
-====
-[source,diff]
-----
- - name: Set pull policy
-- command: '{{ "{{ kustomize }}" }} edit add patch pull_policy/{{ "{{ operator_pull_policy }}" }}.yaml'
-+ command: '{{ "{{ kustomize }}" }} edit add patch --path pull_policy/{{ "{{ operator_pull_policy }}" }}.yaml'
-----
-====
-+
-Ansible projects are now scaffolded with Kustomize version 3.8.7. This version of Kustomize requires that the path to patch files be provided with the `--path` flag in the `add patch` command.
-
-Your Operator project is now compatible with Operator SDK {osdk_ver}.
-
-:!osdk_ver:
-:!osdk_ver_n1:
diff --git a/_unused_topics/persistent-storage-csi-alicloud-disk.adoc b/_unused_topics/persistent-storage-csi-alicloud-disk.adoc
deleted file mode 100644
index de4440c33e4e..000000000000
--- a/_unused_topics/persistent-storage-csi-alicloud-disk.adoc
+++ /dev/null
@@ -1,25 +0,0 @@
-[id="persistent-storage-csi-alicloud-disk"]
-= AliCloud Disk CSI Driver Operator
-include::_attributes/common-attributes.adoc[]
-:context: persistent-storage-csi-alicloud-disk
-
-toc::[]
-
-[id="persistent-storage-csi-alicloud-disk-overview"]
-== Overview
-
-{product-title} can provision persistent volumes (PVs) by using the Container Storage Interface (CSI) driver for Alibaba AliCloud Disk Storage.
-
-Familiarity with xref:../../storage/understanding-persistent-storage.adoc#understanding-persistent-storage[persistent storage] and xref:../../storage/container_storage_interface/persistent-storage-csi.adoc#persistent-storage-csi[configuring CSI volumes] is recommended when working with a CSI Operator and driver.
-
-To create CSI-provisioned PVs that mount to AliCloud Disk storage assets, {product-title} installs the AliCloud Disk CSI Driver Operator and the AliCloud Disk CSI driver, by default, in the `openshift-cluster-csi-drivers` namespace.
-
-* The _AliCloud Disk CSI Driver Operator_ provides a storage class (`alicloud-disk`) that you can use to create persistent volume claims (PVCs). The AliCloud Disk CSI Driver Operator supports dynamic volume provisioning by allowing storage volumes to be created on demand, eliminating the need for cluster administrators to pre-provision storage. You can disable this default storage class if desired (see xref:../../storage/container_storage_interface/persistent-storage-csi-sc-manage.adoc#persistent-storage-csi-sc-manage[Managing the default storage class]).
-
-* The _AliCloud Disk CSI driver_ enables you to create and mount AliCloud Disk PVs.
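-For example, after installation you can confirm that the Operator and driver pods are running and that the default storage class exists. This verification sketch is an editorial addition rather than part of the original module; pod names vary by cluster:
-
-[source,terminal]
-----
-$ oc get pods -n openshift-cluster-csi-drivers
-$ oc get storageclass alicloud-disk
-----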
-
-include::modules/persistent-storage-csi-about.adoc[leveloffset=+1]
-
-[role="_additional-resources"]
-.Additional resources
-* xref:../../storage/container_storage_interface/persistent-storage-csi.adoc#persistent-storage-csi[Configuring CSI volumes]
diff --git a/_unused_topics/persistent-storage-csi-smb-cifs-create-sc.adoc b/_unused_topics/persistent-storage-csi-smb-cifs-create-sc.adoc
deleted file mode 100644
index b2abd2ae67a7..000000000000
--- a/_unused_topics/persistent-storage-csi-smb-cifs-create-sc.adoc
+++ /dev/null
@@ -1,80 +0,0 @@
-// Module included in the following assemblies:
-//
-// * storage/container_storage_interface/persistent-storage-csi-smb-cifs.adoc
-
-:_mod-docs-content-type: PROCEDURE
-[id="persistent-storage-csi-smb-cifs-create-sc_{context}"]
-= Creating a storage class for CIFS/SMB
-
-After installing the Operator, you should create a storage class for dynamic provisioning of Common Internet File System (CIFS) dialect/Server Message Block (SMB) protocol volumes.
-
-.Prerequisites
-* You are logged in to the running {product-title} cluster.
-
-* You have an SMB server installed and know the following information about the server:
-** Hostname
-** Share name
-** Username and password
-
-.Procedure
-To create a storage class:
-
-. Create a Secret for access to the Samba server by using the following command with the example YAML file:
-+
-[source,cli]
---
-$ oc create -f <file_name>.yaml
---
-+
-[source,yaml]
-.Secret example YAML file
---
-apiVersion: v1
-kind: Secret
-metadata:
- name: smbcreds <1>
- namespace: samba-server <2>
-stringData:
- username: <username> <3>
- password: <password> <4>
---
-<1> Name of the Secret.
-<2> Namespace for the Secret.
-<3> Username for the Secret.
-<4> Password for the Secret.
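-+
-Optionally, confirm that the Secret exists before continuing. This check is an illustrative addition that uses the example names above:
-+
-[source,terminal]
---
-$ oc get secret smbcreds -n samba-server
---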
-
-. Create a storage class by using the following command with the example YAML file:
-+
-[source,cli]
---
-$ oc create -f <file_name>.yaml
---
-+
-[source,yaml]
-.Storage class example YAML file
---
-apiVersion: storage.k8s.io/v1
-kind: StorageClass
-metadata:
- name: samba
-provisioner: smb.csi.k8s.io
-parameters:
- source: //<hostname>/<share_path> <1>
- csi.storage.k8s.io/provisioner-secret-name: smbcreds <2>
- csi.storage.k8s.io/provisioner-secret-namespace: samba-server <3>
- csi.storage.k8s.io/node-stage-secret-name: smbcreds <2>
- csi.storage.k8s.io/node-stage-secret-namespace: samba-server <3>
-reclaimPolicy: Delete
-volumeBindingMode: Immediate
-mountOptions:
- - dir_mode=0777
- - file_mode=0777
- - noperm
- - mfsymlinks
- - cache=strict
- - noserverino
---
-<1> The Samba server must be installed somewhere and reachable from the cluster, with `<hostname>` being the hostname for the Samba server and `<share_path>` the path the server is configured to have among the exported shares.
-<2> Name of the Secret that was set in the previous step.
-<3> Namespace for the Secret that was set in the previous step.
-
diff --git a/_unused_topics/pod-using-a-different-service-account.adoc b/_unused_topics/pod-using-a-different-service-account.adoc
deleted file mode 100644
index d543c53d4388..000000000000
--- a/_unused_topics/pod-using-a-different-service-account.adoc
+++ /dev/null
@@ -1,32 +0,0 @@
-// Module included in the following assemblies:
-//
-// * orphaned
-
-[id="pod-using-a-different-service-account_{context}"]
-= Running a pod with a different service account
-
-You can run a pod with a service account other than the default:
-
-.Prerequisites
-
-* Install the `oc` command-line interface.
-* Configure a service account.
-* Create a DeploymentConfig.
-
-.Procedure
-
-. Edit the DeploymentConfig:
-+
-----
-$ oc edit dc/<deployment_config>
-----
-
-. Add the `serviceAccount` and `serviceAccountName` parameters to the `spec`
-field, and specify the service account that you want to use:
-+
-----
-spec:
- securityContext: {}
- serviceAccount: <service_account>
- serviceAccountName: <service_account>
-----
diff --git a/_unused_topics/rbac-updating-policy-definitions.adoc b/_unused_topics/rbac-updating-policy-definitions.adoc
deleted file mode 100644
index 1a2e45a62e90..000000000000
--- a/_unused_topics/rbac-updating-policy-definitions.adoc
+++ /dev/null
@@ -1,57 +0,0 @@
-// Module included in the following assemblies:
-//
-// * orphaned
-
-ifdef::openshift-enterprise,openshift-webscale,openshift-origin[]
-[id="updating-policy-definitions_{context}"]
-= Updating policy definitions
-
-During a cluster upgrade, and on every restart of any master, the
-default cluster roles are automatically reconciled to restore any missing permissions.
-
-If you customized default cluster roles and want to ensure a role reconciliation
-does not modify them, you must take the following actions.
-
-.Procedure
-
-. Protect each role from reconciliation:
-+
-----
-$ oc annotate clusterrole.rbac <role_name> --overwrite rbac.authorization.kubernetes.io/autoupdate=false
-----
-+
-[WARNING]
-====
-You must manually update the roles that contain this setting to include any new
-or required permissions after upgrading.
-====
-
-. Generate a default bootstrap policy template file:
-+
-----
-$ oc adm create-bootstrap-policy-file --filename=policy.json
-----
-+
-[NOTE]
-====
-The contents of the file vary based on the {product-title} version, but the file
-contains only the default policies.
-====
-
-. Update the *_policy.json_* file to include any cluster role customizations.
-
-. Use the policy file to automatically reconcile roles and role bindings that
-are not reconcile protected:
-+
-----
-$ oc auth reconcile -f policy.json
-----
-
-. Reconcile Security Context Constraints:
-+
-----
-# oc adm policy reconcile-sccs \
- --additive-only=true \
- --confirm
-----
-endif::[]
diff --git a/_unused_topics/rollback-to-openshift-sdn.adoc b/_unused_topics/rollback-to-openshift-sdn.adoc
deleted file mode 100644
index c10e063e7d8e..000000000000
--- a/_unused_topics/rollback-to-openshift-sdn.adoc
+++ /dev/null
@@ -1,19 +0,0 @@
-:_mod-docs-content-type: ASSEMBLY
-[id="rollback-to-openshift-sdn"]
-= Rolling back to the OpenShift SDN network provider
-include::_attributes/common-attributes.adoc[]
-:context: rollback-to-openshift-sdn
-
-toc::[]
-
-As a cluster administrator, you can roll back to the OpenShift SDN network plugin from the OVN-Kubernetes network plugin by using either the _offline_ migration method or the _limited live_ migration method. You can roll back only after the migration to the OVN-Kubernetes network plugin has successfully completed.
-
-[NOTE]
-====
-* If you used the offline migration method to migrate to the OVN-Kubernetes network plugin from the OpenShift SDN network plugin, you should use the offline migration rollback method.
-* If you used the limited live migration method to migrate to the OVN-Kubernetes network plugin from the OpenShift SDN network plugin, you should use the limited live migration rollback method.
-====
-
-
-include::modules/nw-ovn-kubernetes-rollback.adoc[leveloffset=+1]
-include::modules/nw-ovn-kubernetes-rollback-live.adoc[leveloffset=+1]
diff --git a/_unused_topics/rosa-aws-understand.adoc b/_unused_topics/rosa-aws-understand.adoc
deleted file mode 100644
index 851979c5507b..000000000000
--- a/_unused_topics/rosa-aws-understand.adoc
+++ /dev/null
@@ -1,14 +0,0 @@
-// Module included in the following assemblies:
-//
-// * rosa_install_access_delete_clusters/rosa_getting_started_iam/rosa-aws-prereqs.adoc
-// * rosa_planning/rosa-sts-aws-prereqs.adoc
-
-[id="rosa-aws-prereqs_{context}"]
-= Deployment prerequisites
-To deploy {product-title} (ROSA) into your existing Amazon Web Services (AWS) account, Red Hat requires that several prerequisites are met.
-
-Red Hat recommends the use of AWS Organizations to manage multiple AWS accounts. An AWS Organization, managed by the customer, hosts multiple AWS accounts, and the organization has a root account that all other accounts refer to in the account hierarchy.
-
-It is a best practice for the ROSA cluster to be hosted in an AWS account within an AWS Organizational Unit. A service control policy (SCP) is created and applied to the AWS Organizational Unit to manage what services the AWS sub-accounts are permitted to access. The SCP applies only to available permissions within a single AWS account for all AWS sub-accounts within the Organizational Unit. It is also possible to apply an SCP to a single AWS account. All other accounts in the customer's AWS Organizations are managed in whatever manner the customer requires. Red Hat Site Reliability Engineers (SRE) will not have any control over SCPs within AWS Organizations.
-
-//2023-09-22: this module is not applicable to the prerequisites content.
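-For illustration only, the following sketch shows how an SCP might be created and attached to an Organizational Unit with the AWS CLI. The policy file, policy name, and IDs are hypothetical placeholders rather than values prescribed by Red Hat:
-
-[source,terminal]
-----
-$ aws organizations create-policy \
-    --name rosa-ou-scp \
-    --type SERVICE_CONTROL_POLICY \
-    --description "Example SCP for ROSA sub-accounts" \
-    --content file://rosa-ou-scp.json
-$ aws organizations attach-policy \
-    --policy-id <policy_id> \
-    --target-id <ou_id>
-----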
\ No newline at end of file
diff --git a/_unused_topics/rosa-basic-architecture-concepts.adoc b/_unused_topics/rosa-basic-architecture-concepts.adoc
deleted file mode 100644
index 7debb4ecad46..000000000000
--- a/_unused_topics/rosa-basic-architecture-concepts.adoc
+++ /dev/null
@@ -1,12 +0,0 @@
-:_mod-docs-content-type: ASSEMBLY
-[id="rosa-basic-architecture-concepts"]
-= Architecture concepts
-include::_attributes/attributes-openshift-dedicated.adoc[]
-:context: rosa-basic-architecture-concepts
-toc::[]
-
-Learn about basic OpenShift and container concepts used in the {product-title} architecture.
-
-include::modules/rosa-openshift-concepts.adoc[leveloffset=+1]
-include::modules/rosa-kubernetes-concept.adoc[leveloffset=+1]
-include::modules/rosa-containers-concept.adoc[leveloffset=+1]
diff --git a/_unused_topics/running-modified-installation.adoc b/_unused_topics/running-modified-installation.adoc
deleted file mode 100644
index f2c75c4d0d68..000000000000
--- a/_unused_topics/running-modified-installation.adoc
+++ /dev/null
@@ -1,18 +0,0 @@
-// Module included in the following assemblies:
-//
-// * TBD
-
-[id="running-modified-installation_{context}"]
-= Running a modified {product-title} installation
-
-Running a default {product-title} {product-version} cluster is the best way to ensure that the {product-title} cluster you get will be easy to install, maintain, and upgrade going forward. However, because you might want to add to or change your {product-title} cluster, openshift-install offers several ways to modify the default installation or add to it later. These include:
-
-* Creating an install-config file: Changing the contents of the install-config file, to identify things like the cluster name and credentials, is fully supported.
-* Creating ignition-config files: Viewing ignition-config files, which define how individual nodes are configured when they are first deployed, is fully supported. However, changing those files is not supported.
-* Creating Kubernetes (manifests) and {product-title} (openshift) manifest files: You can view manifest files in the manifests and openshift directories to see how Kubernetes and {product-title} features are configured, respectively. Changing those files is not supported.
-
-Whether you want to change your {product-title} installation or simply want a deeper understanding of the installation process, this section steps you through an {product-title} installation. Along the way, it covers:
-
-* The activities that go on under the covers to bring up an {product-title} cluster
-* Major components that are leveraged ({op-system}, Ignition, Terraform, and so on)
-* Opportunities to customize the install process (install configs, Ignition configs, manifests, and so on)
diff --git a/_unused_topics/security-context-constraints-restore-defaults.adoc b/_unused_topics/security-context-constraints-restore-defaults.adoc
deleted file mode 100644
index 089b41a32468..000000000000
--- a/_unused_topics/security-context-constraints-restore-defaults.adoc
+++ /dev/null
@@ -1,34 +0,0 @@
-// Module included in the following assemblies:
-//
-// * orphaned
-
-[id="security-context-constraints-restore-defaults_{context}"]
-= Restoring the default Security Context Constraints
-
-If the default Security Context Constraints (SCCs) are not present when the
-master restarts, they are created again. To reset SCCs to the default values or
-update existing SCCs to new default definitions after an upgrade, you can either:
-
-. Delete any SCC you want to reset and restart the master.
-. Use the `oc adm policy reconcile-sccs` command.
-
-The `oc adm policy reconcile-sccs` command sets all SCC policies to the default
-values but retains any additional users, groups, labels, annotations, and
-priorities you set.
-
-To view which SCCs will be changed, you can run the command with no options or
-by specifying your preferred output with the `-o <format>` option.
-
-After reviewing, it is recommended that you back up your existing SCCs and then
-use the `--confirm` option to persist the data.
-
-[NOTE]
-====
-If you want to reset priorities and grants, use the `--additive-only=false` option.
-====
-
-[NOTE]
-====
-If you customized settings other than priority, users, groups, labels, or annotations in an
-SCC, you lose those settings when you reconcile.
-====
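-A sketch of the back-up, review, then persist flow that this module describes, using the options named above (the backup file name is a hypothetical choice):
-
-----
-$ oc get scc -o yaml > sccs-backup.yaml
-$ oc adm policy reconcile-sccs
-$ oc adm policy reconcile-sccs --confirm
-----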
diff --git a/_unused_topics/security-overview.adoc b/_unused_topics/security-overview.adoc
deleted file mode 100644
index 4e9ea7cfc726..000000000000
--- a/_unused_topics/security-overview.adoc
+++ /dev/null
@@ -1,125 +0,0 @@
-// Module included in the following assemblies:
-//
-// * orphaned
-
-[id="security-overview_{context}"]
-= Security in {product-title}
-
-The {product-title} and Kubernetes APIs authenticate users who present
-credentials and then authorize them based on their role. Both developers and
-administrators can be authenticated through a number of means, primarily
-OAuth tokens and X.509 client certificates. OAuth tokens are signed with JSON
-Web Algorithm _RS256_, which is RSA signature algorithm PKCS#1 v1.5 with SHA-256.
-
-Developers, the clients of the system, typically make REST API calls from a
-client program like `oc` or to the {product-title} web console through their browser.
-Both methods use OAuth bearer tokens for most communication. Infrastructure components,
-like nodes, use client certificates that are generated by the system and contain their
-identities. Infrastructure components that run in containers use a token that is
-associated with their service account to connect to the API.
-
-Authorization is handled in the {product-title} policy engine, which defines
-actions like `create pod` or `list services`, and groups them into roles in a
-policy document. Roles are bound to users or groups by the user or group
-identifier. When a user or service account attempts an action, the policy engine
-checks for one or more of the roles assigned to the user, such as a cluster
-administrator or administrator of the current project, before allowing it to
-continue.
-
-ifdef::openshift-origin,openshift-online,openshift-enterprise,openshift-webscale[]
-Since every container that runs on the cluster is associated with a service
-account, it is also possible to associate secrets to those service accounts and have them
-automatically delivered into the container. This secret delivery enables the infrastructure to
-manage secrets for pulling and pushing images, builds, and the deployment
-components and also allows application code to use those secrets.
-endif::[]
-
-[id="architecture-overview-tls-support_{context}"]
-== TLS Support
-
-All communication channels with the REST API, as well as between master
-components such as etcd and the API server, are secured with TLS. TLS provides
-strong encryption, data integrity, and authentication of servers with X.509
-server certificates and public key infrastructure.
-ifdef::openshift-origin,openshift-enterprise[]
-By default, a new internal PKI is created for each deployment of
-{product-title}. The internal PKI uses 2048-bit RSA keys and SHA-256 signatures.
-endif::[]
-ifdef::openshift-origin,openshift-enterprise,openshift-webscale[]
-Custom certificates for public hosts are supported as well.
-endif::[]
-
-{product-title} uses Golang's standard library implementation of
-link:https://golang.org/pkg/crypto/tls/[*crypto/tls*] and does not depend on any
-external crypto and TLS libraries. Additionally, the client depends on external
-libraries for GSSAPI authentication and OpenPGP signatures. GSSAPI is typically
-provided by either MIT Kerberos or Heimdal Kerberos, which both use OpenSSL's
-libcrypto. OpenPGP signature verification is handled by libgpgme and GnuPG.
-
-The insecure versions SSL 2.0 and SSL 3.0 are unsupported and not available. The
-{product-title} server and `oc` client only provide TLS 1.2 by default. TLS 1.0
-and TLS 1.1 can be enabled in the server configuration. Both server and client
-prefer modern cipher suites with authenticated encryption algorithms and perfect
-forward secrecy. Cipher suites with deprecated and insecure algorithms such as
-RC4, 3DES, and MD5 are disabled. Some internal clients, like LDAP
-authentication, have less restrictive settings with TLS 1.0 to 1.2 and more cipher
-suites enabled.
-
-.Supported TLS Versions
-[cols="4*", options="header"]
-|===
-|TLS Version
-|{product-title} Server
-|`oc` Client
-|Other Clients
-
-|SSL 2.0
-|Unsupported
-|Unsupported
-|Unsupported
-
-|SSL 3.0
-|Unsupported
-|Unsupported
-|Unsupported
-
-|TLS 1.0
-|No footnoteref:[tlsconfig,Disabled by default, but can be enabled in the server configuration.]
-|No footnoteref:[tlsconfig]
-|Maybe footnoteref:[otherclient,Some internal clients, such as the LDAP client.]
-
-|TLS 1.1
-|No footnoteref:[tlsconfig]
-|No footnoteref:[tlsconfig]
-|Maybe footnoteref:[otherclient]
-
-|TLS 1.2
-|*Yes*
-|*Yes*
-|*Yes*
-
-|TLS 1.3
-|N/A footnoteref:[tls13,TLS 1.3 is still under development.]
-|N/A footnoteref:[tls13]
-|N/A footnoteref:[tls13]
-|===
-
-The following list of enabled cipher suites of {product-title}'s server and `oc`
-client is sorted in preferred order:
-
-- `TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305`
-- `TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305`
-- `TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256`
-- `TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256`
-- `TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384`
-- `TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384`
-- `TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256`
-- `TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256`
-- `TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA`
-- `TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA`
-- `TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA`
-- `TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA`
-- `TLS_RSA_WITH_AES_128_GCM_SHA256`
-- `TLS_RSA_WITH_AES_256_GCM_SHA384`
-- `TLS_RSA_WITH_AES_128_CBC_SHA`
-- `TLS_RSA_WITH_AES_256_CBC_SHA`
diff --git a/_unused_topics/serverless-creating-kubeconfig-file.adoc b/_unused_topics/serverless-creating-kubeconfig-file.adoc
deleted file mode 100644
index cecdb2fbba4f..000000000000
--- a/_unused_topics/serverless-creating-kubeconfig-file.adoc
+++ /dev/null
@@ -1,18 +0,0 @@
-// Module is included in the following assemblies:
-//
-// serverless/knative-client.adoc
-
-[id="create-kubeconfig-file_{context}"]
-= Creating a `kubeconfig` file
-
-Use `kubeconfig` files to organize information about clusters, users, namespaces, and authentication mechanisms. The CLI tool uses `kubeconfig` files to communicate with the API server of a cluster.
-
-.Procedure
-* Create a basic `kubeconfig` file from client certificates.
-Use the following command:
-
-----
-$ oc adm create-kubeconfig \
- --client-certificate=/path/to/client.crt \
- --client-key=/path/to/client.key \
- --certificate-authority=/path/to/ca.crt
-----
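-Once the file is created, you can point the CLI at it and confirm connectivity. This follow-up is an illustrative addition, and the path is a hypothetical placeholder:
-
-----
-$ export KUBECONFIG=<path_to_kubeconfig>
-$ oc whoami
-----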
\ No newline at end of file
diff --git a/_unused_topics/serverless-rn-template-module.adoc b/_unused_topics/serverless-rn-template-module.adoc
deleted file mode 100644
index 4c8ff63b2eda..000000000000
--- a/_unused_topics/serverless-rn-template-module.adoc
+++ /dev/null
@@ -1,30 +0,0 @@
-// Module included in the following assemblies
-//
-// * /serverless/serverless-release-notes.adoc
-
-:_mod-docs-content-type: REFERENCE
-[id="serverless-rn-<version>_{context}"]
-= Release notes for Red Hat {ServerlessProductName}
-// add a version, e.g. 1.20.0
-//update the <version> to match the filename and IDs, then remove these comments
-
-{ServerlessProductName} is now available. New features, changes, and known issues that pertain to {ServerlessProductName} on {product-title} are included in this topic.
-
-[id="new-features-<version>_{context}"]
-== New features
-// add a version, e.g. 1-20-0
-
-* {ServerlessProductName} now uses Knative Serving 0.x.
-* {ServerlessProductName} now uses Knative Eventing 0.x.
-* {ServerlessProductName} now uses Kourier 0.x.
-* {ServerlessProductName} now uses Knative (`kn`) CLI 0.x.
-* {ServerlessProductName} now uses Knative Kafka 0.x.
-* The `kn func` CLI plug-in now uses `func` 0.x.
-
-[id="fixed-issues-<version>_{context}"]
-== Fixed issues
-// add a version, e.g. 1-20-0
-
-[id="known-issues-<version>_{context}"]
-== Known issues
-// add a version, e.g. 1-20-0
diff --git a/_unused_topics/service-accounts-adding-secrets.adoc b/_unused_topics/service-accounts-adding-secrets.adoc
deleted file mode 100644
index 11d925ea62c7..000000000000
--- a/_unused_topics/service-accounts-adding-secrets.adoc
+++ /dev/null
@@ -1,70 +0,0 @@
-// Module included in the following assemblies:
-//
-// * authentication/using-service-accounts.adoc
-
-[id="service-accounts-managing-secrets_{context}"]
-== Managing secrets on a service account's pod
-
-In addition to providing API credentials, a pod's service account determines
-which secrets the pod is allowed to use.
-
-Pods use secrets in two ways:
-
-* image pull secrets, providing credentials used to pull images for the pod's containers
-* mountable secrets, injecting the contents of secrets into containers as files
-
-To allow a secret to be used as an image pull secret by a service account's
-pods, run:
-
-----
-$ oc secrets link <service_account> <secret> --for=pull
-----
-
-To allow a secret to be mounted by a service account's pods, run:
-
-----
-$ oc secrets link <service_account> <secret> --for=mount
-----
-
-[NOTE]
-====
-Limiting secrets to only the service accounts that reference them is disabled by
-default. This means that if `serviceAccountConfig.limitSecretReferences` is set
-to `false` (the default setting) in the master configuration file, mounting
-secrets to a service account's pods with the `--for=mount` option is not
-required. However, using the `--for=pull` option to enable using an image pull
-secret is required, regardless of the
-`serviceAccountConfig.limitSecretReferences` value.
-====
-
-This example creates and adds secrets to a service account:
-
-----
-$ oc create secret generic secret-plans \
- --from-file=plan1.txt \
- --from-file=plan2.txt
-secret/secret-plans
-
-$ oc create secret docker-registry my-pull-secret \
- --docker-username=mastermind \
- --docker-password=12345 \
- --docker-email=mastermind@example.com
-secret/my-pull-secret
-
-$ oc secrets link robot secret-plans --for=mount
-
-$ oc secrets link robot my-pull-secret --for=pull
-
-$ oc describe serviceaccount robot
-Name: robot
-Labels: <none>
-Image pull secrets: robot-dockercfg-624cx
- my-pull-secret
-
-Mountable secrets: robot-token-uzkbh
- robot-dockercfg-624cx
- secret-plans
-
-Tokens: robot-token-8bhpp
- robot-token-uzkbh
-----
diff --git a/_unused_topics/service-accounts-managing-secrets.adoc b/_unused_topics/service-accounts-managing-secrets.adoc
deleted file mode 100644
index cae0fb9bf790..000000000000
--- a/_unused_topics/service-accounts-managing-secrets.adoc
+++ /dev/null
@@ -1,65 +0,0 @@
-// Module included in the following assemblies:
-//
-// * authentication/using-service-accounts.adoc
-
-[id="service-accounts-managing-secrets_{context}"]
-= Managing allowed secrets
-
-You can use the service account's secrets in your application's pods for:
-
-* Image pull secrets, providing credentials used to pull images for the pod's containers
-* Mountable secrets, injecting the contents of secrets into containers as files
-
-.Procedure
-
-. Create a secret:
-+
-----
-$ oc create secret generic <secret_name> \
- --from-file=<file_name>.txt
-
-secret/<secret_name>
-----
-
-. To allow a secret to be used as an image pull secret by a service account's
-pods, run:
-+
-----
-$ oc secrets link <service_account> <secret_name> --for=pull
-----
-
-. To allow a secret to be mounted by a service account's pods, run:
-+
-----
-$ oc secrets link <service_account> <secret_name> --for=mount
-----
-
-. Confirm that the secret was added to the service account:
-+
-----
-$ oc describe serviceaccount <service_account>
-Name: <service_account>
-Labels: <none>
-Image pull secrets: robot-dockercfg-624cx
- my-pull-secret
-
-Mountable secrets: robot-token-uzkbh
- robot-dockercfg-624cx
- secret-plans
-
-Tokens: robot-token-8bhpp
- robot-token-uzkbh
-----
-
-////
-[NOTE]
-====
-Limiting secrets to only the service accounts that reference them is disabled by
-default. This means that if `serviceAccountConfig.limitSecretReferences` is set
-to `false` (the default setting) in the master configuration file, mounting
-secrets to a service account's pods with the `--for=mount` option is not
-required. However, using the `--for=pull` option to enable using an image pull
-secret is required, regardless of the
-`serviceAccountConfig.limitSecretReferences` value.
-====
-////
diff --git a/_unused_topics/sts-mode-installing-manual-config.adoc b/_unused_topics/sts-mode-installing-manual-config.adoc
deleted file mode 100644
index 134960c50507..000000000000
--- a/_unused_topics/sts-mode-installing-manual-config.adoc
+++ /dev/null
@@ -1,413 +0,0 @@
-// Module included in the following assemblies:
-//
-// * authentication/managing_cloud_provider_credentials/cco-mode-sts.adoc
-
-[id="sts-mode-installing-manual-config_{context}"]
-= Creating AWS resources manually
-
-To install an {product-title} cluster that is configured to use the CCO in manual mode with STS, you must first manually create the required AWS resources.
-
-.Procedure
-
-. Generate a private key to sign the `ServiceAccount` object:
-+
-[source,terminal]
-----
-$ openssl genrsa -out sa-signer 4096
-----
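-+
-Optionally, validate the structure of the generated key before continuing. This check is an illustrative addition, not part of the original procedure:
-+
-[source,terminal]
-----
-$ openssl rsa -in sa-signer -check -noout
-----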
-
-. Generate a `ServiceAccount` object public key:
-+
-[source,terminal]
-----
-$ openssl rsa -in sa-signer -pubout -out sa-signer.pub
-----
-
-. Create an S3 bucket to hold the OIDC configuration:
-+
-[source,terminal]
-----
-$ aws s3api create-bucket --bucket <oidc_bucket_name> --region <aws_region> --create-bucket-configuration LocationConstraint=<aws_region>
-----
-+
-[NOTE]
-====
-If the value of `<aws_region>` is `us-east-1`, do not specify the `LocationConstraint` parameter.
-====
-
-. Retain the S3 bucket URL:
-+
-[source,terminal]
-----
-OPENID_BUCKET_URL="https://<oidc_bucket_name>.s3.<aws_region>.amazonaws.com"
-----
-
-. Build an OIDC configuration:
-
-.. Create a file named `keys.json` that contains the following information:
-+
-[source,json]
-----
-{
- "keys": [
- {
- "use": "sig",
- "kty": "RSA",
- "kid": "<public_signing_key_id>",
- "alg": "RS256",
- "n": "<public_signing_key_modulus>",
- "e": "<public_signing_key_exponent>"
- }
- ]
-}
-----
-+
-where:
-
-*** `<public_signing_key_id>` is generated from the public key with:
-+
-[source,terminal]
-----
-$ openssl rsa -in sa-signer.pub -pubin --outform DER | openssl dgst -binary -sha256 | openssl base64 | tr '/+' '_-' | tr -d '='
-----
-+
-This command converts the public key to DER format, performs a SHA-256 checksum on the binary representation, encodes the data with base64 encoding, and then changes the base64-encoded output to base64URL encoding.
-
-*** `<public_signing_key_modulus>` is generated from the public key with:
-+
-[source,terminal]
-----
-$ openssl rsa -pubin -in sa-signer.pub -modulus -noout | sed -e 's/Modulus=//' | xxd -r -p | base64 -w0 | tr '/+' '_-' | tr -d '='
-----
-+
-This command prints the modulus of the public key, extracts the hex representation of the modulus, converts the ASCII hex to binary, encodes the data with base64 encoding, and then changes the base64-encoded output to base64URL encoding.
-
-*** `<public_signing_key_exponent>` is generated from the public key with:
-+
-[source,terminal]
-----
-$ printf "%016x" $(openssl rsa -pubin -in sa-signer.pub -noout -text | grep Exponent | awk '{ print $2 }') | awk '{ sub(/(00)+/, "", $1); print $1 }' | xxd -r -p | base64 -w0 | tr '/+' '_-' | tr -d '='
-----
-+
-This command extracts the decimal representation of the public key exponent, prints it as hex with a padded `0` if needed, removes leading `00` pairs, converts the ASCII hex to binary, encodes the data with base64 encoding, and then changes the base64-encoded output to use only characters that can be used in a URL.
-
-.. Create a file named `openid-configuration` that contains the following information:
-+
-[source,json]
-----
-{
- "issuer": "$OPENID_BUCKET_URL",
- "jwks_uri": "${OPENID_BUCKET_URL}/keys.json",
- "response_types_supported": [
- "id_token"
- ],
- "subject_types_supported": [
- "public"
- ],
- "id_token_signing_alg_values_supported": [
- "RS256"
- ],
- "claims_supported": [
- "aud",
- "exp",
- "sub",
- "iat",
- "iss",
- "sub"
- ]
-}
-----
-
-. Upload the OIDC configuration:
-+
-[source,terminal]
-----
-$ aws s3api put-object --bucket <oidc_bucket_name> --key keys.json --body ./keys.json
-----
-+
-[source,terminal]
-----
-$ aws s3api put-object --bucket <oidc_bucket_name> --key '.well-known/openid-configuration' --body ./openid-configuration
-----
-+
-Where `<oidc_bucket_name>` is the S3 bucket that was created to hold the OIDC configuration.
-
-. Allow the AWS IAM OpenID Connect (OIDC) identity provider to read these files:
-+
-[source,terminal]
-----
-$ aws s3api put-object-acl --bucket <oidc_bucket_name> --key keys.json --acl public-read
-----
-+
-[source,terminal]
-----
-$ aws s3api put-object-acl --bucket <oidc_bucket_name> --key '.well-known/openid-configuration' --acl public-read
-----
-
-. Create an AWS IAM OIDC identity provider:
-
-.. Get the certificate chain from the server that hosts the OIDC configuration:
-+
-[source,terminal]
-----
-$ echo | openssl s_client -servername <oidc_bucket_name>.s3.<aws_region>.amazonaws.com -connect <oidc_bucket_name>.s3.<aws_region>.amazonaws.com:443 -showcerts 2>/dev/null | awk '/BEGIN/,/END/{ if(/BEGIN/){a++}; out="cert"a".pem"; print >out}'
-----
-
-.. Calculate the fingerprint for the certificate at the root of the chain:
-+
-[source,terminal]
-----
-$ export BUCKET_FINGERPRINT=$(openssl x509 -in cert<number>.pem -fingerprint -noout | sed -e 's/.*Fingerprint=//' -e 's/://g')
-----
-+
-Where `<number>` is the highest number in the files that were saved. For example, if `2` is the highest number in the files that were saved, use `cert2.pem`.
-
-.. Create the identity provider:
-+
-[source,terminal]
-----
-$ aws iam create-open-id-connect-provider --url $OPENID_BUCKET_URL --thumbprint-list $BUCKET_FINGERPRINT --client-id-list openshift sts.amazonaws.com
-----
-
-.. Retain the returned ARN of the newly created identity provider. This ARN is later referred to as `<aws_iam_oidc_arn>`.
-
-. Generate IAM roles:
-
-.. Locate all `CredentialsRequest` CRs in this release image that target the cloud you are deploying on:
-+
-[source,terminal]
-----
-$ oc adm release extract quay.io/openshift-release-dev/ocp-release:4.<y>.<z>-x86_64 --credentials-requests --cloud=aws
-----
-+
-Where `<y>` and `<z>` are the numbers corresponding to the version of {product-title} you are installing.
-
-.. For each `CredentialsRequest` CR, create an IAM role of type `Web identity` using the previously created IAM Identity Provider that grants the necessary permissions and establishes a trust relationship that trusts the identity provider previously created.
-+
-For example, for the openshift-machine-api-operator `CredentialsRequest` CR in `0000_30_machine-api-operator_00_credentials-request.yaml`, create an IAM role that allows an identity from the created OIDC provider created for the cluster, similar to the following:
-+
-[source,json]
-----
-{
- "Role": {
- "Path": "/",
- "RoleName": "openshift-machine-api-aws-cloud-credentials",
- "RoleId": "ARSOMEROLEID",
- "Arn": "arn:aws:iam::123456789012:role/openshift-machine-api-aws-cloud-credentials",
- "CreateDate": "2021-01-06T15:54:13Z",
- "AssumeRolePolicyDocument": {
- "Version": "2012-10-17",
- "Statement": [
- {
- "Effect": "Allow",
- "Principal": {
- "Federated": "<aws_iam_oidc_arn>"
- },
- "Action": "sts:AssumeRoleWithWebIdentity",
- "Condition": {
- "StringEquals": {
- "<oidc_bucket_name>.s3.<aws_region>.amazonaws.com/$BUCKET_NAME:aud": "openshift"
- }
- }
- }
- ]
- },
- "Description": "OpenShift role for openshift-machine-api/aws-cloud-credentials",
- "MaxSessionDuration": 3600,
- "RoleLastUsed": {
- "LastUsedDate": "2021-02-03T02:51:24Z",
- "Region": "<aws_region>"
- }
- }
-}
-----
-+
-Where `<aws_iam_oidc_arn>` is the returned ARN of the newly created identity provider.
-
-.. To further restrict the role such that only specific cluster `ServiceAccount` objects can assume the role, modify the trust relationship of each role by updating the `.Role.AssumeRolePolicyDocument.Statement[].Condition` field to the specific `ServiceAccount` objects for each component.
-
-*** Modify the trust relationship of the `cluster-image-registry-operator` role to have the following condition:
-+
-[source,json]
-----
-"Condition": {
- "StringEquals": {
- "<oidc_bucket_name>.s3.<aws_region>.amazonaws.com:sub": [
- "system:serviceaccount:openshift-image-registry:registry",
- "system:serviceaccount:openshift-image-registry:cluster-image-registry-operator"
- ]
- }
-}
-----
-
-*** Modify the trust relationship of the `openshift-ingress-operator` to have the following condition:
-+
-[source,json]
-----
-"Condition": {
- "StringEquals": {
- "<oidc_bucket_name>.s3.<aws_region>.amazonaws.com:sub": [
- "system:serviceaccount:openshift-ingress-operator:ingress-operator"
- ]
- }
-}
-----
-
-*** Modify the trust relationship of the `openshift-cluster-csi-drivers` to have the following condition:
-+
-[source,json]
-----
-"Condition": {
- "StringEquals": {
- "<oidc_bucket_name>.s3.<aws_region>.amazonaws.com:sub": [
- "system:serviceaccount:openshift-cluster-csi-drivers:aws-ebs-csi-driver-operator",
- "system:serviceaccount:openshift-cluster-csi-drivers:aws-ebs-csi-driver-controller-sa"
- ]
- }
-}
-----
-
-*** Modify the trust relationship of the `openshift-machine-api` to have the following condition:
-+
-[source,json]
-----
-"Condition": {
- "StringEquals": {
- "<oidc_bucket_name>.s3.<aws_region>.amazonaws.com:sub": [
- "system:serviceaccount:openshift-machine-api:machine-api-controllers"
- ]
- }
-}
-----
-
-. For each IAM role, attach an IAM policy to the role that reflects the required permissions from the corresponding `CredentialsRequest` objects.
-+
-For example, for `openshift-machine-api`, attach an IAM policy similar to the following:
-+
-[source,json]
-----
-{
- "RoleName": "openshift-machine-api-aws-cloud-credentials",
- "PolicyName": "openshift-machine-api-aws-cloud-credentials",
- "PolicyDocument": {
- "Version": "2012-10-17",
- "Statement": [
- {
- "Effect": "Allow",
- "Action": [
- "ec2:CreateTags",
- "ec2:DescribeAvailabilityZones",
- "ec2:DescribeDhcpOptions",
- "ec2:DescribeImages",
- "ec2:DescribeInstances",
- "ec2:DescribeSecurityGroups",
- "ec2:DescribeSubnets",
- "ec2:DescribeVpcs",
- "ec2:RunInstances",
- "ec2:TerminateInstances",
- "elasticloadbalancing:DescribeLoadBalancers",
- "elasticloadbalancing:DescribeTargetGroups",
- "elasticloadbalancing:RegisterInstancesWithLoadBalancer",
- "elasticloadbalancing:RegisterTargets",
- "iam:PassRole",
- "iam:CreateServiceLinkedRole"
- ],
- "Resource": "*"
- },
- {
- "Effect": "Allow",
- "Action": [
- "kms:Decrypt",
- "kms:Encrypt",
- "kms:GenerateDataKey",
- "kms:GenerateDataKeyWithoutPlaintext",
- "kms:DescribeKey"
- ],
- "Resource": "*"
- },
- {
- "Effect": "Allow",
- "Action": [
- "kms:RevokeGrant",
- "kms:CreateGrant",
- "kms:ListGrants"
- ],
- "Resource": "*",
- "Condition": {
- "Bool": {
- "kms:GrantIsForAWSResource": true
- }
- }
- }
- ]
- }
-}
-----
-. Prepare to run the {product-title} installer:
-
-.. Create the `install-config.yaml` file:
-+
-[source,terminal]
-----
-$ ./openshift-install create install-config
-----
-
-.. Configure the cluster to install with the CCO in manual mode:
-+
-[source,terminal]
-----
-$ echo "credentialsMode: Manual" >> install-config.yaml
-----
-
-.. Create install manifests:
-+
-[source,terminal]
-----
-$ ./openshift-install create manifests
-----
-
-.. Create a `tls` directory, and copy the private key generated previously there:
-+
-[NOTE]
-====
-The target file name must be `./tls/bound-service-account-signing-key.key`.
-====
-+
-[source,terminal]
-----
-$ mkdir tls ; cp <path_to_sa_signer> ./tls/bound-service-account-signing-key.key
-----
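-+
-Optionally, confirm that the key is in place before continuing. This check is an illustrative addition:
-+
-[source,terminal]
-----
-$ ls tls/bound-service-account-signing-key.key
-----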
-
-.. Create a custom `Authentication` CR with the file name `cluster-authentication-02-config.yaml`:
-+
-[source,terminal]
-----
-$ cat << EOF > manifests/cluster-authentication-02-config.yaml
-apiVersion: config.openshift.io/v1
-kind: Authentication
-metadata:
- name: cluster
-spec:
- serviceAccountIssuer: $OPENID_BUCKET_URL
-EOF
-----
-
-.. For each `CredentialsRequest` CR that is extracted from the release image, create a secret with the target namespace and target name that is indicated in each `CredentialsRequest`, substituting the AWS IAM role ARN created previously for each component:
-+
-.Example secret manifest for `openshift-machine-api`
-+
-[source,terminal]
-----
-$ cat manifests/openshift-machine-api-aws-cloud-credentials-credentials.yaml
-apiVersion: v1
-stringData:
- credentials: |-
- [default]
- role_arn = arn:aws:iam::123456789012:role/openshift-machine-api-aws-cloud-credentials
- web_identity_token_file = /var/run/secrets/openshift/serviceaccount/token
-kind: Secret
-metadata:
- name: aws-cloud-credentials
- namespace: openshift-machine-api
-type: Opaque
-----
diff --git a/_unused_topics/sts-mode-installing-verifying.adoc b/_unused_topics/sts-mode-installing-verifying.adoc
deleted file mode 100644
index 20b17301c6bf..000000000000
--- a/_unused_topics/sts-mode-installing-verifying.adoc
+++ /dev/null
@@ -1,108 +0,0 @@
-// Module included in the following assemblies:
-//
-// * authentication/managing_cloud_provider_credentials/cco-mode-sts.adoc
-// * authentication/managing_cloud_provider_credentials/cco-mode-gcp-workload-identity.adoc
-
-ifeval::["{context}" == "cco-mode-sts"]
-:aws-sts:
-endif::[]
-ifeval::["{context}" == "cco-mode-gcp-workload-identity"]
-:google-cloud-platform:
-endif::[]
-
-[id="sts-mode-installing-verifying_{context}"]
-= Verifying the installation
-
-. Connect to the {product-title} cluster.
-
-. Verify that the cluster does not have `root` credentials:
-+
-ifdef::aws-sts[]
-[source,terminal]
-----
-$ oc get secrets -n kube-system aws-creds
-----
-endif::aws-sts[]
-ifdef::google-cloud-platform[]
-[source,terminal]
-----
-$ oc get secrets -n kube-system gcp-credentials
-----
-endif::google-cloud-platform[]
-+
-The output should look similar to:
-+
-ifdef::aws-sts[]
-[source,terminal]
-----
-Error from server (NotFound): secrets "aws-creds" not found
-----
-endif::aws-sts[]
-ifdef::google-cloud-platform[]
-[source,terminal]
-----
-Error from server (NotFound): secrets "gcp-credentials" not found
-----
-endif::google-cloud-platform[]
-
-. Verify that the components are assuming the
-ifdef::aws-sts[]
-IAM roles
-endif::aws-sts[]
-ifdef::google-cloud-platform[]
-service accounts
-endif::google-cloud-platform[]
-that are specified in the secret manifests, instead of using credentials that are created by the CCO:
-+
-.Example command with the Image Registry Operator
-ifdef::aws-sts[]
-[source,terminal]
-----
-$ oc get secrets -n openshift-image-registry installer-cloud-credentials -o json | jq -r .data.credentials | base64 --decode
-----
-endif::aws-sts[]
-ifdef::google-cloud-platform[]
-[source,terminal]
-----
-$ oc get secrets -n openshift-image-registry installer-cloud-credentials -o json | jq -r '.data."service_account.json"' | base64 -d
-----
-endif::google-cloud-platform[]
-+
-The output should show the role and web identity token that are used by the component and look similar to:
-+
-.Example output with the Image Registry Operator
-ifdef::aws-sts[]
-[source,terminal]
-----
-[default]
-role_arn = arn:aws:iam::123456789:role/openshift-image-registry-installer-cloud-credentials
-web_identity_token_file = /var/run/secrets/openshift/serviceaccount/token
-----
-endif::aws-sts[]
-ifdef::google-cloud-platform[]
-[source,json]
-----
-{
- "type": "external_account", <1>
- "audience": "//iam.googleapis.com/projects/123456789/locations/global/workloadIdentityPools/test-pool/providers/test-provider",
- "subject_token_type": "urn:ietf:params:oauth:token-type:jwt",
- "token_url": "https://sts.googleapis.com/v1/token",
- "service_account_impersonation_url": "https://iamcredentials.googleapis.com/v1/projects/-/serviceAccounts/<service_account_email>:generateAccessToken", <2>
- "credential_source": {
- "file": "/var/run/secrets/openshift/serviceaccount/token",
- "format": {
- "type": "text"
- }
- }
-}
-----
-<1> The credential type is `external_account`.
-<2> The resource URL of the service account used by the Image Registry Operator.
-endif::google-cloud-platform[]
-
-ifeval::["{context}" == "cco-mode-sts"]
-:!aws-sts:
-endif::[]
-ifeval::["{context}" == "cco-mode-gcp-workload-identity"]
-:!google-cloud-platform:
-endif::[]
diff --git a/_unused_topics/understanding-installation.adoc b/_unused_topics/understanding-installation.adoc
deleted file mode 100644
index dbd19c82853d..000000000000
--- a/_unused_topics/understanding-installation.adoc
+++ /dev/null
@@ -1,8 +0,0 @@
-// Module included in the following assemblies:
-//
-// * TBD
-
-[id="understanding-installation_{context}"]
-= Understanding {product-title} installation
-
-{product-title} installation is designed to quickly spin up an {product-title} cluster while requiring the user who starts the cluster to provide as little information as possible.
diff --git a/_unused_topics/understanding-workers-masters.adoc b/_unused_topics/understanding-workers-masters.adoc
deleted file mode 100644
index b0028c61b6b4..000000000000
--- a/_unused_topics/understanding-workers-masters.adoc
+++ /dev/null
@@ -1,33 +0,0 @@
-// Module included in the following assemblies:
-//
-// *
-
-[id="understanding-workers-masters_{context}"]
-= Understanding {product-title} workers and masters
-
-With installation complete, the cluster is now fully in charge of managing itself. Management of worker (compute) and master (control plane) nodes is done from within the cluster. So, before moving on to what the {product-title} cluster does to help you develop and deploy applications, you should explore how an {product-title} cluster manages itself. For that, we focus on three things: workers, masters (the control plane), and Operators.
-
-To see which workers and masters are running on your cluster, type:
-
-----
-$ oc get nodes
-
-NAME STATUS ROLES AGE VERSION
-ip-10-0-0-1.us-east-2.compute.internal Ready worker 4h20m v1.25.0
-ip-10-0-0-2.us-east-2.compute.internal Ready master 4h39m v1.25.0
-ip-10-0-0-3.us-east-2.compute.internal Ready worker 4h20m v1.25.0
-ip-10-0-0-4.us-east-2.compute.internal Ready master 4h39m v1.25.0
-ip-10-0-0-5.us-east-2.compute.internal Ready master 4h39m v1.25.0
-ip-10-0-0-6.us-east-2.compute.internal Ready worker 4h20m v1.25.0
-----
-
-To see more information about internal and external IP addresses, the type of operating system ({op-system}), kernel version, and container runtime (CRI-O), add the `-o wide` option.
-
-----
-$ oc get nodes -o wide
-
-NAME STATUS ROLES AGE VERSION INTERNAL-IP EXTERNAL-IP OS-IMAGE KERNEL-VERSION CONTAINER-RUNTIME
-ip-10-0-134-252.us-east-2.compute.internal Ready worker 17h v1.25.0 10.0.134.252 <none> Red Hat CoreOS 4.0 3.10.0-957.5.1.el7.x86_64 cri-o://1.25.0-1.rhaos4.0.git2f0cb0d.el7
-
-....
-----
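-For example, to list only the control plane (master) nodes, you can filter on the role label shown in the `ROLES` column. This one-liner is an illustrative addition to the module, assuming the standard `node-role.kubernetes.io/master` label is applied to those nodes:
-
-----
-$ oc get nodes --selector=node-role.kubernetes.io/master
-----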
These values display
-the version that the cluster is reconciling to, and the conditions
-array reports whether the update succeeded, is in progress, or is failing.
-All of the `ClusterVersionStatus` values are set by the cluster itself, and you
-cannot modify them.
-<4> The list of appropriate updates for the cluster. This list is empty if no
-updates are recommended, the update service is unavailable, or you specified
-an invalid channel.
-<5> The condition of the CVO. This section contains both the reason that the
-cluster entered its current condition and a message that provides more
-information about the condition.
-
-* `Available` means that the upgrade to the `desiredUpdate` value completed.
-* `Progressing` means that an upgrade is in progress.
-* `Failing` means that an update is blocked by a temporary or permanent error.
diff --git a/_unused_topics/using-images-source-to-image-java.adoc b/_unused_topics/using-images-source-to-image-java.adoc
deleted file mode 100644
index 06933b38c65f..000000000000
--- a/_unused_topics/using-images-source-to-image-java.adoc
+++ /dev/null
@@ -1,17 +0,0 @@
-// * Unused. Can be removed by 4.9 if still unused. Request full peer review for the module if it’s used.
-
-[id="using-images-source-to-image"]
-= Java
-include::_attributes/common-attributes.adoc[]
-:context: using-images-source-to-image
-toc::[]
-
-This topic includes information on the source-to-image (S2I) supported Java images available for {product-title} users.
-
-//Add link to Build -> S2I following updates
-
-include::modules/images-using-images-s2i-java.adoc[leveloffset=+1]
-include::modules/images-s2i-java-pulling-images.adoc[leveloffset=+1]
-include::modules/images-s2i-build-process-overview.adoc[leveloffset=+1]
-include::modules/images-s2i-java-configuration.adoc[leveloffset=+1]
-include::modules/images-s2i-java-build-deploy-applications.adoc[leveloffset=+1]
diff --git a/_unused_topics/using-images-source-to-image-nodejs.adoc b/_unused_topics/using-images-source-to-image-nodejs.adoc
deleted file mode 100644
index 5b176a926821..000000000000
--- a/_unused_topics/using-images-source-to-image-nodejs.adoc
+++ /dev/null
@@ -1,17 +0,0 @@
-// * Unused. Can be removed by 4.9 if still unused. Request full peer review for the module if it’s used.
-
-[id="using-images-source-to-image-nodejs"]
-= Node.js
-include::_attributes/common-attributes.adoc[]
-:context: using-images-source-to-image-nodejs
-toc::[]
-
-This topic includes information on the source-to-image (S2I) supported Node.js images available for {product-title} users.
-
-//Add link to Build -> S2I following updates
-
-include::modules/images-using-images-s2i-nodejs.adoc[leveloffset=+1]
-include::modules/images-s2i-nodejs-pulling-images.adoc[leveloffset=+1]
-include::modules/images-s2i-build-process-overview.adoc[leveloffset=+1]
-include::modules/images-using-images-s2i-nodejs-configuration.adoc[leveloffset=+1]
-include::modules/images-using-images-s2i-nodejs-hot-deploying.adoc[leveloffset=+1]
diff --git a/_unused_topics/using-images-source-to-image-perl.adoc b/_unused_topics/using-images-source-to-image-perl.adoc
deleted file mode 100644
index f49d044ab927..000000000000
--- a/_unused_topics/using-images-source-to-image-perl.adoc
+++ /dev/null
@@ -1,17 +0,0 @@
-// * Unused. Can be removed by 4.9 if still unused. Request full peer review for the module if it’s used.
-
-[id="using-images-source-to-image-perl"]
-= Perl
-include::_attributes/common-attributes.adoc[]
-:context: using-images-source-to-image-perl
-toc::[]
-
-This topic includes information on the source-to-image (S2I) supported Perl images available for {product-title} users.
-
-//Add link to Build -> S2I following updates
-
-include::modules/images-using-images-s2i-perl.adoc[leveloffset=+1]
-include::modules/images-using-images-s2i-perl-pulling-images.adoc[leveloffset=+1]
-include::modules/images-s2i-build-process-overview.adoc[leveloffset=+1]
-include::modules/images-using-images-s2i-perl-configuration.adoc[leveloffset=+1]
-include::modules/images-using-images-s2i-perl-hot-deploying.adoc[leveloffset=+1]
diff --git a/_unused_topics/using-images-source-to-image-php.adoc b/_unused_topics/using-images-source-to-image-php.adoc
deleted file mode 100644
index 275223464506..000000000000
--- a/_unused_topics/using-images-source-to-image-php.adoc
+++ /dev/null
@@ -1,17 +0,0 @@
-// * Unused. Can be removed by 4.9 if still unused. Request full peer review for the module if it’s used.
-
-[id="using-images-source-to-image-php"]
-= PHP
-include::_attributes/common-attributes.adoc[]
-:context: using-images-source-to-image-php
-toc::[]
-
-This topic includes information on the source-to-image (S2I) supported PHP images available for {product-title} users.
-
-//Add link to Build -> S2I following updates
-
-include::modules/images-using-images-s2i-php.adoc[leveloffset=+1]
-include::modules/images-using-images-s2i-php-pulling-images.adoc[leveloffset=+1]
-include::modules/images-s2i-build-process-overview.adoc[leveloffset=+1]
-include::modules/images-using-images-s2i-php-configuration.adoc[leveloffset=+1]
-include::modules/images-using-images-s2i-php-hot-deploying.adoc[leveloffset=+1]
diff --git a/_unused_topics/using-images-source-to-image-python.adoc b/_unused_topics/using-images-source-to-image-python.adoc
deleted file mode 100644
index f72452a4b3e7..000000000000
--- a/_unused_topics/using-images-source-to-image-python.adoc
+++ /dev/null
@@ -1,17 +0,0 @@
-// * Unused. Can be removed by 4.9 if still unused. Request full peer review for the module if it’s used.
-
-[id="using-images-source-to-image-python"]
-= Python
-include::_attributes/common-attributes.adoc[]
-:context: using-images-source-to-image-python
-toc::[]
-
-This topic includes information on the source-to-image (S2I) supported Python images available for {product-title} users.
-
-//Add link to Build -> S2I following updates
-
-include::modules/images-using-images-s2i-python.adoc[leveloffset=+1]
-include::modules/images-using-images-s2i-python-pulling-images.adoc[leveloffset=+1]
-include::modules/images-s2i-build-process-overview.adoc[leveloffset=+1]
-include::modules/images-using-images-s2i-python-configuration.adoc[leveloffset=+1]
-include::modules/images-using-images-s2i-python-hot-deploying.adoc[leveloffset=+1]
diff --git a/_unused_topics/using-images-source-to-image-ruby.adoc b/_unused_topics/using-images-source-to-image-ruby.adoc
deleted file mode 100644
index b96681837cc7..000000000000
--- a/_unused_topics/using-images-source-to-image-ruby.adoc
+++ /dev/null
@@ -1,17 +0,0 @@
-// * Unused. Can be removed by 4.9 if still unused. Request full peer review for the module if it’s used.
-
-[id="using-images-source-to-image-ruby"]
-= Ruby
-include::_attributes/common-attributes.adoc[]
-:context: using-images-source-to-image-ruby
-toc::[]
-
-This topic includes information on the source-to-image (S2I) supported Ruby images available for {product-title} users.
-
-//Add link to Build -> S2I following updates
-
-include::modules/images-using-images-s2i-ruby.adoc[leveloffset=+1]
-include::modules/images-using-images-s2i-ruby-pulling-images.adoc[leveloffset=+1]
-include::modules/images-s2i-build-process-overview.adoc[leveloffset=+1]
-include::modules/images-using-images-s2i-ruby-configuration.adoc[leveloffset=+1]
-include::modules/images-using-images-s2i-ruby-hot-deploying.adoc[leveloffset=+1]
diff --git a/_unused_topics/virt-creating-data-volumes-using-storage-api.adoc b/_unused_topics/virt-creating-data-volumes-using-storage-api.adoc
deleted file mode 100644
index b386a6828065..000000000000
--- a/_unused_topics/virt-creating-data-volumes-using-storage-api.adoc
+++ /dev/null
@@ -1,46 +0,0 @@
-// Module included in the following assemblies:
-//
-// * virt/storage/virt-creating-data-volumes.adoc
-
-:_mod-docs-content-type: PROCEDURE
-[id="virt-creating-data-volumes-using-storage-api_{context}"]
-= Creating data volumes by using the storage API
-
-When you create a data volume by using the storage API, the Containerized Data Importer (CDI) optimizes your persistent volume claim (PVC) allocation based on the type of storage supported by your selected storage class. You only have to specify the data volume name, namespace, and the amount of storage that you want to allocate.
-
-For example:
-
-* When using Ceph RBD, `accessModes` is automatically set to `ReadWriteMany`, which enables live migration. `volumeMode` is set to `Block` to maximize performance.
-* When you use `volumeMode: Filesystem`, CDI automatically requests more space, if required to accommodate file system overhead.
-
-The following YAML uses the storage API to request a data volume with two gigabytes of usable space. The user does not need to know the `volumeMode` in order to correctly estimate the required persistent volume claim (PVC) size. CDI chooses the optimal combination of `accessModes` and `volumeMode` attributes automatically. These optimal values are based on the type of storage or the defaults that you define in your storage profile. If you want to provide custom values, they override the system-calculated values.
-
-.Procedure
-
-. Create a YAML file for a `DataVolume` object as shown in the following example:
-+
-[source,yaml]
-----
-apiVersion: cdi.kubevirt.io/v1beta1
-kind: DataVolume
-metadata:
-  name: <1>
-spec:
-  source:
-    pvc:
-      name: "" <2>
-      namespace: "" <3>
-  storage:
-    storageClassName: <4>
-----
-<1> Specify the name of the new data volume.
-<2> Specify the name of the source PVC.
-<3> Specify the namespace of the source PVC.
-<4> Optional: If the storage class is not specified, the default storage class is used.
-
-. Create the data volume by running the following command:
-+
-[source,terminal]
-----
-$ oc create -f .yaml
-----
\ No newline at end of file
diff --git a/_unused_topics/windows-machine-config-operator.adoc b/_unused_topics/windows-machine-config-operator.adoc
deleted file mode 100644
index f315ccefb886..000000000000
--- a/_unused_topics/windows-machine-config-operator.adoc
+++ /dev/null
@@ -1,16 +0,0 @@
-// Module included in the following assemblies:
-//
-// * operators/operator-reference.adoc
-
-[id="windows-machine-config-operator_{context}"]
-= Windows Machine Config Operator
-
-[discrete]
-== Purpose
-
-The Windows Machine Config Operator (WMCO) orchestrates the process of deploying and managing Windows workloads on a cluster. The WMCO configures Windows machines into compute nodes, enabling Windows container workloads to run in {product-title} clusters. This is done by creating a compute machine set that uses a Windows image with the Docker-formatted container runtime installed. The WMCO completes all necessary steps to configure the underlying Windows VM so that it can join the cluster as a compute node.
-
-[discrete]
-== Project
-
-link:https://github.com/openshift/windows-machine-config-operator[windows-machine-config-operator]
diff --git a/adding_service_cluster/_attributes b/about/_attributes
similarity index 100%
rename from adding_service_cluster/_attributes
rename to about/_attributes
diff --git a/about/cluster-observability-operator-overview.adoc b/about/cluster-observability-operator-overview.adoc
new file mode 100644
index 000000000000..8c87f6ce6736
--- /dev/null
+++ b/about/cluster-observability-operator-overview.adoc
@@ -0,0 +1,36 @@
+:_mod-docs-content-type: ASSEMBLY
+[id="cluster-observability-operator-overview"]
+include::_attributes/common-attributes.adoc[]
+= {coo-full} overview
+:context: cluster_observability_operator_overview
+
+toc::[]
+
+
+The {coo-first} is an optional component of {ocp-product-title} designed for creating and managing highly customizable monitoring stacks. It enables cluster administrators to extensively automate the configuration and management of monitoring, offering a more tailored and detailed view of each namespace than the default {ocp-product-title} monitoring system provides.
+
+The {coo-short} deploys the following monitoring components:
+
+* **Prometheus** - A highly available Prometheus instance capable of sending metrics to an external endpoint by using remote write.
+* **Thanos Querier** (optional) - Enables querying of Prometheus instances from a central location.
+* **Alertmanager** (optional) - Provides alert configuration capabilities for different services.
+* **xref:../ui_plugins/observability-ui-plugins-overview.adoc#observability-ui-plugins-overview[UI plugins]** (optional) - Enhances the observability capabilities with plugins for monitoring, logging, distributed tracing, and troubleshooting.
+* **Korrel8r** (optional) - Provides observability signal correlation, powered by the open source Korrel8r project.
+* **xref:../ui_plugins/monitoring-ui-plugin.adoc#coo-incident-detection-overview_monitoring-ui-plugin[Incident detection]** (optional) - Groups related alerts into incidents to help you identify the root causes of alert bursts.
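+
+For example, a minimal `MonitoringStack` object might look like the following sketch. The metadata values and the `app` label are placeholders, and the fields shown follow the Monitoring API reference in this documentation set:
+
+[source,yaml]
+----
+apiVersion: monitoring.rhobs/v1alpha1
+kind: MonitoringStack
+metadata:
+  name: example-monitoring-stack # placeholder name
+  namespace: example-namespace # placeholder namespace
+spec:
+  logLevel: info # one of debug, info, warn, error
+  retention: 1d # duration matching [0-9]+(ms|s|m|h|d|w|y)
+  resourceSelector: # label selector for monitored resources; use {} to monitor everything
+    matchLabels:
+      app: example-app # placeholder label
+----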
+ + + +include::modules/coo-versus-default-ocp-monitoring.adoc[leveloffset=+1] + +include::modules/coo-advantages.adoc[leveloffset=+1] + +include::modules/coo-target-users.adoc[leveloffset=+1] + +//include::modules/monitoring-understanding-the-cluster-observability-operator.adoc[leveloffset=+1] + +include::modules/coo-server-side-apply.adoc[leveloffset=+1] + +[role="_additional-resources"] +.Additional resources + +* link:https://kubernetes.io/docs/reference/using-api/server-side-apply/[Kubernetes documentation for Server-Side Apply (SSA)] diff --git a/about/docinfo.xml b/about/docinfo.xml new file mode 100644 index 000000000000..18cdc0d91b7d --- /dev/null +++ b/about/docinfo.xml @@ -0,0 +1,13 @@ +About Red Hat OpenShift Cluster Observability Operator +{product-title} +{product-version} +Introduction to Cluster Observability Operator. + + This document provides an overview of Cluster Observability Operator features, and also + includes release notes and support information. + + + + Red Hat OpenShift Documentation Team + + \ No newline at end of file diff --git a/cicd/images b/about/images similarity index 100% rename from cicd/images rename to about/images diff --git a/cicd/modules b/about/modules similarity index 100% rename from cicd/modules rename to about/modules diff --git a/adding_service_cluster/snippets b/about/snippets similarity index 100% rename from adding_service_cluster/snippets rename to about/snippets diff --git a/adding_service_cluster/adding-service.adoc b/adding_service_cluster/adding-service.adoc deleted file mode 100644 index 363bf2106eda..000000000000 --- a/adding_service_cluster/adding-service.adoc +++ /dev/null @@ -1,23 +0,0 @@ -:_mod-docs-content-type: ASSEMBLY -include::_attributes/attributes-openshift-dedicated.adoc[] -[id="adding-service"] -= Adding services to a cluster using {cluster-manager-first} console -:context: adding-service - -toc::[] - -You can add, access, and remove add-on services for your {product-title} -ifdef::openshift-rosa[] -(ROSA) -endif::openshift-rosa[] -cluster by using {cluster-manager-first}. - -ifdef::openshift-rosa[] -== Prerequisites -* For the Amazon CloudWatch service, you must first install the `cluster-logging-operator` using the ROSA CLI (`rosa`). -endif::[] - -include::modules/adding-service-existing.adoc[leveloffset=+1] -include::modules/access-service.adoc[leveloffset=+1] -include::modules/deleting-service.adoc[leveloffset=+1] -//include::modules/deleting-service-cli.adoc[leveloffset=+1] diff --git a/adding_service_cluster/available-services.adoc b/adding_service_cluster/available-services.adoc deleted file mode 100644 index 382b72492a2d..000000000000 --- a/adding_service_cluster/available-services.adoc +++ /dev/null @@ -1,11 +0,0 @@ -:_mod-docs-content-type: ASSEMBLY -include::_attributes/attributes-openshift-dedicated.adoc[] -[id="available-services"] -= Add-on services available for {product-title} -:context: available-services - -toc::[] - -You can add services to your existing {product-title} cluster using the xref:../adding_service_cluster/adding-service.adoc#adding-service[{cluster-manager-first} console]. 
- -include::modules/osd-rhoam.adoc[leveloffset=+1] diff --git a/adding_service_cluster/images b/adding_service_cluster/images deleted file mode 120000 index 5e67573196d8..000000000000 --- a/adding_service_cluster/images +++ /dev/null @@ -1 +0,0 @@ -../images \ No newline at end of file diff --git a/adding_service_cluster/modules b/adding_service_cluster/modules deleted file mode 120000 index 464b823aca16..000000000000 --- a/adding_service_cluster/modules +++ /dev/null @@ -1 +0,0 @@ -../modules \ No newline at end of file diff --git a/adding_service_cluster/rosa-available-services.adoc b/adding_service_cluster/rosa-available-services.adoc deleted file mode 100644 index b8e1c94471dc..000000000000 --- a/adding_service_cluster/rosa-available-services.adoc +++ /dev/null @@ -1,41 +0,0 @@ -:_mod-docs-content-type: ASSEMBLY -include::_attributes/attributes-openshift-dedicated.adoc[] -[id="rosa-available-services"] -= Add-on services available for {product-title} -:context: rosa-available-services - - -You can add services to your existing {product-title} (ROSA) cluster using the xref:../adding_service_cluster/adding-service.adoc#adding-service[{cluster-manager-first} console]. - -These services can also be installed xref:../cli_reference/rosa_cli/rosa-manage-objects-cli.adoc#rosa-managing-objects-cli[using the `rosa` CLI]. - - -include::modules/aws-cloudwatch.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* link:https://aws.amazon.com/cloudwatch/[Amazon CloudWatch product information] - -include::modules/osd-rhoam.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources -* link:https://access.redhat.com/documentation/en-us/red_hat_openshift_api_management[Red{nbsp}Hat OpenShift API Management] documentation - -//// -include::modules/rosa-rhoda.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* link:https://www.redhat.com/en/technologies/cloud-computing/openshift/openshift-database-access[Red{nbsp}Hat OpenShift Database Access] product page -//// -// This module and additional resource are no longer included in the document due to OSDOCS-5817. - -include::modules/rosa-rhods.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources -* link:https://docs.redhat.com/en/documentation/red_hat_openshift_ai/2025[Red{nbsp}Hat OpenShift AI] documentation -* link:https://www.redhat.com/en/technologies/cloud-computing/openshift/openshift-ai[Red{nbsp}Hat OpenShift AI] product page diff --git a/applications/_attributes b/api/_attributes similarity index 100% rename from applications/_attributes rename to api/_attributes diff --git a/api/api-monitoring-package.adoc b/api/api-monitoring-package.adoc new file mode 100644 index 000000000000..d8fbc2e5238b --- /dev/null +++ b/api/api-monitoring-package.adoc @@ -0,0 +1,2509 @@ +:_mod-docs-content-type: ASSEMBLY +[id="api-monitoring-package"] += Monitoring API reference +include::_attributes/common-attributes.adoc[] +:context: api-monitoring-package + +toc::[] + +The resource types are xref:#monitoringstack[`MonitoringStack`] and xref:#thanosquerier[`ThanosQuerier`]. + +[id="monitoringstack"] +== MonitoringStack + +`MonitoringStack` is the Schema for the monitoringstacks API. 
+ +[cols="2,1,3,1"] +|=== +|Name |Type |Description |Required + +|`apiVersion` +|string +|`monitoring.rhobs/v1alpha1` +|true + +|`kind` +|string +|`MonitoringStack` +|true + +|link:https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#objectmeta-v1-meta[`metadata`] +|object +|Refer to the Kubernetes API documentation for the fields of the `metadata` field. +|true + +|xref:#monitoringstackspec[`spec`] +|object +|`MonitoringStackSpec` is the specification for the desired `MonitoringStack` object. +|true + + +|xref:#monitoringstackstatus[`status`] +|object +|`MonitoringStackStatus` defines the observed state of the `MonitoringStack` object. It should always be reconstructable from the state of the cluster and/or outside world. +|false +|=== + +[id="monitoringstackspec"] +== MonitoringStack.spec + +`MonitoringStackSpec` is the specification for the desired `MonitoringStack` object. + +[cols="2,1,4,1"] +|=== +|Name |Type |Description |Required + +|xref:#monitoringstackspecalertmanagerconfig[`alertmanagerConfig`] +|object +|Define `Alertmanager` config. + +_Default_: `map[disabled:false]` +|false + +|`logLevel` +|enum +|Set log levels of configured components. + +_Enum_: `debug, info, warn, error` + +_Default_: `info` +|false + +|xref:#monitoringstackspecnamespaceselector[`namespaceSelector`] +|object +a|Namespace selector for `MonitoringStack` resources. + +* To monitor everything, set to empty map selector. For example, `namespaceSelector: {}`. + +* To monitor resources in the namespace where `MonitoringStack` instance was created, set to null. For example, `namespaceSelector:`. +|false + +|`nodeSelector` +|`map[string]string` +|Define node selector for `MonitoringStack` pods. +|false + +|xref:#monitoringstackspecprometheusconfig[`prometheusConfig`] +|object +|Define prometheus config. + +_Default_: `map[replicas:2]` +|false + +|xref:#monitoringstackspecresourceselector[`resourceSelector`] +|object +a|Label selector for `MonitoringStack` resources. + +* To monitor everything, set to empty map selector. For example, `resourceSelector: {}`. + +* To disable service discovery, set to null. For example, `resourceSelector:`. +|false + +|xref:#monitoringstackspecresources[`resources`] +|object +|Define resources requests and limits for `MonitoringStack` pods. + +_Default_: `map[limits:map[cpu:500m memory:512Mi] requests:map[cpu:100m memory:256Mi]]` +|false + +|`retention` +|string +|Time duration to retain data. The string must match the regular expression `[0-9]+(ms\|s\|m\|h\|d\|w\|y)` (milliseconds seconds minutes hours days weeks years). + +_Default_: `120h` +|false + +|xref:#monitoringstackspectolerationsindex[`tolerations`] +|`[]object` +|Define tolerations for `MonitoringStack` pods. +|false +|=== + +[id="monitoringstackspecalertmanagerconfig"] +== MonitoringStack.spec.alertmanagerConfig + + +Define `Alertmanager` config. + +[cols="2,1,4,1"] +|=== +|Name |Type |Description |Required + +|`disabled` +|boolean +|Disables the deployment of `Alertmanager`. + +_Default_: false +|false + +|xref:#monitoringstackspecalertmanagerconfigwebtlsconfig[`webTLSConfig`] +|object +|Configure TLS options for the `Alertmanager` web server. +|false +|=== + +[id="monitoringstackspecalertmanagerconfigwebtlsconfig"] +== MonitoringStack.spec.alertmanagerConfig.webTLSConfig + +Configure TLS options for the `Alertmanager` web server. 
+ +[cols="2,1,4,1"] +|=== +|Name |Type |Description |Required + +|xref:#monitoringstackspecalertmanagerconfigwebtlsconfigcertificate[`certificate`] +|object +|Reference to the TLS public certificate for the web server. +|true + +|xref:#monitoringstackspecalertmanagerconfigwebtlsconfigcertificateauthority[`certificateAuthority`] +|object +|Reference to the root Certificate Authority used to verify the web server's certificate. +|true + +|xref:#monitoringstackspecalertmanagerconfigwebtlsconfigcertificateprivatekey[`privateKey`] +|object +|Reference to the TLS private key for the web server. +|true +|=== + +[id="monitoringstackspecalertmanagerconfigwebtlsconfigcertificate"] +== MonitoringStack.spec.alertmanagerConfig.webTLSConfig.certificate + +Reference to the TLS public certificate for the web server. + +[cols="1,1,3,1"] +|=== +|Name |Type |Description |Required + +|`key` +|string +|The key of the secret to select from. Must be a valid secret key. +|true + +|`name` +|string +|The name of the secret in the object's namespace to select from. +|true +|=== + +[id="monitoringstackspecalertmanagerconfigwebtlsconfigcertificateauthority"] +== MonitoringStack.spec.alertmanagerConfig.webTLSConfig.certificateAuthority + +Reference to the root Certificate Authority used to verify the web server's certificate. + +[cols="1,1,3,1"] +|=== +|Name |Type |Description |Required + +|`key` +|string +|The key of the secret to select from. Must be a valid secret key. +|true + +|`name` +|string +|The name of the secret in the object's namespace to select from. +|true +|=== + +[id="monitoringstackspecalertmanagerconfigwebtlsconfigcertificateprivatekey"] +== MonitoringStack.spec.alertmanagerConfig.webTLSConfig.privateKey + +Reference to the TLS private key for the web server. + +[cols="1,1,3,1"] +|=== +|Name |Type |Description |Required + +|`key` +|string +|The key of the secret to select from. Must be a valid secret key. +|true + +|`name` +|string +|The name of the secret in the object's namespace to select from. +|true +|=== + +[[monitoringstackspecnamespaceselector]] +== MonitoringStack.spec.namespaceSelector + +Namespace selector for `MonitoringStack` resources. + +* To monitor everything, set to empty map selector. For example, `namespaceSelector: {}`. + +* To monitor resources in the namespace where the `MonitoringStack` instance was created, set to null. For example, `namespaceSelector:`. + +[cols="2,2,4,1"] +|=== +|Name |Type |Description |Required + +|xref:#monitoringstackspecnamespaceselectormatchexpressionsindex[`matchExpressions`] +|`[]object` +|`matchExpressions` is a list of label selector requirements. The requirements are ANDed. +|false + +|`matchLabels` +|`map[string]string` +|`matchLabels` is a map of {key,value} pairs. A single {key,value} in the `matchLabels` map is equivalent to an element of `matchExpressions`, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. +|false +|=== + +[[monitoringstackspecnamespaceselectormatchexpressionsindex]] +== MonitoringStack.spec.namespaceSelector.matchExpressions[index] + +A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + +[cols="1,1,4,1"] +|=== +|Name |Type |Description |Required + +|`key` +|string +|`key` is the label key that the selector applies to. +|true + +|`operator` +|string +|`operator` represents a key's relationship to a set of values. Valid operators are `In`, `NotIn`, `Exists` and `DoesNotExist`. 
+|true
+
+|`values`
+|`[]string`
+|`values` is an array of string values. If the operator is `In` or `NotIn`, the values array must be non-empty. If the operator is `Exists` or `DoesNotExist`, the values array must be empty. This array is replaced during a strategic merge patch.
+|false
+|===
+
+[[monitoringstackspecprometheusconfig]]
+== MonitoringStack.spec.prometheusConfig
+
+Define Prometheus configuration.
+
+[cols="2,2,4,1"]
+|===
+|Name |Type |Description |Required
+
+|`enableOtlpHttpReceiver`
+|boolean
+|Enable Prometheus to accept OpenTelemetry Metrics via the `otlp/http` protocol. Defaults to `false`. The resulting endpoint is `/api/v1/otlp/v1/metrics`.
+|false
+
+|`enableRemoteWriteReceiver`
+|boolean
+|Enable Prometheus to be used as a receiver for the Prometheus remote write protocol. Defaults to `false`.
+|false
+
+|`externalLabels`
+|`map[string]string`
+|Define `ExternalLabels` for Prometheus.
+|false
+
+|xref:#monitoringstackspecprometheusconfigpersistentvolumeclaim[`persistentVolumeClaim`]
+|object
+|Define persistent volume claim for Prometheus.
+|false
+
+|xref:#monitoringstackspecprometheusconfigremotewriteindex[`remoteWrite`]
+|`[]object`
+|Define remote write for Prometheus.
+|false
+
+|`replicas`
+|integer
+|Number of replicas/pods to deploy for a Prometheus deployment.
+
+_Format_: int32
+
+_Default_: 2
+
+_Minimum_: 0
+|false
+
+|`scrapeInterval`
+|string
+|Default interval between scrapes.
+|false
+
+|xref:#monitoringstackspecprometheusconfigwebtlsconfig[`webTLSConfig`]
+|object
+|Configure TLS options for the Prometheus web server.
+|false
+|===
+
+[[monitoringstackspecprometheusconfigpersistentvolumeclaim]]
+== MonitoringStack.spec.prometheusConfig.persistentVolumeClaim
+
+
+Define persistent volume claim for Prometheus.
+
+[cols="2,2,4,1"]
+|===
+|Name |Type |Description |Required
+
+|`accessModes`
+|`[]string`
+|`accessModes` contains the desired access modes the volume should have. For more information, see link:https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1[Kubernetes Docs].
+|false
+
+|xref:#monitoringstackspecprometheusconfigpersistentvolumeclaimdatasource[`dataSource`]
+|object
+a|`dataSource` field can be used to specify either:
+
+* An existing `VolumeSnapshot` object (`snapshot.storage.k8s.io/VolumeSnapshot`)
+* An existing PVC (PersistentVolumeClaim)
+
+If the provisioner or an external controller can support the specified data source, it will create a new volume based on the contents of the specified data source.
+
+When the `AnyVolumeDataSource` feature gate is enabled, `dataSource` contents will be copied to `dataSourceRef`, and `dataSourceRef` contents will be copied to `dataSource` when `dataSourceRef.namespace` is not specified. If the namespace is specified, then `dataSourceRef` will not be copied to `dataSource`.
+|false
+
+|xref:#monitoringstackspecprometheusconfigpersistentvolumeclaimdatasourceref[`dataSourceRef`]
+|object
+a|`dataSourceRef` specifies the object from which to populate the volume with data, if a non-empty volume is desired. This may be any object from a non-empty API group (non core object) or a `PersistentVolumeClaim` object.
+
+When this field is specified, volume binding will only succeed if the type of the specified object matches some installed volume populator or dynamic provisioner. This field will replace the functionality of the `dataSource` field and as such if both fields are non-empty, they must have the same value.
+
+For backwards compatibility, when `namespace` is not specified in `dataSourceRef`, both fields (`dataSource` and `dataSourceRef`) will be set to the same value automatically if one of them is empty and the other is non-empty. When `namespace` is specified in `dataSourceRef`, `dataSource` isn't set to the same value and must be empty.
+
+There are three important differences between `dataSource` and `dataSourceRef`:
+
+* While `dataSource` only allows two specific types of objects, `dataSourceRef` allows any non-core object, as well as `PersistentVolumeClaim` objects.
+* While `dataSource` ignores disallowed values (dropping them), `dataSourceRef` preserves all values, and generates an error if a disallowed value is specified.
+* While `dataSource` only allows local objects, `dataSourceRef` allows objects in any namespaces.
+
+_Beta_: Using this field requires the `AnyVolumeDataSource` feature gate to be enabled.
+
+_Alpha_: Using the namespace field of `dataSourceRef` requires the `CrossNamespaceVolumeDataSource` feature gate to be enabled.
+|false
+
+|xref:#monitoringstackspecprometheusconfigpersistentvolumeclaimresources[`resources`]
+|object
+|`resources` represents the minimum resources the volume should have.
+
+If `RecoverVolumeExpansionFailure` feature is enabled, users are allowed to specify resource requirements that are lower than the previous value but must still be higher than the capacity recorded in the status field of the claim.
+
+For more information, see link:https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources[Kubernetes Docs]
+|false
+
+|xref:#monitoringstackspecprometheusconfigpersistentvolumeclaimselector[`selector`]
+|object
+|`selector` is a label query over volumes to consider for binding.
+|false
+
+|`storageClassName`
+|string
+|`storageClassName` is the name of the `StorageClass` required by the claim. For more information, see link:https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1[Kubernetes Docs]
+|false
+
+|`volumeAttributesClassName`
+|string
+|`volumeAttributesClassName` may be used to set the `VolumeAttributesClass` used by this claim.
+
+If specified, the CSI driver will create or update the volume with the attributes defined in the corresponding `VolumeAttributesClass`. This has a different purpose than `storageClassName`, as it can be changed after the claim is created. An empty string value means that no `VolumeAttributesClass` will be applied to the claim but it is not allowed to reset this field to the empty string once it is set.
+
+If unspecified and the `PersistentVolumeClaim` is unbound, the default `VolumeAttributesClass` will be set by the `persistentvolume` controller if it exists. If the resource referred to by `volumeAttributesClass` does not exist, this `PersistentVolumeClaim` will be set to a `Pending` state, as reflected by the `modifyVolumeStatus` field, until such a resource exists.
+
+For more information, see link:https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/[Kubernetes Docs]
+
+_Beta_: Using this field requires the `VolumeAttributesClass` feature gate to be enabled (off by default).
+|false
+
+|`volumeMode`
+|string
+|`volumeMode` defines what type of volume is required by the claim. Value of `Filesystem` is implied when not included in claim spec.
+|false
+
+|`volumeName`
+|string
+|`volumeName` is the binding reference to the `PersistentVolume` backing this claim.
+|false +|=== + +[[monitoringstackspecprometheusconfigpersistentvolumeclaimdatasource]] +== MonitoringStack.spec.prometheusConfig.persistentVolumeClaim.dataSource + +`dataSource` field can be used to specify either: + +* An existing `VolumeSnapshot` object (`snapshot.storage.k8s.io/VolumeSnapshot`) +* An existing PVC (PersistentVolumeClaim) + +If the provisioner or an external controller can support the specified data source, it will create a new volume based on the contents of the specified data source. When the `AnyVolumeDataSource` feature gate is enabled, `dataSource` contents will be copied to `dataSourceRef`, and `dataSourceRef` contents will be copied to `dataSource` when `dataSourceRef.namespace` is not specified. If the namespace is specified, then `dataSourceRef` will not be copied to `dataSource`. + +[cols="1,1,4,1"] +|=== +|Name |Type |Description |Required + +|`kind` +|string +|`kind` is the type of resource being referenced +|true + +|`name` +|string +|`name` is the name of resource being referenced +|true + +|`apiGroup` +|string +|`apiGroup` is the group for the resource being referenced. If `apiGroup` is not specified, the specified `kind` must be in the core API group. For any other third-party types, `apiGroup` is required. +|false +|=== + +[[monitoringstackspecprometheusconfigpersistentvolumeclaimdatasourceref]] +== MonitoringStack.spec.prometheusConfig.persistentVolumeClaim.dataSourceRef + +`dataSourceRef` specifies the object from which to populate the volume with data, if a non-empty volume is desired. This may be any object from a non-empty API group (non core object) or a `PersistentVolumeClaim` object. When this field is specified, volume binding will only succeed if the type of the specified object matches some installed volume populator or dynamic provisioner. + +This field will replace the functionality of the `dataSource` field and as such if both fields are non-empty, they must have the same value. For backwards compatibility, when `namespace` isn't specified in `dataSourceRef`, both fields (`dataSource` and `dataSourceRef`) will be set to the same value automatically if one of them is empty and the other is non-empty. When `namespace` is specified in `dataSourceRef`, `dataSource` isn't set to the same value and must be empty. + +There are three important differences between `dataSource` and `dataSourceRef`: + +* While `dataSource` only allows two specific types of objects, `dataSourceRef` allows any non-core object, as well as `PersistentVolumeClaim` objects. +* While `dataSource` ignores disallowed values (dropping them), `dataSourceRef` preserves all values, and generates an error if a disallowed value is specified. +* While `dataSource` only allows local objects, `dataSourceRef` allows objects in any namespaces. + +_Beta_: Using this field requires the `AnyVolumeDataSource` feature gate to be enabled. + +_Alpha_: Using the namespace field of `dataSourceRef` requires the `CrossNamespaceVolumeDataSource` feature gate to be enabled. + +[cols="1,1,4,1"] +|=== +|Name |Type |Description |Required + +|`kind` +|string +|`kind` is the type of resource being referenced +|true + +|`name` +|string +|`name` is the name of resource being referenced +|true + +|`apiGroup` +|string +|`apiGroup` is the group for the resource being referenced. + +If `apiGroup` is not specified, the specified `kind` must be in the core API group. For any other third-party types, `apiGroup` is required. 
+|false + +|`namespace` +|string +|`namespace` is the namespace of resource being referenced. + +Note that when a namespace is specified, a `gateway.networking.k8s.io/ReferenceGrant` object is required in the referent namespace to allow that namespace's owner to accept the reference. See the `ReferenceGrant` documentation for details. + +_Alpha_: This field requires the `CrossNamespaceVolumeDataSource` feature gate to be enabled. +|false +|=== + +[[monitoringstackspecprometheusconfigpersistentvolumeclaimresources]] +== MonitoringStack.spec.prometheusConfig.persistentVolumeClaim.resources + +`resources` represents the minimum resources the volume should have. + +If `RecoverVolumeExpansionFailure` feature is enabled users are allowed to specify resource requirements that are lower than the previous value but must still be higher than the capacity recorded in the status field of the claim. + +For more information, see link:https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources[Kubernetes Docs] + +[cols="2,2,4,1"] +|=== +|Name |Type |Description |Required + +|`limits` +|`map[string]int or string` +|`Limits` describes the maximum amount of compute resources allowed. + +For more information, see link:https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/[Kubernetes Docs] +|false + +|`requests` +|`map[string]int or string` +|`Requests` describes the minimum amount of compute resources required. + +If `Requests` is omitted for a container, it defaults to `Limits` if that is explicitly specified, otherwise to an implementation-defined value. `Requests` cannot exceed `Limits`. + +For more information, see link:https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/[Kubernetes Docs] +|false +|=== + +[[monitoringstackspecprometheusconfigpersistentvolumeclaimselector]] +== MonitoringStack.spec.prometheusConfig.persistentVolumeClaim.selector + + +`selector` is a label query over volumes to consider for binding. + +[cols="2,2,4,1"] +|=== +|Name |Type |Description |Required + +|xref:#monitoringstackspecprometheusconfigpersistentvolumeclaimselectormatchexpressionsindex[`matchExpressions`] +|`[]object` +|`matchExpressions` is a list of label selector requirements. The requirements are ANDed. +|false + +|`matchLabels` +|`map[string]string` +|`matchLabels` is a map of {key,value} pairs. + +A single {key,value} in the `matchLabels` map is equivalent to an element of `matchExpressions`, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. +|false +|=== + +[[monitoringstackspecprometheusconfigpersistentvolumeclaimselectormatchexpressionsindex]] +== MonitoringStack.spec.prometheusConfig.persistentVolumeClaim.selector.matchExpressions[index] + +A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + +[cols="1,1,4,1"] +|=== +|Name |Type |Description |Required + +|`key` +|string +|`key` is the label key that the selector applies to. +|true + +|`operator` +|string +|`operator` represents a key's relationship to a set of values. Valid operators are `In`, `NotIn`, `Exists` and `DoesNotExist`. +|true + +|`values` +|`[]string` +|`values` is an array of string values. + +If the operator is `In` or `NotIn`, the values array must be non-empty. If the operator is `Exists` or `DoesNotExist`, the values array must be empty. This array is replaced during a strategic merge patch. 
+|false
+|===
+
+[[monitoringstackspecprometheusconfigremotewriteindex]]
+== MonitoringStack.spec.prometheusConfig.remoteWrite[index]
+
+`RemoteWriteSpec` defines the configuration to write samples from Prometheus to a remote endpoint.
+
+[cols="2,2,5,1"]
+|===
+|Name |Type |Description |Required
+
+|`url`
+|string
+|The URL of the endpoint to send samples to.
+|true
+
+|xref:#monitoringstackspecprometheusconfigremotewriteindexauthorization[`authorization`]
+|object
+a|Authorization section for the URL.
+
+* It requires Prometheus >= v2.26.0.
+* Cannot be set at the same time as `sigv4`, `basicAuth`, `oauth2`, or `azureAd`.
+|false
+
+|xref:#monitoringstackspecprometheusconfigremotewriteindexazuread[`azureAd`]
+|object
+a|AzureAD for the URL.
+
+* It requires Prometheus >= v2.45.0.
+* Cannot be set at the same time as `authorization`, `basicAuth`, `oauth2`, or `sigv4`.
+|false
+
+|xref:#monitoringstackspecprometheusconfigremotewriteindexbasicauth[`basicAuth`]
+|object
+a|BasicAuth configuration for the URL.
+
+* Cannot be set at the same time as `sigv4`, `authorization`, `oauth2`, or `azureAd`.
+|false
+
+|`bearerToken`
+|string
+|_Warning: this field should not be used because the token value appears in clear-text. Prefer using `authorization`._
+
+_Deprecated: this will be removed in a future release._
+|false
+
+|`bearerTokenFile`
+|string
+|File from which to read bearer token for the URL.
+
+_Deprecated: this will be removed in a future release. Prefer using `authorization`._
+|false
+
+|`enableHTTP2`
+|boolean
+|Whether to enable HTTP2.
+|false
+
+|`followRedirects`
+|boolean
+|Configure whether HTTP requests follow HTTP 3xx redirects.
+
+It requires Prometheus >= v2.26.0.
+|false
+
+|`headers`
+|`map[string]string`
+|Custom HTTP headers to be sent along with each remote write request. Be aware that headers that are set by Prometheus itself can't be overwritten.
+
+It requires Prometheus >= v2.25.0.
+|false
+
+|`messageVersion`
+|enum
+a|The Remote Write message's version to use when writing to the endpoint.
+
+* `Version1.0` corresponds to the `prometheus.WriteRequest` protobuf message introduced in Remote Write 1.0.
+* `Version2.0` corresponds to the `io.prometheus.write.v2.Request` protobuf message introduced in Remote Write 2.0.
+* When `Version2.0` is selected, Prometheus will automatically be configured to append the metadata of scraped metrics to the WAL.
+* Before setting this field, consult with your remote storage provider what message version it supports.
+* It requires Prometheus >= v2.54.0.
+
+_Enum_: `V1.0`, `V2.0`
+|false
+
+|xref:#monitoringstackspecprometheusconfigremotewriteindexmetadataconfig[`metadataConfig`]
+|object
+|`MetadataConfig` configures the sending of series metadata to the remote storage.
+|false
+
+|`name`
+|string
+|The name of the remote write queue. It must be unique if specified. The name is used in metrics and logging in order to differentiate queues.
+
+It requires Prometheus >= v2.15.0.
+|false
+
+|`noProxy`
+|string
+|`noProxy` is a comma-separated string that can contain IPs, CIDR notation, or domain names that should be excluded from proxying. IP and domain names can contain port numbers.
+
+It requires Prometheus >= v2.43.0 or Alertmanager >= 0.25.0.
+|false
+
+|xref:#monitoringstackspecprometheusconfigremotewriteindexoauth2[`oauth2`]
+|object
+a|OAuth2 configuration for the URL.
+
+* It requires Prometheus >= v2.27.0.
+* Cannot be set at the same time as `sigv4`, `authorization`, `basicAuth`, or `azureAd`.
+|false
+
+|xref:#monitoringstackspecprometheusconfigremotewriteindexproxyconnectheaderkeyindex[`proxyConnectHeader`]
+|`map[string][]object`
+|`ProxyConnectHeader` optionally specifies headers to send to proxies during CONNECT requests.
+
+It requires Prometheus >= v2.43.0 or Alertmanager >= 0.25.0.
+|false
+
+|`proxyFromEnvironment`
+|boolean
+|Whether to use the proxy configuration defined by environment variables (`HTTP_PROXY`, `HTTPS_PROXY`, and `NO_PROXY`).
+
+It requires Prometheus >= v2.43.0 or Alertmanager >= 0.25.0.
+|false
+
+|`proxyUrl`
+|string
+|`proxyURL` defines the HTTP proxy server to use.
+|false
+
+|xref:#monitoringstackspecprometheusconfigremotewriteindexqueueconfig[`queueConfig`]
+|object
+|`QueueConfig` allows tuning of the remote write queue parameters.
+|false
+
+|`remoteTimeout`
+|string
+|Timeout for requests to the remote write endpoint.
+|false
+
+|`sendExemplars`
+|boolean
+|Enables sending of exemplars over remote write. Note that exemplar-storage itself must be enabled using the `spec.enableFeatures` option for exemplars to be scraped in the first place.
+
+It requires Prometheus >= v2.27.0.
+|false
+
+|`sendNativeHistograms`
+|boolean
+|Enables sending of native histograms, also known as sparse histograms, over remote write.
+
+It requires Prometheus >= v2.40.0.
+|false
+
+|xref:#monitoringstackspecprometheusconfigremotewriteindexsigv4[`sigv4`]
+|object
+a|`Sigv4` configures AWS Signature Version 4 authentication for the URL.
+
+* It requires Prometheus >= v2.26.0.
+* Cannot be set at the same time as `authorization`, `basicAuth`, `oauth2`, or `azureAd`.
+|false
+
+|xref:#monitoringstackspecprometheusconfigremotewriteindextlsconfig[`tlsConfig`]
+|object
+|TLS Config to use for the URL.
+|false
+
+|xref:#monitoringstackspecprometheusconfigremotewriteindexwriterelabelconfigsindex[`writeRelabelConfigs`]
+|`[]object`
+|The list of remote write relabel configurations.
+|false
+|===
+
+[[monitoringstackspecprometheusconfigremotewriteindexauthorization]]
+== MonitoringStack.spec.prometheusConfig.remoteWrite[index].authorization
+
+Authorization section for the URL.
+
+* It requires Prometheus >= v2.26.0.
+* Cannot be set at the same time as `sigv4`, `basicAuth`, `oauth2`, or `azureAd`.
+
+[cols="2,1,4,1"]
+|===
+|Name |Type |Description |Required
+
+|xref:#monitoringstackspecprometheusconfigremotewriteindexauthorizationcredentials[`credentials`]
+|object
+|Selects a key of a Secret in the namespace that contains the credentials for authentication.
+|false
+
+|`credentialsFile`
+|string
+|File to read a secret from, mutually exclusive with `credentials`.
+|false
+
+|`type`
+|string
+|Defines the authentication type. The value is case-insensitive.
+
+"Basic" is not a supported value.
+
+Default: "Bearer"
+|false
+|===
+
+[[monitoringstackspecprometheusconfigremotewriteindexauthorizationcredentials]]
+== MonitoringStack.spec.prometheusConfig.remoteWrite[index].authorization.credentials
+
+
+Selects a key of a Secret in the namespace that contains the credentials for authentication.
+
+[cols="1,1,4,1"]
+|===
+|Name |Type |Description |Required
+
+|`key`
+|string
+|The key of the secret to select from. Must be a valid secret key.
+|true
+
+|`name`
+|string
+|Name of the referent. This field is effectively required, but due to backwards compatibility is allowed to be empty. Instances of this type with an empty value here are almost certainly wrong.
For more information, see link:https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names[Kubernetes Docs] +|false + +|`optional` +|boolean +|Specify whether the Secret or its key must be defined +|false +|=== + +[[monitoringstackspecprometheusconfigremotewriteindexazuread]] +== MonitoringStack.spec.prometheusConfig.remoteWrite[index].azureAd + + +AzureAD for the URL. + +* It requires Prometheus >= v2.45.0. +* Cannot be set at the same time as `authorization`, `basicAuth`, `oauth2`, or `sigv4`. + +[cols="2,1,4,1"] +|=== +|Name |Type |Description |Required + +|`cloud` +|enum +|The Azure Cloud. Options are 'AzurePublic', 'AzureChina', or 'AzureGovernment'. + +_Enum_: `AzureChina`, `AzureGovernment`, `AzurePublic` +|false + +|xref:#monitoringstackspecprometheusconfigremotewriteindexazureadmanagedidentity[`managedIdentity`] +|object +|`ManagedIdentity` defines the Azure User-assigned Managed identity. Cannot be set at the same time as `oauth` or `sdk`. +|false + +|xref:#monitoringstackspecprometheusconfigremotewriteindexazureadoauth[`oauth`] +|object +a|`OAuth` defines the oauth config that is being used to authenticate. + +* Cannot be set at the same time as `managedIdentity` or `sdk`. + +* It requires Prometheus >= v2.48.0. +|false + +|xref:#monitoringstackspecprometheusconfigremotewriteindexazureadsdk[`sdk`] +|object +a|`SDK` defines the Azure SDK config that is being used to authenticate. See link:https://learn.microsoft.com/en-us/azure/developer/go/azure-sdk-authentication[Azure SDK Authentication]. + +* Cannot be set at the same time as `oauth` or `managedIdentity`. + +* It requires Prometheus >= 2.52.0. +|false +|=== + +[[monitoringstackspecprometheusconfigremotewriteindexazureadmanagedidentity]] +== MonitoringStack.spec.prometheusConfig.remoteWrite[index].azureAd.managedIdentity + + +ManagedIdentity defines the Azure User-assigned Managed identity. + +* Cannot be set at the same time as `oauth` or `sdk`. + +[cols="1,1,3,1"] +|=== +|Name |Type |Description |Required + +|`clientId` +|string +|The client id +|true +|=== + +[[monitoringstackspecprometheusconfigremotewriteindexazureadoauth]] +== MonitoringStack.spec.prometheusConfig.remoteWrite[index].azureAd.oauth + +`OAuth` defines the oauth config that is being used to authenticate. + +* Cannot be set at the same time as `managedIdentity` or `sdk`. + +* It requires Prometheus >= v2.48.0. + +[cols="2,1,4,1"] +|=== +|Name |Type |Description |Required + +|`clientId` +|string +|`clientID` is the clientId of the Azure Active Directory application that is being used to authenticate. +|true + +|xref:#monitoringstackspecprometheusconfigremotewriteindexazureadoauthclientsecret[`clientSecret`] +|object +|`clientSecret` specifies a key of a Secret containing the client secret of the Azure Active Directory application that is being used to authenticate. +|true + +|`tenantId` +|string +|`tenantId` is the tenant ID of the Azure Active Directory application that is being used to authenticate. +|true +|=== + +[[monitoringstackspecprometheusconfigremotewriteindexazureadoauthclientsecret]] +== MonitoringStack.spec.prometheusConfig.remoteWrite[index].azureAd.oauth.clientSecret + + +`clientSecret` specifies a key of a Secret containing the client secret of the Azure Active Directory application that is being used to authenticate. + +[cols="1,1,4,1"] +|=== +|Name |Type |Description |Required + +|`key` +|string +|The key of the secret to select from. Must be a valid secret key. +|true + +|`name` +|string +|Name of the referent. 
This field is effectively required, but due to backwards compatibility is allowed to be empty. Instances of this type with an empty value here are almost certainly wrong. + +For more information, see link:https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names[Kubernetes Docs] +|false + +|`optional` +|boolean +|Specify whether the Secret or its key must be defined +|false +|=== + +[[monitoringstackspecprometheusconfigremotewriteindexazureadsdk]] +== MonitoringStack.spec.prometheusConfig.remoteWrite[index].azureAd.sdk + + +`SDK` defines the Azure SDK config that is being used to authenticate. + +For more information, see link:https://learn.microsoft.com/en-us/azure/developer/go/azure-sdk-authentication[Azure SDK Authentication]. + +* Cannot be set at the same time as `oauth` or `managedIdentity`. + +* It requires Prometheus >= 2.52.0. + +[cols="1,1,3,1"] +|=== +|Name |Type |Description |Required + +|`tenantId` +|string +|`tenantId` is the tenant ID of the azure active directory application that is being used to authenticate. +|false +|=== + +[[monitoringstackspecprometheusconfigremotewriteindexbasicauth]] +== MonitoringStack.spec.prometheusConfig.remoteWrite[index].basicAuth + +BasicAuth configuration for the URL. + +* Cannot be set at the same time as `sigv4`, `authorization`, `oauth2`, or `azureAd`. + +[cols="2,1,4,1"] +|=== +|Name |Type |Description |Required + +|xref:#monitoringstackspecprometheusconfigremotewriteindexbasicauthpassword[`password`] +|object +|`password` specifies a key of a Secret containing the password for authentication. +|false + +|xref:#monitoringstackspecprometheusconfigremotewriteindexbasicauthusername[`username`] +|object +|`username` specifies a key of a Secret containing the username for authentication. +|false +|=== + +[[monitoringstackspecprometheusconfigremotewriteindexbasicauthpassword]] +== MonitoringStack.spec.prometheusConfig.remoteWrite[index].basicAuth.password + + +`password` specifies a key of a Secret containing the password for authentication. + +[cols="1,1,4,1"] +|=== +|Name |Type |Description |Required + +|`key` +|string +|The key of the secret to select from. Must be a valid secret key. +|true + +|`name` +|string +|Name of the referent. This field is effectively required, but due to backwards compatibility is allowed to be empty. Instances of this type with an empty value here are almost certainly wrong. + +For more information, see link:https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names[Kubernetes Docs] +|false + +|`optional` +|boolean +|Specify whether the Secret or its key must be defined +|false +|=== + +[[monitoringstackspecprometheusconfigremotewriteindexbasicauthusername]] +== MonitoringStack.spec.prometheusConfig.remoteWrite[index].basicAuth.username + +`username` specifies a key of a Secret containing the username for authentication. + +[cols="1,1,4,1"] +|=== +|Name |Type |Description |Required + +|`key` +|string +|The key of the secret to select from. Must be a valid secret key. +|true + +|`name` +|string +|Name of the referent. This field is effectively required, but due to backwards compatibility is allowed to be empty. Instances of this type with an empty value here are almost certainly wrong. 
+ +For more information, see link:https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names[Kubernetes Docs] +|false + +|`optional` +|boolean +|Specify whether the Secret or its key must be defined +|false +|=== + +[[monitoringstackspecprometheusconfigremotewriteindexmetadataconfig]] +== MonitoringStack.spec.prometheusConfig.remoteWrite[index].metadataConfig + +`MetadataConfig` configures the sending of series metadata to the remote storage. + +[cols="1,1,3,1"] +|=== +|Name |Type |Description |Required + +|`send` +|boolean +|Defines whether metric metadata is sent to the remote storage or not. +|false + +|`sendInterval` +|string +|Defines how frequently metric metadata is sent to the remote storage. +|false +|=== + +[[monitoringstackspecprometheusconfigremotewriteindexoauth2]] +== MonitoringStack.spec.prometheusConfig.remoteWrite[index].oauth2 + +OAuth2 configuration for the URL. + +* It requires Prometheus >= v2.27.0. + +* Cannot be set at the same time as `sigv4`, `authorization`, `basicAuth`, or `azureAd`. + +[cols="2,2,4,1"] +|=== +|Name |Type |Description |Required + +|xref:#monitoringstackspecprometheusconfigremotewriteindexoauth2clientid[`clientId`] +|object +|`clientId` specifies a key of a Secret or ConfigMap object containing the OAuth2 client's ID. +|true + +|xref:#monitoringstackspecprometheusconfigremotewriteindexoauth2clientsecret[`clientSecret`] +|object +|`clientSecret` specifies a key of a Secret containing the OAuth2 client's secret. +|true + +|`tokenUrl` +|string +|`tokenURL` configures the URL to fetch the token from. +|true + +|`endpointParams` +|`map[string]string` +|`endpointParams` configures the HTTP parameters to append to the token URL. +|false + +|`noProxy` +|string +|`noProxy` is a comma-separated string that can contain IPs, CIDR notation, or domain names that should be excluded from proxying. IP and domain names can contain port numbers. + +It requires Prometheus >= v2.43.0 or Alertmanager >= 0.25.0. +|false + +|xref:#monitoringstackspecprometheusconfigremotewriteindexoauth2proxyconnectheaderkeyindex[`proxyConnectHeader`] +|`map[string][]object` +|ProxyConnectHeader optionally specifies headers to send to proxies during `CONNECT` requests. + +It requires Prometheus >= v2.43.0 or Alertmanager >= 0.25.0. +|false + +|`proxyFromEnvironment` +|boolean +|Whether to use the proxy configuration defined by environment variables (`HTTP_PROXY`, `HTTPS_PROXY`, and `NO_PROXY`). + +It requires Prometheus >= v2.43.0 or Alertmanager >= 0.25.0. +|false + +|`proxyUrl` +|string +|`proxyURL` defines the HTTP proxy server to use. +|false + +|`scopes` +|`[]string` +|`scopes` defines the OAuth2 scopes used for the token request. +|false + +|xref:#monitoringstackspecprometheusconfigremotewriteindexoauth2tlsconfig[`tlsConfig`] +|object +|TLS configuration to use when connecting to the OAuth2 server. + +It requires Prometheus >= v2.43.0. +|false +|=== + +[[monitoringstackspecprometheusconfigremotewriteindexoauth2clientid]] +== MonitoringStack.spec.prometheusConfig.remoteWrite[index].oauth2.clientId + + +`clientId` specifies a key of a Secret or ConfigMap containing the OAuth2 client's ID. + +[cols="2,1,4,1"] +|=== +|Name |Type |Description |Required + +|xref:#monitoringstackspecprometheusconfigremotewriteindexoauth2clientidconfigmap[`configMap`] +|object +|ConfigMap containing data to use for the targets. +|false + +|xref:#monitoringstackspecprometheusconfigremotewriteindexoauth2clientidsecret[`secret`] +|object +|Secret containing data to use for the targets. 
+|false +|=== + +[[monitoringstackspecprometheusconfigremotewriteindexoauth2clientidconfigmap]] +== MonitoringStack.spec.prometheusConfig.remoteWrite[index].oauth2.clientId.configMap + + +ConfigMap containing data to use for the targets. + +[cols="1,1,4,1"] +|=== +|Name |Type |Description |Required + +|`key` +|string +|The key to select. +|true + +|`name` +|string +|Name of the referent. This field is effectively required, but due to backwards compatibility is allowed to be empty. Instances of this type with an empty value here are almost certainly wrong. + +For more information, see link:https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names[Kubernetes Docs] +|false + +|`optional` +|boolean +|Specify whether the ConfigMap or its key must be defined +|false +|=== + +[[monitoringstackspecprometheusconfigremotewriteindexoauth2clientidsecret]] +== MonitoringStack.spec.prometheusConfig.remoteWrite[index].oauth2.clientId.secret + + +Secret containing data to use for the targets. + +[cols="1,1,4,1"] +|=== +|Name |Type |Description |Required + +|`key` +|string +|The key of the secret to select from. Must be a valid secret key. +|true + +|`name` +|string +|Name of the referent. This field is effectively required, but due to backwards compatibility is allowed to be empty. Instances of this type with an empty value here are almost certainly wrong. + +For more information, see link:https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names[Kubernetes Docs] +|false + +|`optional` +|boolean +|Specify whether the Secret or its key must be defined +|false +|=== + +[[monitoringstackspecprometheusconfigremotewriteindexoauth2clientsecret]] +== MonitoringStack.spec.prometheusConfig.remoteWrite[index].oauth2.clientSecret + + +`clientSecret` specifies a key of a Secret containing the OAuth2 client's secret. + +[cols="1,1,4,1"] +|=== +|Name |Type |Description |Required + +|`key` +|string +|The key of the secret to select from. Must be a valid secret key. +|true + +|`name` +|string +|Name of the referent. This field is effectively required, but due to backwards compatibility is allowed to be empty. Instances of this type with an empty value here are almost certainly wrong. + +For more information, see link:https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names[Kubernetes Docs] +|false + +|`optional` +|boolean +|Specify whether the Secret or its key must be defined +|false +|=== + +[[monitoringstackspecprometheusconfigremotewriteindexoauth2proxyconnectheaderkeyindex]] +== MonitoringStack.spec.prometheusConfig.remoteWrite[index].oauth2.proxyConnectHeader[key][index] + + +SecretKeySelector selects a key of a Secret. + +[cols="1,1,4,1"] +|=== +|Name |Type |Description |Required + +|`key` +|string +|The key of the secret to select from. Must be a valid secret key. +|true + +|`name` +|string +|Name of the referent. This field is effectively required, but due to backwards compatibility is allowed to be empty. Instances of this type with an empty value here are almost certainly wrong. + +For more information, see link:https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names[Kubernetes Docs] +|false + +|`optional` +|boolean +|Specify whether the Secret or its key must be defined +|false +|=== + +[[monitoringstackspecprometheusconfigremotewriteindexoauth2tlsconfig]] +== MonitoringStack.spec.prometheusConfig.remoteWrite[index].oauth2.tlsConfig + +TLS configuration to use when connecting to the OAuth2 server. 
+ +* It requires Prometheus >= v2.43.0. + +[cols="2,1,4,1"] +|=== +|Name |Type |Description |Required + +|xref:#monitoringstackspecprometheusconfigremotewriteindexoauth2tlsconfigca[`ca`] +|object +|Certificate authority used when verifying server certificates. +|false + +|xref:#monitoringstackspecprometheusconfigremotewriteindexoauth2tlsconfigcert[`cert`] +|object +|Client certificate to present when doing client-authentication. +|false + +|`insecureSkipVerify` +|boolean +|Disable target certificate validation. +|false + +|xref:#monitoringstackspecprometheusconfigremotewriteindexoauth2tlsconfigkeysecret[`keySecret`] +|object +|Secret containing the client key file for the targets. +|false + +|`maxVersion` +|enum +|Maximum acceptable TLS version. + +It requires Prometheus >= v2.41.0. + +_Enum_: `TLS10`, `TLS11`, `TLS12`, `TLS13` +|false + +|`minVersion` +|enum +|Minimum acceptable TLS version. + +It requires Prometheus >= v2.35.0. + +_Enum_: `TLS10`, `TLS11`, `TLS12`, `TLS13` +|false + +|`serverName` +|string +|Used to verify the hostname for the targets. +|false +|=== + +[[monitoringstackspecprometheusconfigremotewriteindexoauth2tlsconfigca]] +== MonitoringStack.spec.prometheusConfig.remoteWrite[index].oauth2.tlsConfig.ca + + +Certificate authority used when verifying server certificates. + +[cols="2,1,4,1"] +|=== +|Name |Type |Description |Required + +|xref:#monitoringstackspecprometheusconfigremotewriteindexoauth2tlsconfigcaconfigmap[`configMap`] +|object +|ConfigMap containing data to use for the targets. +|false + +|xref:#monitoringstackspecprometheusconfigremotewriteindexoauth2tlsconfigcasecret[`secret`] +|object +|Secret containing data to use for the targets. +|false +|=== + +[[monitoringstackspecprometheusconfigremotewriteindexoauth2tlsconfigcaconfigmap]] +== MonitoringStack.spec.prometheusConfig.remoteWrite[index].oauth2.tlsConfig.ca.configMap + + +ConfigMap containing data to use for the targets. + +[cols="1,1,4,1"] +|=== +|Name |Type |Description |Required + +|`key` +|string +|The key to select. +|true + +|`name` +|string +|Name of the referent. This field is effectively required, but due to backwards compatibility is allowed to be empty. Instances of this type with an empty value here are almost certainly wrong. + +For more information, see link:https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names[Kubernetes Docs] +|false + +|`optional` +|boolean +|Specify whether the ConfigMap or its key must be defined +|false +|=== + +[[monitoringstackspecprometheusconfigremotewriteindexoauth2tlsconfigcasecret]] +== MonitoringStack.spec.prometheusConfig.remoteWrite[index].oauth2.tlsConfig.ca.secret + + +Secret containing data to use for the targets. + +[cols="1,1,4,1"] +|=== +|Name |Type |Description |Required + +|`key` +|string +|The key of the secret to select from. Must be a valid secret key. +|true + +|`name` +|string +|Name of the referent. This field is effectively required, but due to backwards compatibility is allowed to be empty. Instances of this type with an empty value here are almost certainly wrong. 
+ +For more information, see link:https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names[Kubernetes Docs] +|false + +|`optional` +|boolean +|Specify whether the Secret or its key must be defined +|false +|=== + +[[monitoringstackspecprometheusconfigremotewriteindexoauth2tlsconfigcert]] +== MonitoringStack.spec.prometheusConfig.remoteWrite[index].oauth2.tlsConfig.cert + + +Client certificate to present when doing client-authentication. + +[cols="2,1,4,1"] +|=== +|Name |Type |Description |Required + +|xref:#monitoringstackspecprometheusconfigremotewriteindexoauth2tlsconfigcertconfigmap[`configMap`] +|object +|ConfigMap containing data to use for the targets. +|false + +|xref:#monitoringstackspecprometheusconfigremotewriteindexoauth2tlsconfigcertsecret[`secret`] +|object +|Secret containing data to use for the targets. +|false +|=== + +[[monitoringstackspecprometheusconfigremotewriteindexoauth2tlsconfigcertconfigmap]] +== MonitoringStack.spec.prometheusConfig.remoteWrite[index].oauth2.tlsConfig.cert.configMap + + +ConfigMap containing data to use for the targets. + +[cols="1,1,4,1"] +|=== +|Name |Type |Description |Required + +|`key` +|string +|The key to select. +|true + +|`name` +|string +|Name of the referent. This field is effectively required, but due to backwards compatibility is allowed to be empty. Instances of this type with an empty value here are almost certainly wrong. + +For more information, see link:https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names[Kubernetes Docs] +|false + +|`optional` +|boolean +|Specify whether the ConfigMap or its key must be defined +|false +|=== + +[[monitoringstackspecprometheusconfigremotewriteindexoauth2tlsconfigcertsecret]] +== MonitoringStack.spec.prometheusConfig.remoteWrite[index].oauth2.tlsConfig.cert.secret + +Secret containing data to use for the targets. + +[cols="1,1,4,1"] +|=== +|Name |Type |Description |Required + +|`key` +|string +|The key of the secret to select from. Must be a valid secret key. +|true + +|`name` +|string +|Name of the referent. This field is effectively required, but due to backwards compatibility is allowed to be empty. Instances of this type with an empty value here are almost certainly wrong. + +For more information, see link:https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names[Kubernetes Docs] +|false + +|`optional` +|boolean +|Specify whether the Secret or its key must be defined +|false +|=== + +[[monitoringstackspecprometheusconfigremotewriteindexoauth2tlsconfigkeysecret]] +== MonitoringStack.spec.prometheusConfig.remoteWrite[index].oauth2.tlsConfig.keySecret + + +Secret containing the client key file for the targets. + +[cols="1,1,4,1"] +|=== +|Name |Type |Description |Required + +|`key` +|string +|The key of the secret to select from. Must be a valid secret key. +|true + +|`name` +|string +|Name of the referent. This field is effectively required, but due to backwards compatibility is allowed to be empty. Instances of this type with an empty value here are almost certainly wrong. + +For more information, see link:https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names[Kubernetes Docs] +|false + +|`optional` +|boolean +|Specify whether the Secret or its key must be defined +|false +|=== + +[[monitoringstackspecprometheusconfigremotewriteindexproxyconnectheaderkeyindex]] +== MonitoringStack.spec.prometheusConfig.remoteWrite[index].proxyConnectHeader[key][index] + +SecretKeySelector selects a key of a Secret. 
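+
+As a hedged illustration only, a remote-write entry might pass a proxy header taken from a Secret as follows; the endpoint and proxy URLs, the `proxy-credentials` Secret name, and the `token` key are hypothetical, not values defined by this API:
+
+[source,yaml]
+----
+remoteWrite:
+- url: https://remote-write.example.com/api/v1/write # example endpoint
+  proxyUrl: http://proxy.example.com:3128 # example proxy
+  proxyConnectHeader:
+    Proxy-Authorization:
+    - name: proxy-credentials # hypothetical Secret name
+      key: token # hypothetical key in that Secret
+----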
+
+[cols="1,1,4,1"]
+|===
+|Name |Type |Description |Required
+
+|`key`
+|string
+|The key of the secret to select from. Must be a valid secret key.
+|true
+
+|`name`
+|string
+|Name of the referent. This field is effectively required, but due to backwards compatibility is allowed to be empty. Instances of this type with an empty value here are almost certainly wrong.
+
+For more information, see link:https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names[Kubernetes Docs]
+|false
+
+|`optional`
+|boolean
+|Specify whether the Secret or its key must be defined
+|false
+|===
+
+[[monitoringstackspecprometheusconfigremotewriteindexqueueconfig]]
+== MonitoringStack.spec.prometheusConfig.remoteWrite[index].queueConfig
+
+
+QueueConfig allows tuning of the remote write queue parameters.
+
+[cols="2,1,4,1"]
+|===
+|Name |Type |Description |Required
+
+|`batchSendDeadline`
+|string
+|`BatchSendDeadline` is the maximum time a sample waits in the buffer.
+|false
+
+|`capacity`
+|integer
+|`Capacity` is the number of samples to buffer per shard before the queue starts dropping them.
+|false
+
+|`maxBackoff`
+|string
+|`MaxBackoff` is the maximum retry delay.
+|false
+
+|`maxRetries`
+|integer
+|`MaxRetries` is the maximum number of times to retry a batch on recoverable errors.
+|false
+
+|`maxSamplesPerSend`
+|integer
+|`MaxSamplesPerSend` is the maximum number of samples per send.
+|false
+
+|`maxShards`
+|integer
+|`MaxShards` is the maximum number of shards, that is, the amount of concurrency.
+|false
+
+|`minBackoff`
+|string
+|`MinBackoff` is the initial retry delay. It is doubled for every retry.
+|false
+
+|`minShards`
+|integer
+|`MinShards` is the minimum number of shards, that is, the amount of concurrency.
+|false
+
+|`retryOnRateLimit`
+|boolean
+|Retry upon receiving a 429 status code from the remote-write storage.
+
+This is an *experimental feature*; it may change in any upcoming release in a breaking way.
+|false
+
+|`sampleAgeLimit`
+|string
+|`SampleAgeLimit` drops samples older than the limit.
+
+It requires Prometheus >= v2.50.0.
+|false
+|===
+
+[[monitoringstackspecprometheusconfigremotewriteindexsigv4]]
+== MonitoringStack.spec.prometheusConfig.remoteWrite[index].sigv4
+
+
+Sigv4 configures AWS Signature Version 4 signing for the URL.
+
+* It requires Prometheus >= v2.26.0.
+
+* Cannot be set at the same time as `authorization`, `basicAuth`, `oauth2`, or `azureAd`.
+
+[cols="2,1,4,1"]
+|===
+|Name |Type |Description |Required
+
+|xref:#monitoringstackspecprometheusconfigremotewriteindexsigv4accesskey[`accessKey`]
+|object
+|`AccessKey` is the AWS API key. If not specified, the environment variable `AWS_ACCESS_KEY_ID` is used.
+|false
+
+|`profile`
+|string
+|`Profile` is the named AWS profile used to authenticate.
+|false
+
+|`region`
+|string
+|`Region` is the AWS region. If blank, the region from the default credentials chain is used.
+|false
+
+|`roleArn`
+|string
+|`RoleArn` is the AWS role ARN used to authenticate.
+|false
+
+|xref:#monitoringstackspecprometheusconfigremotewriteindexsigv4secretkey[`secretKey`]
+|object
+|`SecretKey` is the AWS API secret. If not specified, the environment variable `AWS_SECRET_ACCESS_KEY` is used.
+|false
+|===
+
+[[monitoringstackspecprometheusconfigremotewriteindexsigv4accesskey]]
+== MonitoringStack.spec.prometheusConfig.remoteWrite[index].sigv4.accessKey
+
+
+`AccessKey` is the AWS API key. If not specified, the environment variable `AWS_ACCESS_KEY_ID` is used.
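+
+As a minimal sketch, a `sigv4` remote-write entry that reads both credentials from a Secret might look like the following; the workspace URL, the `aws-credentials` Secret name, and its keys are hypothetical assumptions:
+
+[source,yaml]
+----
+remoteWrite:
+- url: https://aps-workspaces.us-east-1.amazonaws.com/workspaces/ws-example/api/v1/remote_write # example endpoint
+  sigv4:
+    region: us-east-1
+    accessKey:
+      name: aws-credentials # hypothetical Secret name
+      key: access-key # hypothetical key holding the AWS API key
+    secretKey:
+      name: aws-credentials
+      key: secret-key # hypothetical key holding the AWS API secret
+----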
+ +[cols="1,1,4,1"] +|=== +|Name |Type |Description |Required + +|`key` +|string +|The key of the secret to select from. Must be a valid secret key. +|true + +|`name` +|string +|Name of the referent. This field is effectively required, but due to backwards compatibility is allowed to be empty. Instances of this type with an empty value here are almost certainly wrong. + +For more information, see link:https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names[Kubernetes Docs] +|false + +|`optional` +|boolean +|Specify whether the Secret or its key must be defined +|false +|=== + +[[monitoringstackspecprometheusconfigremotewriteindexsigv4secretkey]] +== MonitoringStack.spec.prometheusConfig.remoteWrite[index].sigv4.secretKey + + +`SecretKey` is the AWS API secret. If not specified, the environment variable `AWS_SECRET_ACCESS_KEY` is used. + +[cols="1,1,4,1"] +|=== +|Name |Type |Description |Required + +|`key` +|string +|The key of the secret to select from. Must be a valid secret key. +|true + +|`name` +|string +|Name of the referent. This field is effectively required, but due to backwards compatibility is allowed to be empty. Instances of this type with an empty value here are almost certainly wrong. + +For more information, see link:https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names[Kubernetes Docs] +|false + +|`optional` +|boolean +|Specify whether the Secret or its key must be defined +|false +|=== + +[[monitoringstackspecprometheusconfigremotewriteindextlsconfig]] +== MonitoringStack.spec.prometheusConfig.remoteWrite[index].tlsConfig + + +TLS Config to use for the URL. + +[cols="2,1,4,1"] +|=== +|Name |Type |Description |Required + +|xref:#monitoringstackspecprometheusconfigremotewriteindextlsconfigca[`ca`] +|object +|Certificate authority used when verifying server certificates. +|false + +|`caFile` +|string +|Path to the CA cert in the Prometheus container to use for the targets. +|false + +|xref:#monitoringstackspecprometheusconfigremotewriteindextlsconfigcert[`cert`] +|object +|Client certificate to present when doing client-authentication. +|false + +|`certFile` +|string +|Path to the client cert file in the Prometheus container for the targets. +|false + +|`insecureSkipVerify` +|boolean +|Disable target certificate validation. +|false + +|`keyFile` +|string +|Path to the client key file in the Prometheus container for the targets. +|false + +|xref:#monitoringstackspecprometheusconfigremotewriteindextlsconfigkeysecret[`keySecret`] +|object +|Secret containing the client key file for the targets. +|false + +|`maxVersion` +|enum +|Maximum acceptable TLS version. + +It requires Prometheus >= v2.41.0. + +_Enum_: `TLS10`, `TLS11`, `TLS12`, `TLS13` +|false + +|`minVersion` +|enum +|Minimum acceptable TLS version. + +It requires Prometheus >= v2.35.0. + +_Enum_: `TLS10`, `TLS11`, `TLS12`, `TLS13` +|false + +|`serverName` +|string +|Used to verify the hostname for the targets. +|false +|=== + +[[monitoringstackspecprometheusconfigremotewriteindextlsconfigca]] +== MonitoringStack.spec.prometheusConfig.remoteWrite[index].tlsConfig.ca + + +Certificate authority used when verifying server certificates. + +[cols="2,1,4,1"] +|=== +|Name |Type |Description |Required + +|xref:#monitoringstackspecprometheusconfigremotewriteindextlsconfigcaconfigmap[`configMap`] +|object +|ConfigMap containing data to use for the targets. 
+|false + +|xref:#monitoringstackspecprometheusconfigremotewriteindextlsconfigcasecret[`secret`] +|object +|Secret containing data to use for the targets. +|false +|=== + +[[monitoringstackspecprometheusconfigremotewriteindextlsconfigcaconfigmap]] +== MonitoringStack.spec.prometheusConfig.remoteWrite[index].tlsConfig.ca.configMap + +ConfigMap containing data to use for the targets. + +[cols="1,1,4,1"] +|=== +|Name |Type |Description |Required + +|`key` +|string +|The key to select. +|true + +|`name` +|string +|Name of the referent. This field is effectively required, but due to backwards compatibility is allowed to be empty. Instances of this type with an empty value here are almost certainly wrong. + +For more information, see link:https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names[Kubernetes Docs] +|false + +|`optional` +|boolean +|Specify whether the ConfigMap or its key must be defined +|false +|=== + +[[monitoringstackspecprometheusconfigremotewriteindextlsconfigcasecret]] +== MonitoringStack.spec.prometheusConfig.remoteWrite[index].tlsConfig.ca.secret + + +Secret containing data to use for the targets. + +[cols="1,1,4,1"] +|=== +|Name |Type |Description |Required + +|`key` +|string +|The key of the secret to select from. Must be a valid secret key. +|true + +|`name` +|string +|Name of the referent. This field is effectively required, but due to backwards compatibility is allowed to be empty. Instances of this type with an empty value here are almost certainly wrong. + +For more information, see link:https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names[Kubernetes Docs] +|false + +|`optional` +|boolean +|Specify whether the Secret or its key must be defined +|false +|=== + +[[monitoringstackspecprometheusconfigremotewriteindextlsconfigcert]] +== MonitoringStack.spec.prometheusConfig.remoteWrite[index].tlsConfig.cert + + +Client certificate to present when doing client-authentication. + +[cols="2,1,4,1"] +|=== +|Name |Type |Description |Required + +|xref:#monitoringstackspecprometheusconfigremotewriteindextlsconfigcertconfigmap[`configMap`] +|object +|ConfigMap containing data to use for the targets. +|false + +|xref:#monitoringstackspecprometheusconfigremotewriteindextlsconfigcertsecret[`secret`] +|object +|Secret containing data to use for the targets. +|false +|=== + +[[monitoringstackspecprometheusconfigremotewriteindextlsconfigcertconfigmap]] +== MonitoringStack.spec.prometheusConfig.remoteWrite[index].tlsConfig.cert.configMap + + +ConfigMap containing data to use for the targets. + +[cols="1,1,4,1"] +|=== +|Name |Type |Description |Required + +|`key` +|string +|The key to select. +|true + +|`name` +|string +|Name of the referent. This field is effectively required, but due to backwards compatibility is allowed to be empty. Instances of this type with an empty value here are almost certainly wrong. + +For more information, see link:https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names[Kubernetes Docs] +|false + +|`optional` +|boolean +|Specify whether the ConfigMap or its key must be defined +|false +|=== + +[[monitoringstackspecprometheusconfigremotewriteindextlsconfigcertsecret]] +== MonitoringStack.spec.prometheusConfig.remoteWrite[index].tlsConfig.cert.secret + + +Secret containing data to use for the targets. + +[cols="1,1,4,1"] +|=== +|Name |Type |Description |Required + +|`key` +|string +|The key of the secret to select from. Must be a valid secret key. 
+|true + +|`name` +|string +|Name of the referent. This field is effectively required, but due to backwards compatibility is allowed to be empty. Instances of this type with an empty value here are almost certainly wrong. + +For more information, see link:https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names[Kubernetes Docs] +|false + +|`optional` +|boolean +|Specify whether the Secret or its key must be defined +|false +|=== + +[[monitoringstackspecprometheusconfigremotewriteindextlsconfigkeysecret]] +== MonitoringStack.spec.prometheusConfig.remoteWrite[index].tlsConfig.keySecret + + +Secret containing the client key file for the targets. + +[cols="1,1,4,1"] +|=== +|Name |Type |Description |Required + +|`key` +|string +|The key of the secret to select from. Must be a valid secret key. +|true + +|`name` +|string +|Name of the referent. This field is effectively required, but due to backwards compatibility is allowed to be empty. Instances of this type with an empty value here are almost certainly wrong. + +For more information, see link:https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names[Kubernetes Docs] +|false + +|`optional` +|boolean +|Specify whether the Secret or its key must be defined +|false +|=== + +[[monitoringstackspecprometheusconfigremotewriteindexwriterelabelconfigsindex]] +== MonitoringStack.spec.prometheusConfig.remoteWrite[index].writeRelabelConfigs[index] + + +RelabelConfig allows dynamic rewriting of the label set for targets, alerts, scraped samples and remote write samples. + +For more information, see link:https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config[Prometheus Docs] + +[cols="2,1,4,1"] +|=== +|Name |Type |Description |Required + +|`action` +|enum +a|Action to perform based on the regex matching. + +* `Uppercase` and `Lowercase` actions require Prometheus >= v2.36.0. +* `DropEqual` and `KeepEqual` actions require Prometheus >= v2.41.0. + +_Enum_: `replace`, `Replace`, `keep`, `Keep`, `drop`, `Drop`, `hashmod`, `HashMod`, `labelmap`, `LabelMap`, `labeldrop`, `LabelDrop`, `labelkeep`, `LabelKeep`, `lowercase`, `Lowercase`, `uppercase`, `Uppercase`, `keepequal`, `KeepEqual`, `dropequal`, `DropEqual` + +_Default_: `replace` +|false + +|`modulus` +|integer +|Modulus to take of the hash of the source label values. + +Only applicable when the action is `HashMod`. + +_Format_: int64 +|false + +|`regex` +|string +|Regular expression against which the extracted value is matched. +|false + +|`replacement` +|string +|Replacement value against which a Replace action is performed if the regular expression matches. + +Regex capture groups are available. +|false + +|`separator` +|string +|`Separator` is the string between concatenated `SourceLabels`. +|false + +|`sourceLabels` +|`[]string` +|The source labels select values from existing labels. Their content is concatenated using the configured Separator and matched against the configured regular expression. +|false + +|`targetLabel` +|string +|Label to which the resulting string is written in a replacement. + +It is mandatory for `Replace`, `HashMod`, `Lowercase`, `Uppercase`, `KeepEqual` and `DropEqual` actions. + +Regex capture groups are available. +|false +|=== + +[[monitoringstackspecprometheusconfigwebtlsconfig]] +== MonitoringStack.spec.prometheusConfig.webTLSConfig + + +Configure TLS options for the Prometheus web server. 
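+
+A minimal sketch of a web TLS configuration, assuming a `prometheus-web-tls` Secret that uses the conventional `tls.crt`, `tls.key`, and `ca.crt` keys (the Secret name and keys are illustrative, not mandated by this API):
+
+[source,yaml]
+----
+prometheusConfig:
+  webTLSConfig:
+    certificate:
+      name: prometheus-web-tls # hypothetical Secret name
+      key: tls.crt
+    privateKey:
+      name: prometheus-web-tls
+      key: tls.key
+    certificateAuthority:
+      name: prometheus-web-tls
+      key: ca.crt
+----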
+ +[cols="2,1,4,1"] +|=== +|Name |Type |Description |Required + +|xref:#monitoringstackspecprometheusconfigwebtlsconfigcertificate[`certificate`] +|object +|Reference to the TLS public certificate for the web server. +|true + +|xref:#monitoringstackspecprometheusconfigwebtlsconfigcertificateauthority[`certificateAuthority`] +|object +|Reference to the root Certificate Authority used to verify the web server's certificate. +|true + +|xref:#monitoringstackspecprometheusconfigwebtlsconfigprivatekey[`privateKey`] +|object +|Reference to the TLS private key for the web server. +|true +|=== + +[[monitoringstackspecprometheusconfigwebtlsconfigcertificate]] +== MonitoringStack.spec.prometheusConfig.webTLSConfig.certificate + +Reference to the TLS public certificate for the web server. + +[cols="1,1,3,1"] +|=== +|Name |Type |Description |Required + +|`key` +|string +|The key of the secret to select from. Must be a valid secret key. +|true + +|`name` +|string +|The name of the secret in the object's namespace to select from. +|true +|=== + +[[monitoringstackspecprometheusconfigwebtlsconfigcertificateauthority]] +== MonitoringStack.spec.prometheusConfig.webTLSConfig.certificateAuthority + + +Reference to the root Certificate Authority used to verify the web server's certificate. + +[cols="1,1,3,1"] +|=== +|Name |Type |Description |Required + +|`key` +|string +|The key of the secret to select from. Must be a valid secret key. +|true + +|`name` +|string +|The name of the secret in the object's namespace to select from. +|true +|=== + +[[monitoringstackspecprometheusconfigwebtlsconfigprivatekey]] +== MonitoringStack.spec.prometheusConfig.webTLSConfig.privateKey + + +Reference to the TLS private key for the web server. + +[cols="1,1,3,1"] +|=== +|Name |Type |Description |Required + +|`key` +|string +|The key of the secret to select from. Must be a valid secret key. +|true + +|`name` +|string +|The name of the secret in the object's namespace to select from. +|true +|=== + +[[monitoringstackspecresourceselector]] +== MonitoringStack.spec.resourceSelector + + +Label selector for `MonitoringStack` resources. + +* To monitor everything, set to empty map selector. For example, `resourceSelector: {}`. + +* To disable service discovery, set to null. For example, `resourceSelector:`. + +[cols="2,2,4,1"] +|=== +|Name |Type |Description |Required + +|xref:#monitoringstackspecresourceselectormatchexpressionsindex[`matchExpressions`] +|`[]object` +|`matchExpressions` is a list of label selector requirements. The requirements are ANDed. +|false + +|`matchLabels` +|`map[string]string` +|`matchLabels` is a map of {key,value} pairs. A single {key,value} in the `matchLabels` map is equivalent to an element of `matchExpressions`, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. +|false +|=== + +[[monitoringstackspecresourceselectormatchexpressionsindex]] +== MonitoringStack.spec.resourceSelector.matchExpressions[index] + + +A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + +[cols="1,1,4,1"] +|=== +|Name |Type |Description |Required + +|`key` +|string +|`key` is the label key that the selector applies to. +|true + +|`operator` +|string +|`operator` represents a key's relationship to a set of values. Valid operators are `In`, `NotIn`, `Exists` and `DoesNotExist`. +|true + +|`values` +|`[]string` +|`values` is an array of string values. 
If the operator is `In` or `NotIn`, the values array must be non-empty. If the operator is `Exists` or `DoesNotExist`, the values array must be empty. This array is replaced during a strategic merge patch.
+|false
+|===
+
+[[monitoringstackspecresources]]
+== MonitoringStack.spec.resources
+
+
+Define resource requests and limits for `MonitoringStack` pods.
+
+[cols="2,2,4,1"]
+|===
+|Name |Type |Description |Required
+
+|xref:#monitoringstackspecresourcesclaimsindex[`claims`]
+|`[]object`
+|`Claims` lists the names of resources, defined in `spec.resourceClaims`, that are used by this container.
+
+This is an alpha field and requires enabling the `DynamicResourceAllocation` feature gate.
+
+This field is immutable. It can only be set for containers.
+|false
+
+|`limits`
+|`map[string]int or string`
+|`Limits` describes the maximum amount of compute resources allowed.
+
+For more information, see link:https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/[Kubernetes Docs]
+|false
+
+|`requests`
+|`map[string]int or string`
+|`Requests` describes the minimum amount of compute resources required. If `Requests` is omitted for a container, it defaults to `Limits` if that is explicitly specified, otherwise to an implementation-defined value. `Requests` cannot exceed `Limits`.
+
+For more information, see link:https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/[Kubernetes Docs]
+|false
+|===
+
+[[monitoringstackspecresourcesclaimsindex]]
+== MonitoringStack.spec.resources.claims[index]
+
+
+`ResourceClaim` references one entry in `PodSpec.ResourceClaims`.
+
+[cols="1,1,4,1"]
+|===
+|Name |Type |Description |Required
+
+|`name`
+|string
+|`Name` must match the name of one entry in `pod.spec.resourceClaims` of the Pod where this field is used. It makes that resource available inside a container.
+|true
+
+|`request`
+|string
+|`Request` is the name chosen for a request in the referenced claim. If empty, everything from the claim is made available; otherwise, only the result of this request is made available.
+|false
+|===
+
+[[monitoringstackspectolerationsindex]]
+== MonitoringStack.spec.tolerations[index]
+
+
+The pod this `Toleration` is attached to tolerates any taint that matches the triple `<key,value,effect>` using the matching operator `<operator>`.
+
+[cols="2,1,4,1"]
+|===
+|Name |Type |Description |Required
+
+|`effect`
+|string
+|`Effect` indicates the taint effect to match. Empty means match all taint effects. When specified, allowed values are `NoSchedule`, `PreferNoSchedule` and `NoExecute`.
+|false
+
+|`key`
+|string
+|`Key` is the taint key that the toleration applies to. Empty means match all taint keys. If the key is empty, operator must be `Exists`; this combination means to match all values and all keys.
+|false
+
+|`operator`
+|string
+|`Operator` represents a key's relationship to the value. Valid operators are `Exists` and `Equal`. Defaults to `Equal`. `Exists` is equivalent to wildcard for value, so that a pod can tolerate all taints of a particular category.
+|false
+
+|`tolerationSeconds`
+|integer
+|`TolerationSeconds` represents the period of time the toleration (which must be of effect `NoExecute`, otherwise this field is ignored) tolerates the taint. By default, it is not set, which means tolerate the taint forever (do not evict). Zero and negative values will be treated as 0 (evict immediately) by the system.
+
+_Format_: int64
+|false
+
+|`value`
+|string
+|`Value` is the taint value the toleration matches to.
If the operator is `Exists`, the value should be empty, otherwise just a regular string. +|false +|=== + +[[monitoringstackstatus]] +== MonitoringStack.status + + +`MonitoringStackStatus` defines the observed state of the `MonitoringStack` instance. +It should always be reconstructable from the state of the cluster and/or outside world. + +[cols="2,1,4,1"] +|=== +|Name |Type |Description |Required + +|xref:#monitoringstackstatusconditionsindex[`conditions`] +|`[]object` +|`Conditions` provide status information about the `MonitoringStack` instance. +|true +|=== + +[[monitoringstackstatusconditionsindex]] +== MonitoringStack.status.conditions[index] + + +[cols="2,1,4,1"] +|=== +|Name |Type |Description |Required + +|`lastTransitionTime` +|string +|`lastTransitionTime` is the last time the condition transitioned from one status to another. This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + +_Format_: date-time +|true + +|`message` +|string +|`message` is a human readable message indicating details about the transition. This may be an empty string. +|true + +|`reason` +|string +|`reason` contains a programmatic identifier indicating the reason for the condition's last transition. Producers of specific condition types may define expected values and meanings for this field, and whether the values are considered a guaranteed API. The value should be a CamelCase string. This field may not be empty. +|true + +|`status` +|enum +|status of the condition + +_Enum_: `True`, `False`, `Unknown`, `Degraded` +|true + +|`type` +|string +|`type` of condition in CamelCase or in `foo.example.com/CamelCase`. The regex it matches is `(dns1123SubdomainFmt/)?(qualifiedNameFmt)` +|true + +|`observedGeneration` +|integer +|`observedGeneration` represents the `.metadata.generation` that the condition was set based upon. For instance, if `.metadata.generation` is currently 12, but the `.status.conditions[x].observedGeneration` is 9, the condition is out of date with respect to the current state of the instance. + +_Format_: int64 + +_Minimum_: 0 +|false +|=== + +[[thanosquerier]] +== ThanosQuerier + +ThanosQuerier outlines the Thanos querier components, managed by this stack + +[cols="2,1,4,1"] +|=== +|Name |Type |Description |Required + +|`apiVersion` +|string +|`monitoring.rhobs/v1alpha1` +|true + +|`kind` +|string +|`ThanosQuerier` +|true + +|link:https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#objectmeta-v1-meta[`metadata`] +|object +|Refer to the Kubernetes API documentation for the fields of the `metadata` field. +|true + +|xref:#thanosquerierspec[`spec`] +|object +|`ThanosQuerierSpec` defines a single Thanos Querier instance. This means a label selector by which `MonitoringStack` instances to query are selected, and an optional namespace selector and a list of replica labels by which to deduplicate. +|false + +|`status` +|object +|`ThanosQuerierStatus` defines the observed state of ThanosQuerier. It should always be reconstructable from the state of the cluster and/or outside world. +|false +|=== + +[[thanosquerierspec]] +== ThanosQuerier.spec + +`ThanosQuerierSpec` defines a single Thanos Querier instance. This means a label selector by which `MonitoringStack` instances to query are selected, and an optional namespace selector and a list of replica labels by which to deduplicate. 
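+
+A minimal, hypothetical `ThanosQuerier` resource might look like the following; the resource name, selector label, namespace, and replica label are illustrative assumptions, not required values:
+
+[source,yaml]
+----
+apiVersion: monitoring.rhobs/v1alpha1
+kind: ThanosQuerier
+metadata:
+  name: example-thanos-querier # hypothetical name
+spec:
+  selector:
+    matchLabels:
+      monitoring.example.com/stack: production # hypothetical MonitoringStack label
+  namespaceSelector:
+    matchNames:
+    - production-monitoring # hypothetical namespace
+  replicaLabels:
+  - prometheus_replica # hypothetical label to deduplicate on
+----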
+ +[cols="2,1,4,1"] +|=== +|Name |Type |Description |Required + +|xref:#thanosquerierspecselector[`selector`] +|object +|Selector to select monitoring stacks to unify. +|true + +|xref:#thanosquerierspecnamespaceselector[`namespaceSelector`] +|object +|Selector to select which namespaces the `MonitoringStack` objects are discovered from. +|false + +|`replicaLabels` +|`[]string` +| +|false + +|xref:#thanosquerierspecwebtlsconfig[`webTLSConfig`] +|object +|Configure TLS options for the Thanos web server. +|false +|=== + +[[thanosquerierspecselector]] +== ThanosQuerier.spec.selector + +Selector to select monitoring stacks to unify. + +[cols="2,2,4,1"] +|=== +|Name |Type |Description |Required + +|xref:#thanosquerierspecselectormatchexpressionsindex[`matchExpressions`] +|`[]object` +|`matchExpressions` is a list of label selector requirements. The requirements are ANDed. +|false + +|`matchLabels` +|`map[string]string` +|`matchLabels` is a map of {key,value} pairs. A single {key,value} in the `matchLabels` map is equivalent to an element of `matchExpressions`, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. +|false +|=== + +[[thanosquerierspecselectormatchexpressionsindex]] +== ThanosQuerier.spec.selector.matchExpressions[index] + + +A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + +[cols="1,1,4,1"] +|=== +|Name |Type |Description |Required + +|`key` +|string +|`key` is the label key that the selector applies to. +|true + +|`operator` +|string +|`operator` represents a key's relationship to a set of values. Valid operators are `In`, `NotIn`, `Exists` and `DoesNotExist`. +|true + +|`values` +|`[]string` +|`values` is an array of string values. If the operator is `In` or `NotIn`, the values array must be non-empty. If the operator is `Exists` or `DoesNotExist`, the values array must be empty. This array is replaced during a strategic merge patch. +|false +|=== + +[[thanosquerierspecnamespaceselector]] +== ThanosQuerier.spec.namespaceSelector + + +Selector to select which namespaces the `MonitoringStack` objects are discovered from. + +[cols="1,1,4,1"] +|=== +|Name |Type |Description |Required + +|`any` +|boolean +|Boolean describing whether all namespaces are selected in contrast to a list restricting them. +|false + +|`matchNames` +|`[]string` +|List of namespace names. +|false +|=== + +[[thanosquerierspecwebtlsconfig]] +== ThanosQuerier.spec.webTLSConfig + + +Configure TLS options for the Thanos web server. + +[cols="2,1,4,1"] +|=== +|Name |Type |Description |Required + +|xref:#thanosquerierspecwebtlsconfigcertificate[`certificate`] +|object +|Reference to the TLS public certificate for the web server. +|true + +|xref:#thanosquerierspecwebtlsconfigcertificateauthority[`certificateAuthority`] +|object +|Reference to the root Certificate Authority used to verify the web server's certificate. +|true + +|xref:#thanosquerierspecwebtlsconfigprivatekey[`privateKey`] +|object +|Reference to the TLS private key for the web server. +|true +|=== + +[[thanosquerierspecwebtlsconfigcertificate]] +== ThanosQuerier.spec.webTLSConfig.certificate + + +Reference to the TLS public certificate for the web server. + +[cols="1,1,3,1"] +|=== +|Name |Type |Description |Required + +|`key` +|string +|The key of the secret to select from. Must be a valid secret key. +|true + +|`name` +|string +|The name of the secret in the object's namespace to select from. 
+|true +|=== + +[[thanosquerierspecwebtlsconfigcertificateauthority]] +== ThanosQuerier.spec.webTLSConfig.certificateAuthority + + +Reference to the root Certificate Authority used to verify the web server's certificate. + +[cols="1,1,3,1"] +|=== +|Name |Type |Description |Required + +|`key` +|string +|The key of the secret to select from. Must be a valid secret key. +|true + +|`name` +|string +|The name of the secret in the object's namespace to select from. +|true +|=== + +[[thanosquerierspecwebtlsconfigprivatekey]] +== ThanosQuerier.spec.webTLSConfig.privateKey + + +Reference to the TLS private key for the web server. + +[cols="1,1,3,1"] +|=== +|Name |Type |Description |Required + +|`key` +|string +|The key of the secret to select from. Must be a valid secret key. +|true + +|`name` +|string +|The name of the secret in the object's namespace to select from. +|true +|=== diff --git a/api/api-observability-package.adoc b/api/api-observability-package.adoc new file mode 100644 index 000000000000..3f1a8fd6b552 --- /dev/null +++ b/api/api-observability-package.adoc @@ -0,0 +1,637 @@ +:_mod-docs-content-type: ASSEMBLY +[id="api-observability-package"] += observability.openshift.io/v1alpha1 +include::_attributes/common-attributes.adoc[] +:context: api-observability-package + +toc::[] + +The resource types are xref:#clusterobservability[`ClusterObservability`] and xref:#uiplugin[`UIPlugin`]. + +[[clusterobservability]] +== ClusterObservability + +ClusterObservability defines the desired state of the observability stack. + +[cols="2,1,4,1"] +|=== +|Name |Type |Description |Required + +|`apiVersion` +|string +|`observability.openshift.io/v1alpha1` +|true + +|`kind` +|string +|`ClusterObservability` +|true + +|link:https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#objectmeta-v1-meta[`metadata`] +|object +|Refer to the Kubernetes API documentation for the fields of the `metadata` field. +|true + +|xref:#clusterobservabilityspec[`spec`] +|object +|`Spec` defines the desired state of the cluster observability. +|false + +|`status` +|object +|Status of the signal manager. +|false +|=== + +[[clusterobservabilityspec]] +== ClusterObservability.spec + + +`Spec` defines the desired state of the cluster observability. + +[cols="2,1,4,1"] +|=== +|Name |Type |Description |Required + +|xref:#clusterobservabilityspeccapabilities[`capabilities`] +|object +|`Capabilities` defines the observability capabilities. Each capability has to be enabled explicitly. +|false + +|xref:#clusterobservabilityspecstorage[`storage`] +|object +|`Storage` defines the storage for the capabilities that require a storage. +|false +|=== + +[[clusterobservabilityspeccapabilities]] +== ClusterObservability.spec.capabilities + + +`Capabilities` defines the observability capabilities. Each capability has to be enabled explicitly. + +[cols="2,1,4,1"] +|=== +|Name |Type |Description |Required + +|xref:#clusterobservabilityspeccapabilitiesopentelemetry[`opentelemetry`] +|object +|`OpenTelemetry` defines the OpenTelemetry capabilities. +|false + +|xref:#clusterobservabilityspeccapabilitiestracing[`tracing`] +|object +|`Tracing` defines the tracing capabilities. +|false +|=== + +[[clusterobservabilityspeccapabilitiesopentelemetry]] +== ClusterObservability.spec.capabilities.opentelemetry + + +`OpenTelemetry` defines the OpenTelemetry capabilities. 
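+
+For illustration, a hypothetical `ClusterObservability` resource that enables the OpenTelemetry and tracing capabilities might look like the following; the resource name and OTLP endpoint are assumptions:
+
+[source,yaml]
+----
+apiVersion: observability.openshift.io/v1alpha1
+kind: ClusterObservability
+metadata:
+  name: example-observability # hypothetical name
+spec:
+  capabilities:
+    opentelemetry:
+      enabled: true
+      exporter:
+        endpoint: https://otlp.example.com:4317 # hypothetical OTLP endpoint
+    tracing:
+      enabled: true
+----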
+
+[cols="2,1,4,1"]
+|===
+|Name |Type |Description |Required
+
+|`enabled`
+|boolean
+|`Enabled` indicates whether the capability is enabled and whether its operator should deploy an instance. By default, it is set to false.
+
+_Default_: false
+|false
+
+|xref:#clusterobservabilityspeccapabilitiesopentelemetryexporter[`exporter`]
+|object
+|`Exporter` defines the OpenTelemetry exporter configuration. When defined, the collector exports telemetry data to the specified endpoint.
+|false
+
+|xref:#clusterobservabilityspeccapabilitiesopentelemetryoperators[`operators`]
+|object
+|`Operators` defines the installation of operators for the capability.
+|false
+|===
+
+[[clusterobservabilityspeccapabilitiesopentelemetryexporter]]
+== ClusterObservability.spec.capabilities.opentelemetry.exporter
+
+`Exporter` defines the OpenTelemetry exporter configuration. When defined, the collector exports telemetry data to the specified endpoint.
+
+[cols="1,1,3,1"]
+|===
+|Name |Type |Description |Required
+
+|`endpoint`
+|string
+|`Endpoint` is the OTLP endpoint.
+|false
+|===
+
+[[clusterobservabilityspeccapabilitiesopentelemetryoperators]]
+== ClusterObservability.spec.capabilities.opentelemetry.operators
+
+
+`Operators` defines the installation of operators for the capability.
+
+[cols="1,1,3,1"]
+|===
+|Name |Type |Description |Required
+
+|`install`
+|boolean
+|`Install` indicates whether the operator(s) used by the capability should be installed via OLM. When the capability is enabled, the install is set to true; otherwise, it is set to false.
+|false
+|===
+
+[[clusterobservabilityspeccapabilitiestracing]]
+== ClusterObservability.spec.capabilities.tracing
+
+
+`Tracing` defines the tracing capabilities.
+
+[cols="2,1,4,1"]
+|===
+|Name |Type |Description |Required
+
+|`enabled`
+|boolean
+|`Enabled` indicates whether the capability is enabled and whether its operator should deploy an instance. By default, it is set to false.
+
+_Default_: false
+|false
+
+|xref:#clusterobservabilityspeccapabilitiestracingoperators[`operators`]
+|object
+|`Operators` defines the installation of operators for the capability.
+|false
+|===
+
+[[clusterobservabilityspeccapabilitiestracingoperators]]
+== ClusterObservability.spec.capabilities.tracing.operators
+
+
+`Operators` defines the installation of operators for the capability.
+
+[cols="1,1,3,1"]
+|===
+|Name |Type |Description |Required
+
+|`install`
+|boolean
+|`Install` indicates whether the operator(s) used by the capability should be installed via OLM. When the capability is enabled, the install is set to true; otherwise, it is set to false.
+|false
+|===
+
+[[clusterobservabilityspecstorage]]
+== ClusterObservability.spec.storage
+
+
+`Storage` defines the storage for the capabilities that require storage.
+
+[cols="2,1,4,1"]
+|===
+|Name |Type |Description |Required
+
+|xref:#clusterobservabilityspecstoragesecret[`secret`]
+|object
+|`SecretSpec` defines the secret for the storage.
+|false
+|===
+
+[[clusterobservabilityspecstoragesecret]]
+== ClusterObservability.spec.storage.secret
+
+
+`SecretSpec` defines the secret for the storage.
+
+[cols="1,1,3,1"]
+|===
+|Name |Type |Description |Required
+
+|`name`
+|string
+|`Name` is the name of the secret for the storage.
+|false
+|===
+
+[[uiplugin]]
+== UIPlugin
+
+
+UIPlugin defines an observability console plugin.
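+
+A minimal, hypothetical `UIPlugin` resource for the logging plugin might look like the following; the `logging-loki` LokiStack name and the limit and timeout values are illustrative assumptions:
+
+[source,yaml]
+----
+apiVersion: observability.openshift.io/v1alpha1
+kind: UIPlugin
+metadata:
+  name: logging
+spec:
+  type: Logging
+  logging:
+    lokiStack:
+      name: logging-loki # hypothetical LokiStack in the openshift-logging namespace
+    logsLimit: 50 # example query limit
+    timeout: 30s # example query timeout
+----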
+
+[cols="2,1,4,1"]
+|===
+|Name |Type |Description |Required
+
+|`apiVersion`
+|string
+|`observability.openshift.io/v1alpha1`
+|true
+
+|`kind`
+|string
+|`UIPlugin`
+|true
+
+|link:https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#objectmeta-v1-meta[`metadata`]
+|object
+|Refer to the Kubernetes API documentation for the fields of the `metadata` field.
+|true
+
+|xref:#uipluginspec[`spec`]
+|object
+|`UIPluginSpec` is the specification for the desired state of UIPlugin.
+|false
+
+|xref:#uipluginstatus[`status`]
+|object
+|`UIPluginStatus` defines the observed state of UIPlugin. It should always be reconstructable from the state of the cluster and/or outside world.
+|false
+|===
+
+[[uipluginspec]]
+== UIPlugin.spec
+
+`UIPluginSpec` is the specification for the desired state of UIPlugin.
+
+[cols="2,1,4,1"]
+|===
+|Name |Type |Description |Required
+
+|`type`
+|enum
+|Type defines the UI plugin.
+
+_Enum_: `Dashboards`, `TroubleshootingPanel`, `DistributedTracing`, `Logging`, `Monitoring`
+|true
+
+|xref:#uipluginspecdeployment[`deployment`]
+|object
+|`Deployment` allows customizing aspects of the generated deployment hosting the UI Plugin.
+|false
+
+|xref:#uipluginspecdistributedtracing[`distributedTracing`]
+|object
+|`DistributedTracing` contains configuration for the distributed tracing console plugin.
+|false
+
+|xref:#uipluginspeclogging[`logging`]
+|object
+|`Logging` contains configuration for the logging console plugin.
+
+It only applies to UIPlugin Type: `Logging`.
+|false
+
+|xref:#uipluginspecmonitoring[`monitoring`]
+|object
+|`Monitoring` contains configuration for the monitoring console plugin.
+|false
+
+|xref:#uipluginspectroubleshootingpanel[`troubleshootingPanel`]
+|object
+|`TroubleshootingPanel` contains configuration for the troubleshooting console plugin.
+|false
+|===
+
+[[uipluginspecdeployment]]
+== UIPlugin.spec.deployment
+
+
+`Deployment` allows customizing aspects of the generated deployment hosting the UI Plugin.
+
+[cols="2,2,4,1"]
+|===
+|Name |Type |Description |Required
+
+|`nodeSelector`
+|`map[string]string`
+|Define a label selector for the nodes on which the Pods should be scheduled.
+
+When no selector is specified, it defaults to a value that selects only Linux nodes (`"kubernetes.io/os=linux"`).
+|false
+
+|xref:#uipluginspecdeploymenttolerationsindex[`tolerations`]
+|`[]object`
+|Define the tolerations used for the deployment.
+|false
+|===
+
+[[uipluginspecdeploymenttolerationsindex]]
+== UIPlugin.spec.deployment.tolerations[index]
+
+
+The pod this `Toleration` is attached to tolerates any taint that matches the triple `<key,value,effect>` using the matching operator `<operator>`.
+
+[cols="2,1,4,1"]
+|===
+|Name |Type |Description |Required
+
+|`effect`
+|string
+|`Effect` indicates the taint effect to match. Empty means match all taint effects. When specified, allowed values are `NoSchedule`, `PreferNoSchedule` and `NoExecute`.
+|false
+
+|`key`
+|string
+|`Key` is the taint key that the toleration applies to. Empty means match all taint keys. If the key is empty, operator must be `Exists`; this combination means to match all values and all keys.
+|false
+
+|`operator`
+|string
+|`Operator` represents a key's relationship to the value. Valid operators are `Exists` and `Equal`. Defaults to `Equal`. `Exists` is equivalent to wildcard for value, so that a pod can tolerate all taints of a particular category.
+|false
+
+|`tolerationSeconds`
+|integer
+|`TolerationSeconds` represents the period of time the toleration (which must be of effect `NoExecute`, otherwise this field is ignored) tolerates the taint. By default, it is not set, which means tolerate the taint forever (do not evict). Zero and negative values will be treated as 0 (evict immediately) by the system.
+
+_Format_: int64
+|false
+
+|`value`
+|string
+|`Value` is the taint value the toleration matches to. If the operator is `Exists`, the value should be empty, otherwise just a regular string.
+|false
+|===
+
+[[uipluginspecdistributedtracing]]
+== UIPlugin.spec.distributedTracing
+
+
+`DistributedTracing` contains configuration for the distributed tracing console plugin.
+
+[cols="1,1,4,1"]
+|===
+|Name |Type |Description |Required
+
+|`timeout`
+|string
+|`Timeout` is the maximum duration before a query timeout.
+
+The value is expected to be a sequence of digits followed by a unit suffix, which can be 's' (seconds) or 'm' (minutes).
+|false
+|===
+
+[[uipluginspeclogging]]
+== UIPlugin.spec.logging
+
+
+`Logging` contains configuration for the logging console plugin.
+
+* It only applies to UIPlugin Type: `Logging`.
+
+[cols="2,1,4,1"]
+|===
+|Name |Type |Description |Required
+
+|`logsLimit`
+|integer
+|`LogsLimit` is the maximum number of entries returned for a query.
+
+_Format_: int32
+
+_Minimum_: 0
+|false
+
+|xref:#uipluginspeclogginglokistack[`lokiStack`]
+|object
+|`LokiStack` points to the `LokiStack` instance whose logs should be displayed. It always references a `LokiStack` in the "openshift-logging" namespace.
+|false
+
+|`schema`
+|enum
+|`Schema` is the schema to use for logs querying and display.
+
+Defaults to "viaq" if not specified.
+
+_Enum_: `viaq`, `otel`, `select`
+
+_Default_: `viaq`
+|false
+
+|`timeout`
+|string
+|`Timeout` is the maximum duration before a query timeout.
+
+The value is expected to be a sequence of digits followed by an optional unit suffix, which can be 's' (seconds) or 'm' (minutes). If the unit is omitted, it defaults to seconds.
+|false
+|===
+
+[[uipluginspeclogginglokistack]]
+== UIPlugin.spec.logging.lokiStack
+
+
+`LokiStack` points to the LokiStack instance whose logs should be displayed. It always references a LokiStack in the "openshift-logging" namespace.
+
+[cols="1,1,4,1"]
+|===
+|Name |Type |Description |Required
+
+|`name`
+|string
+|Name of the `LokiStack` resource.
+|false
+
+|`namespace`
+|string
+|
+|false
+|===
+
+[[uipluginspecmonitoring]]
+== UIPlugin.spec.monitoring
+
+
+`Monitoring` contains configuration for the monitoring console plugin.
+
+[cols="2,1,4,1"]
+|===
+|Name |Type |Description |Required
+
+|xref:#uipluginspecmonitoringacm[`acm`]
+|object
+|`ACM` points to the alertmanager and thanosQuerier instance services to which it should create a proxy.
+|false
+
+|xref:#uipluginspecmonitoringincidents[`incidents`]
+|object
+|`Incidents` enables or disables the incidents feature.
+|false
+
+|xref:#uipluginspecmonitoringperses[`perses`]
+|object
+|`Perses` points to the perses instance service to which it should create a proxy.
+|false
+|===
+
+[[uipluginspecmonitoringacm]]
+== UIPlugin.spec.monitoring.acm
+
+
+`ACM` points to the alertmanager and thanosQuerier instance services to which it should create a proxy.
+
+[cols="2,1,4,1"]
+|===
+|Name |Type |Description |Required
+
+|xref:#uipluginspecmonitoringacmalertmanager[`alertmanager`]
+|object
+|`Alertmanager` points to the alertmanager instance to which it should create a proxy.
+|true
+
+|`enabled`
+|boolean
+|Indicates whether ACM-related features should be enabled.
+|true
+
+|xref:#uipluginspecmonitoringacmthanosquerier[`thanosQuerier`]
+|object
+|`ThanosQuerier` points to the thanos-querier service to which it should create a proxy.
+|true
+|===
+
+[[uipluginspecmonitoringacmalertmanager]]
+== UIPlugin.spec.monitoring.acm.alertmanager
+
+`Alertmanager` points to the alertmanager instance to which it should create a proxy.
+
+[cols="1,1,3,1"]
+|===
+|Name |Type |Description |Required
+
+|`url`
+|string
+|URL of the Alertmanager to proxy to.
+|true
+|===
+
+[[uipluginspecmonitoringacmthanosquerier]]
+== UIPlugin.spec.monitoring.acm.thanosQuerier
+
+
+`ThanosQuerier` points to the thanos-querier service to which it should create a proxy.
+
+[cols="1,1,3,1"]
+|===
+|Name |Type |Description |Required
+
+|`url`
+|string
+|URL of the ThanosQuerier to proxy to.
+|true
+|===
+
+[[uipluginspecmonitoringincidents]]
+== UIPlugin.spec.monitoring.incidents
+
+
+`Incidents` enables or disables the incidents feature.
+
+[cols="1,1,3,1"]
+|===
+|Name |Type |Description |Required
+
+|`enabled`
+|boolean
+|Indicates whether incidents-related features should be enabled.
+|true
+|===
+
+[[uipluginspecmonitoringperses]]
+== UIPlugin.spec.monitoring.perses
+
+
+`Perses` points to the perses instance service to which it should create a proxy.
+
+[cols="1,1,3,1"]
+|===
+|Name |Type |Description |Required
+
+|`enabled`
+|boolean
+|Indicates whether perses-related features should be enabled.
+|true
+|===
+
+[[uipluginspectroubleshootingpanel]]
+== UIPlugin.spec.troubleshootingPanel
+
+
+`TroubleshootingPanel` contains configuration for the troubleshooting console plugin.
+
+[cols="1,1,4,1"]
+|===
+|Name |Type |Description |Required
+
+|`timeout`
+|string
+|`Timeout` is the maximum duration before a query timeout.
+
+The value is expected to be a sequence of digits followed by a unit suffix, which can be 's' (seconds) or 'm' (minutes).
+|false
+|===
+
+[[uipluginstatus]]
+== UIPlugin.status
+
+
+`UIPluginStatus` defines the observed state of UIPlugin. It should always be reconstructable from the state of the cluster and/or outside world.
+
+[cols="2,1,4,1"]
+|===
+|Name |Type |Description |Required
+
+|xref:#uipluginstatusconditionsindex[`conditions`]
+|`[]object`
+|`Conditions` provide status information about the plugin.
+|true
+|===
+
+[[uipluginstatusconditionsindex]]
+== UIPlugin.status.conditions[index]
+
+
+[cols="2,1,4,1"]
+|===
+|Name |Type |Description |Required
+
+|`lastTransitionTime`
+|string
+|`lastTransitionTime` is the last time the condition transitioned from one status to another. This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable.
+
+_Format_: date-time
+|true
+
+|`message`
+|string
+|`message` is a human readable message indicating details about the transition. This may be an empty string.
+|true
+
+|`reason`
+|string
+|`reason` contains a programmatic identifier indicating the reason for the condition's last transition. Producers of specific condition types may define expected values and meanings for this field, and whether the values are considered a guaranteed API. The value should be a CamelCase string. This field may not be empty.
+|true
+
+|`status`
+|enum
+|status of the condition
+
+_Enum_: `True`, `False`, `Unknown`, `Degraded`
+|true
+
+|`type`
+|string
+|`type` of condition in CamelCase or in `foo.example.com/CamelCase`.
The regex it matches is `(dns1123SubdomainFmt/)?(qualifiedNameFmt)` +|true + +|`observedGeneration` +|integer +|`observedGeneration` represents the `.metadata.generation` that the condition was set based upon. For instance, if `.metadata.generation` is currently 12, but the `.status.conditions[x].observedGeneration` is 9, the condition is out of date with respect to the current state of the instance. + +_Format_: int64 + +_Minimum_: 0 +|false +|=== \ No newline at end of file diff --git a/api/docinfo.xml b/api/docinfo.xml new file mode 100644 index 000000000000..ebf2edf50114 --- /dev/null +++ b/api/docinfo.xml @@ -0,0 +1,12 @@ +API reference for Red Hat OpenShift Cluster Observability Operator +{product-title} +{product-version} +Monitoring API package. + + This document provides an overview of the Monitoring API package. + + + + Red Hat OpenShift Documentation Team + + diff --git a/cloud_experts_tutorials/cloud-experts-deploying-application/images b/api/images similarity index 100% rename from cloud_experts_tutorials/cloud-experts-deploying-application/images rename to api/images diff --git a/cloud_providers/modules b/api/modules similarity index 100% rename from cloud_providers/modules rename to api/modules diff --git a/cloud_experts_tutorials/cloud-experts-deploying-application/snippets b/api/snippets similarity index 100% rename from cloud_experts_tutorials/cloud-experts-deploying-application/snippets rename to api/snippets diff --git a/applications/PLACEHOLDER b/applications/PLACEHOLDER deleted file mode 100644 index 985a0e1895b7..000000000000 --- a/applications/PLACEHOLDER +++ /dev/null @@ -1,2 +0,0 @@ -Please leave this file until after Node PRs merge, as is it needed for the topic_yaml. Subtopics are not allowed, apparently, without at least one topic in the TOC - diff --git a/applications/application-health.adoc b/applications/application-health.adoc deleted file mode 100644 index 42fefe17fdf1..000000000000 --- a/applications/application-health.adoc +++ /dev/null @@ -1,38 +0,0 @@ -:_mod-docs-content-type: ASSEMBLY -:context: application-health -[id="application-health"] -= Monitoring application health by using health checks -include::_attributes/common-attributes.adoc[] - -toc::[] - - -In software systems, components can become unhealthy due to transient issues such as temporary connectivity loss, configuration errors, or problems with external dependencies. {product-title} applications have a number of options to detect and handle unhealthy containers. - -// The following include statements pull in the module files that comprise -// the assembly. Include any combination of concept, procedure, or reference -// modules required to cover the user story. You can also include other -// assemblies. 
-
-
-include::modules/application-health-about.adoc[leveloffset=+1]
-
-include::modules/application-health-configuring.adoc[leveloffset=+1]
-
-include::modules/odc-monitoring-application-health-using-developer-perspective.adoc[leveloffset=+1]
-
-// cannot add health checks in web console
-ifdef::openshift-rosa,openshift-rosa-hcp,openshift-dedicated[]
-include::modules/odc-adding-health-checks.adoc[leveloffset=+1]
-endif::openshift-rosa,openshift-rosa-hcp,openshift-dedicated[]
-
-include::modules/odc-editing-health-checks.adoc[leveloffset=+1]
-
-include::modules/odc-monitoring-health-checks.adoc[leveloffset=+1]
-
-[role="_additional-resources"]
-.Additional resources
-ifndef::openshift-rosa,openshift-rosa-hcp,openshift-dedicated[]
-* For details on switching to the *Developer* perspective in the web console, see xref:../web_console/web-console-overview.adoc#about-developer-perspective_web-console-overview[About the *Developer* perspective].
-endif::openshift-rosa,openshift-rosa-hcp,openshift-dedicated[]
-* For details on adding health checks while creating and deploying an application, see *Advanced Options* in the xref:../applications/creating_applications/odc-creating-applications-using-developer-perspective.adoc#odc-creating-applications-using-developer-perspective[Creating applications using the Developer perspective] section.
diff --git a/applications/config-maps.adoc b/applications/config-maps.adoc
deleted file mode 100644
index 24c58954e1ad..000000000000
--- a/applications/config-maps.adoc
+++ /dev/null
@@ -1,29 +0,0 @@
-:_mod-docs-content-type: ASSEMBLY
-[id="config-maps"]
-= Using config maps with applications
-include::_attributes/common-attributes.adoc[]
-:context: config-maps
-
-toc::[]
-
-Config maps allow you to decouple configuration artifacts from image content to keep containerized applications portable.
-
-The following sections define config maps and how to create and use them.
-
-include::modules/nodes-pods-configmap-overview.adoc[leveloffset=+1]
-
-.Additional resources
-
-* xref:../nodes/pods/nodes-pods-configmaps.adoc[Creating and using config maps]
-
-[id="nodes-pods-config-maps-consuming-configmap-in-pods"]
-== Use cases: Consuming config maps in pods
-
-The following sections describe some use cases for consuming `ConfigMap`
-objects in pods.
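Before the module-level walkthroughs, a minimal sketch of the first use case: surfacing a `ConfigMap` value as a container environment variable. The names and the image below are hypothetical placeholders.

[source,yaml]
----
apiVersion: v1
kind: ConfigMap
metadata:
  name: example-config            # hypothetical config map
data:
  log_level: INFO
---
apiVersion: v1
kind: Pod
metadata:
  name: example-pod               # hypothetical pod
spec:
  containers:
  - name: app
    image: registry.example.com/app:latest   # placeholder image
    env:
    - name: LOG_LEVEL
      valueFrom:
        configMapKeyRef:          # pull the value from the config map key
          name: example-config
          key: log_level
----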
- -include::modules/nodes-pods-configmaps-use-case-consuming-in-env-vars.adoc[leveloffset=+2] - -include::modules/nodes-pods-configmaps-use-case-setting-command-line-arguments.adoc[leveloffset=+2] - -include::modules/nodes-pods-configmaps-use-case-consuming-in-volumes.adoc[leveloffset=+2] diff --git a/applications/connecting_applications_to_services/_attributes b/applications/connecting_applications_to_services/_attributes deleted file mode 120000 index 20cc1dcb77bf..000000000000 --- a/applications/connecting_applications_to_services/_attributes +++ /dev/null @@ -1 +0,0 @@ -../../_attributes/ \ No newline at end of file diff --git a/applications/connecting_applications_to_services/binding-workloads-using-sbo.adoc b/applications/connecting_applications_to_services/binding-workloads-using-sbo.adoc deleted file mode 100644 index 8c00878a9e2b..000000000000 --- a/applications/connecting_applications_to_services/binding-workloads-using-sbo.adoc +++ /dev/null @@ -1,50 +0,0 @@ -:_mod-docs-content-type: ASSEMBLY -[id="binding-workloads-using-sbo"] -= Binding workloads using Service Binding Operator -include::_attributes/common-attributes.adoc[] -include::_attributes/servicebinding-document-attributes.adoc[] -:context: binding-workloads-using-sbo - -toc::[] - -Application developers must bind a workload to one or more backing services by using a binding secret. This secret is generated for the purpose of storing information to be consumed by the workload. - -As an example, consider that the service you want to connect to is already exposing the binding data. In this case, you would also need a workload to be used along with the `ServiceBinding` custom resource (CR). By using this `ServiceBinding` CR, the workload sends a binding request with the details of the services to bind with. - -.Example of `ServiceBinding` CR -[source,yaml] ----- -apiVersion: binding.operators.coreos.com/v1alpha1 -kind: ServiceBinding -metadata: - name: spring-petclinic-pgcluster - namespace: my-petclinic -spec: - services: <1> - - group: postgres-operator.crunchydata.com - version: v1beta1 - kind: PostgresCluster - name: hippo - application: <2> - name: spring-petclinic - group: apps - version: v1 - resource: deployments ----- -<1> Specifies a list of service resources. -<2> The sample application that points to a Deployment or any other similar resource with an embedded PodSpec. - -As shown in the previous example, you can also directly use a `ConfigMap` or a `Secret` itself as a service resource to be used as a source of binding data. - -include::modules/sbo-naming-strategies.adoc[leveloffset=+1] -include::modules/sbo-advanced-binding-options.adoc[leveloffset=+1] -include::modules/sbo-binding-workloads-that-are-not-compliant-with-PodSpec.adoc[leveloffset=+1] -include::modules/sbo-unbinding-workloads-from-a-backing-service.adoc[leveloffset=+1] - -[role="_additional-resources"] -[id="additional-resources_binding-workloads-sbo"] -== Additional resources -* xref:../../applications/connecting_applications_to_services/understanding-service-binding-operator.adoc#binding-a-workload-together-with-a-backing-service_understanding-service-binding-operator[Binding a workload together with a backing service]. -* xref:../../applications/connecting_applications_to_services/getting-started-with-service-binding.adoc#sbo-connecting-spring-petclinic-sample-app-to-postgresql-database-service_getting-started-with-service-binding[Connecting the Spring PetClinic sample application to the PostgreSQL database service]. 
-* xref:../../operators/understanding/crds/crd-managing-resources-from-crds.adoc#crd-creating-custom-resources-from-file_crd-managing-resources-from-crds[Creating custom resources from a file].
-* link:https://redhat-developer.github.io/service-binding-operator/userguide/binding-workloads-using-sbo/custom-path-injection.html#_workload_resource_mapping[Example schema of the ClusterWorkloadResourceMapping resource].
diff --git a/applications/connecting_applications_to_services/exposing-binding-data-from-a-service.adoc b/applications/connecting_applications_to_services/exposing-binding-data-from-a-service.adoc
deleted file mode 100644
index 4bc0cf51d69f..000000000000
--- a/applications/connecting_applications_to_services/exposing-binding-data-from-a-service.adoc
+++ /dev/null
@@ -1,29 +0,0 @@
-:_mod-docs-content-type: ASSEMBLY
-[id="exposing-binding-data-from-a-service"]
-= Exposing binding data from a service
-include::_attributes/common-attributes.adoc[]
-include::_attributes/servicebinding-document-attributes.adoc[]
-:context: exposing-binding-data-from-a-service
-
-toc::[]
-
-[role="_abstract"]
-Application developers need access to backing services to build and connect workloads. Connecting workloads to backing services is always a challenge because each service provider requires a different way to access its secrets and consume them in a workload.
-
-The {servicebinding-title} enables application developers to easily bind workloads together with Operator-managed backing services, without any manual procedures to configure the binding connection. For the {servicebinding-title} to provide the binding data, as an Operator provider or user who creates backing services, you must expose the binding data to be automatically detected by the {servicebinding-title}. Then, the {servicebinding-title} automatically collects the binding data from the backing service and shares it with a workload to provide a consistent and predictable experience.
-
-include::modules/sbo-methods-of-exposing-binding-data.adoc[leveloffset=+1]
-include::modules/sbo-data-model.adoc[leveloffset=+1]
-include::modules/sbo-setting-annotations-mapping-optional.adoc[leveloffset=+1]
-include::modules/sbo-rbac-requirements.adoc[leveloffset=+1]
-include::modules/sbo-categories-of-exposable-binding-data.adoc[leveloffset=+1]
-
-[role="_additional-resources"]
-[id="additional-resources_exposing-binding-data"]
-== Additional resources
-// * link:https://github.com/openshift/console/blob/master/frontend/packages/operator-lifecycle-manager/src/components/descriptors/reference/reference.md[OLM Descriptor Reference].
-// When OLM descriptors are supported again, add this additional resource.
-ifndef::openshift-rosa,openshift-rosa-hcp,openshift-dedicated[]
-* xref:../../operators/operator_sdk/osdk-generating-csvs.adoc#osdk-generating-csvs[Defining cluster service versions (CSVs)].
-endif::openshift-rosa,openshift-rosa-hcp,openshift-dedicated[]
-* xref:../../applications/connecting_applications_to_services/projecting-binding-data.adoc#projecting-binding-data[Projecting binding data].
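One of the exposure methods the modules above cover is annotating the backing service's resource or CRD. The following is a hypothetical sketch only: the `Database` CRD, resource name, and field path are invented, and the exact annotation syntax should be verified against the {servicebinding-title} user guide linked in this section.

[source,yaml]
----
apiVersion: example.com/v1alpha1    # hypothetical backing-service CRD
kind: Database
metadata:
  name: my-db
  annotations:
    # Expose the value at .status.host as a binding item named "host".
    service.binding/host: path={.status.host}
----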
diff --git a/applications/connecting_applications_to_services/getting-started-with-service-binding-ibm-power-ibm-z.adoc b/applications/connecting_applications_to_services/getting-started-with-service-binding-ibm-power-ibm-z.adoc deleted file mode 100644 index bd200f551a1d..000000000000 --- a/applications/connecting_applications_to_services/getting-started-with-service-binding-ibm-power-ibm-z.adoc +++ /dev/null @@ -1,38 +0,0 @@ -:_mod-docs-content-type: ASSEMBLY -[id="getting-started-with-service-binding-ibm-power-ibm-z"] -= Getting started with service binding on {ibm-power-title}, {ibm-z-title}, and {ibm-linuxone-title} -include::_attributes/common-attributes.adoc[] -include::_attributes/servicebinding-document-attributes.adoc[] -:context: getting-started-with-service-binding-ibm-power-ibm-z - -toc::[] - -[role="_abstract"] -The {servicebinding-title} manages the data plane for workloads and backing services. This guide provides instructions with examples to help you create a database instance, deploy an application, and use the {servicebinding-title} to create a binding connection between the application and the database service. - -// Prerequisites for getting started with Service Binding Operator -[discrete] -== Prerequisites - -* You have access to an {product-title} cluster using an account with `cluster-admin` permissions. -* You have installed the `oc` CLI. -* You have installed the {servicebinding-title} from OperatorHub. - -//Deploying PostgreSQL operator -include::modules/sbo-deploying-a-postgresql-database-operator-power-z.adoc[leveloffset=+1] - -//Creating a PostgreSQL database instance -include::modules/sbo-creating-a-postgresql-database-instance-power-z.adoc[leveloffset=+1] - -//Deploying the Spring PetClinic sample application -include::modules/sbo-deploying-the-spring-petclinic-sample-application-power-z.adoc[leveloffset=+1] - -//Connecting the Spring PetClinic sample application to the PostgreSQL database service -include::modules/sbo-connecting-spring-petclinic-sample-app-to-postgresql-database-service-power-z.adoc[leveloffset=+1] - -[role="_additional-resources"] -[id="additional-resources_getting-started-with-service-binding-ibm-power-ibm-z"] -== Additional resources -* xref:../../applications/connecting_applications_to_services/installing-sbo.adoc#installing-sbo[Installing Service Binding Operator] -* xref:../../applications/creating_applications/odc-creating-applications-using-developer-perspective.adoc#odc-creating-applications-using-developer-perspective[Creating applications using the Developer perspective] -* xref:../../operators/understanding/crds/crd-managing-resources-from-crds.adoc[Managing resources from custom resource definitions] diff --git a/applications/connecting_applications_to_services/getting-started-with-service-binding.adoc b/applications/connecting_applications_to_services/getting-started-with-service-binding.adoc deleted file mode 100644 index 535314cb299d..000000000000 --- a/applications/connecting_applications_to_services/getting-started-with-service-binding.adoc +++ /dev/null @@ -1,57 +0,0 @@ -:_mod-docs-content-type: ASSEMBLY -[id="getting-started-with-service-binding"] -= Getting started with service binding -include::_attributes/common-attributes.adoc[] -include::_attributes/servicebinding-document-attributes.adoc[] -:context: getting-started-with-service-binding - -toc::[] - -[role="_abstract"] -The {servicebinding-title} manages the data plane for workloads and backing services. 
This guide provides instructions with examples to help you create a database instance, deploy an application, and use the {servicebinding-title} to create a binding connection between the application and the database service.
-
-// Prerequisites for getting started with Service Binding Operator
-[discrete]
-== Prerequisites
-
-ifndef::openshift-rosa,openshift-rosa-hcp,openshift-dedicated[]
-* You have access to an {product-title} cluster using an account with `cluster-admin` permissions.
-endif::openshift-rosa,openshift-rosa-hcp,openshift-dedicated[]
-ifdef::openshift-rosa,openshift-rosa-hcp,openshift-dedicated[]
-* You have access to an {product-title} cluster using an account with `dedicated-admin` permissions.
-endif::openshift-rosa,openshift-rosa-hcp,openshift-dedicated[]
-* You have installed the `oc` CLI.
-* You have installed the {servicebinding-title} from OperatorHub.
-ifndef::openshift-rosa,openshift-rosa-hcp,openshift-dedicated[]
-* You have installed the 5.1.2 version of the Crunchy Postgres for Kubernetes Operator from OperatorHub using the *v5* Update channel. The installed Operator is available in an appropriate namespace, such as the `my-petclinic` namespace.
-+
-[NOTE]
-====
-You can create the namespace using the `oc create namespace my-petclinic` command.
-====
-endif::openshift-rosa,openshift-rosa-hcp,openshift-dedicated[]
-ifdef::openshift-rosa,openshift-rosa-hcp,openshift-dedicated[]
-* You have installed the 5.1.2 version of the Crunchy Postgres for Kubernetes Operator from OperatorHub using the *v5* Update channel. The installed Operator is available in an appropriate project, such as the `my-petclinic` project.
-+
-[NOTE]
-====
-You can create the project using the `oc new-project my-petclinic` command.
-====
-endif::openshift-rosa,openshift-rosa-hcp,openshift-dedicated[]
-
-//Creating a PostgreSQL database instance
-include::modules/sbo-creating-a-postgresql-database-instance.adoc[leveloffset=+1]
-
-//Deploying the Spring PetClinic sample application
-include::modules/sbo-deploying-the-spring-petclinic-sample-application.adoc[leveloffset=+1]
-
-//Connecting the Spring PetClinic sample application to the PostgreSQL database service
-include::modules/sbo-connecting-spring-petclinic-sample-app-to-postgresql-database-service.adoc[leveloffset=+1]
-
-[role="_additional-resources"]
-[id="additional-resources_getting-started-sbo"]
-== Additional resources
-* xref:../../applications/connecting_applications_to_services/installing-sbo.adoc#installing-sbo[Installing Service Binding Operator].
-* xref:../../applications/creating_applications/odc-creating-applications-using-developer-perspective.adoc#odc-creating-applications-using-developer-perspective[Creating applications using the Developer perspective].
-* xref:../../operators/understanding/crds/crd-managing-resources-from-crds.adoc[Managing resources from custom resource definitions].
-* link:https://github.com/redhat-developer/service-binding-operator#known-bindable-operators[Known bindable Operators].
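The database instance that the first module creates is referenced later by the coordinates used in the `ServiceBinding` example earlier in this document. A hedged sketch follows, assuming the `postgres-operator.crunchydata.com/v1beta1` `PostgresCluster` kind named `hippo`; the `spec` shown is illustrative only, not the module's full, version-specific definition.

[source,yaml]
----
apiVersion: postgres-operator.crunchydata.com/v1beta1
kind: PostgresCluster
metadata:
  name: hippo
  namespace: my-petclinic
spec:
  postgresVersion: 14     # illustrative; the module pins the exact version
  instances:              # illustrative; the full spec includes storage and backups
  - name: instance1
    replicas: 1
----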
diff --git a/applications/connecting_applications_to_services/images b/applications/connecting_applications_to_services/images
deleted file mode 120000
index 5fa6987088da..000000000000
--- a/applications/connecting_applications_to_services/images
+++ /dev/null
@@ -1 +0,0 @@
-../../images
\ No newline at end of file
diff --git a/applications/connecting_applications_to_services/installing-sbo.adoc b/applications/connecting_applications_to_services/installing-sbo.adoc
deleted file mode 100644
index 4d19708106cc..000000000000
--- a/applications/connecting_applications_to_services/installing-sbo.adoc
+++ /dev/null
@@ -1,30 +0,0 @@
-:_mod-docs-content-type: ASSEMBLY
-[id="installing-sbo"]
-= Installing Service Binding Operator
-include::_attributes/common-attributes.adoc[]
-include::_attributes/servicebinding-document-attributes.adoc[]
-:context: installing-sbo
-
-toc::[]
-
-[role="_abstract"]
-This guide walks cluster administrators through the process of installing the {servicebinding-title} on an {product-title} cluster.
-
-You can install the {servicebinding-title} on {product-title} 4.7 and later.
-
-[discrete]
-== Prerequisites
-
-* You have access to an {product-title} cluster using an account with `cluster-admin` permissions.
-ifndef::openshift-rosa,openshift-rosa-hcp,openshift-dedicated[]
-* Your cluster has the xref:../../installing/overview/cluster-capabilities.adoc#operator-marketplace_cluster-capabilities[Marketplace capability] enabled or the Red Hat Operator catalog source configured manually.
-endif::openshift-rosa,openshift-rosa-hcp,openshift-dedicated[]
-
-//Installing Service Binding Operator using web console
-
-include::modules/op-installing-sbo-operator-using-the-web-console.adoc[leveloffset=+1]
-
-
-== Additional resources
-
-* xref:../../applications/connecting_applications_to_services/getting-started-with-service-binding.adoc#getting-started-with-service-binding[Getting started with service binding].
diff --git a/applications/connecting_applications_to_services/modules b/applications/connecting_applications_to_services/modules
deleted file mode 120000
index 8b0e8540076d..000000000000
--- a/applications/connecting_applications_to_services/modules
+++ /dev/null
@@ -1 +0,0 @@
-../../modules
\ No newline at end of file
diff --git a/applications/connecting_applications_to_services/odc-connecting-an-application-to-a-service-using-the-developer-perspective.adoc b/applications/connecting_applications_to_services/odc-connecting-an-application-to-a-service-using-the-developer-perspective.adoc
deleted file mode 100644
index 58c85a0a5fc6..000000000000
--- a/applications/connecting_applications_to_services/odc-connecting-an-application-to-a-service-using-the-developer-perspective.adoc
+++ /dev/null
@@ -1,37 +0,0 @@
-:_mod-docs-content-type: ASSEMBLY
-[id="odc-connecting-an-application-to-a-service-using-the-developer-perspective"]
-= Connecting an application to a service using the Developer perspective
-include::_attributes/common-attributes.adoc[]
-include::_attributes/servicebinding-document-attributes.adoc[]
-:context: odc-connecting-an-application-to-a-service-using-the-developer-perspective
-
-toc::[]
-
-[role="_abstract"]
-
-Use the *Topology* view for the following purposes:
-
-* Grouping multiple components within an application.
-
-* Connecting components with each other.
-
-* Connecting multiple resources to services with labels.
-
-You can either use a binding or a visual connector to connect components.
-
-
-A binding connection between the components can be established only if the target node is an Operator-backed service. This is indicated by the *Create a binding connector* tool-tip that appears when you drag an arrow to such a target node. When an application is connected to a service by using a binding connector, a `ServiceBinding` resource is created. Then, the {servicebinding-title} controller projects the necessary binding data into the application deployment. After the request is successful, the application is redeployed, establishing an interaction between the connected components.
-
-A visual connector establishes only a visual connection between the components, depicting an intent to connect. No interaction between the components is established. If the target node is not an Operator-backed service, the *Create a visual connector* tool-tip is displayed when you drag an arrow to a target node.
-
-include::modules/odc-discovering-and-identifying-operator-backed-bindable-services.adoc[leveloffset=+1]
-include::modules/odc-creating-a-visual-connection-between-components.adoc[leveloffset=+1]
-include::modules/odc-creating-a-binding-connection-between-components.adoc[leveloffset=+1]
-include::modules/odc-verifying-the-status-of-your-service-binding-from-the-topology-view.adoc[leveloffset=+1]
-include::modules/odc-visualizing-the-binding-connections-to-resources.adoc[leveloffset=+1]
-
-[role="_additional-resources"]
-[id="additional-resources-odc-connecting-an-application-to-a-service-using-the-developer-perspective"]
-== Additional resources
-* xref:../../applications/connecting_applications_to_services/getting-started-with-service-binding.adoc#getting-started-with-service-binding[Getting started with service binding]
-* link:https://github.com/redhat-developer/service-binding-operator#known-bindable-operators[Known bindable Operators]
diff --git a/applications/connecting_applications_to_services/projecting-binding-data.adoc b/applications/connecting_applications_to_services/projecting-binding-data.adoc
deleted file mode 100644
index dc23b0b2b071..000000000000
--- a/applications/connecting_applications_to_services/projecting-binding-data.adoc
+++ /dev/null
@@ -1,26 +0,0 @@
-:_mod-docs-content-type: ASSEMBLY
-[id="projecting-binding-data"]
-= Projecting binding data
-include::_attributes/common-attributes.adoc[]
-include::_attributes/servicebinding-document-attributes.adoc[]
-:context: projecting-binding-data
-
-toc::[]
-
-[role="_abstract"]
-This section provides information on how you can consume the binding data.
-
-== Consumption of binding data
-After the backing service exposes the binding data, for a workload to access and consume this data, you must project it into the workload from a backing service. The {servicebinding-title} automatically projects this set of data into the workload by using the following methods:
-
-. By default, as files.
-. As environment variables, after you configure the `.spec.bindAsFiles` parameter from the `ServiceBinding` resource.
-
-include::modules/sbo-configuration-of-directory-path-to-project-binding-data.adoc[leveloffset=+1]
-include::modules/sbo-projecting-the-binding-data.adoc[leveloffset=+1]
-
-[role="_additional-resources"]
-[id="additional-resources_projecting-binding-data-sbo"]
-== Additional resources
-* xref:../../applications/connecting_applications_to_services/exposing-binding-data-from-a-service.adoc#exposing-binding-data-from-a-service[Exposing binding data from a service].
-* link:https://redhat-developer.github.io/service-binding-operator/userguide/using-projected-bindings/using-projected-bindings.html[Using the projected binding data in the source code of the application]. \ No newline at end of file diff --git a/applications/connecting_applications_to_services/sbo-release-notes.adoc b/applications/connecting_applications_to_services/sbo-release-notes.adoc deleted file mode 100644 index 87e63866765a..000000000000 --- a/applications/connecting_applications_to_services/sbo-release-notes.adoc +++ /dev/null @@ -1,73 +0,0 @@ -//OpenShift Service Binding Release Notes -:_mod-docs-content-type: ASSEMBLY -[id="servicebinding-release-notes"] -= Release notes for {servicebinding-title} -:context: servicebinding-release-notes -include::_attributes/common-attributes.adoc[] -include::_attributes/servicebinding-document-attributes.adoc[] - -toc::[] - -The {servicebinding-title} consists of a controller and an accompanying custom resource definition (CRD) for service binding. It manages the data plane for workloads and backing services. The Service Binding Controller reads the data made available by the control plane of backing services. Then, it projects this data to workloads according to the rules specified through the `ServiceBinding` resource. - -With {servicebinding-title}, you can: - -* Bind your workloads together with Operator-managed backing services. -* Automate configuration of binding data. -* Provide service operators a low-touch administrative experience to provision and manage access to services. -* Enrich development lifecycle with a consistent and declarative service binding method that eliminates discrepancies in cluster environments. - -The custom resource definition (CRD) of the {servicebinding-title} supports the following APIs: - -* *Service Binding* with the `binding.operators.coreos.com` API group. -* *Service Binding (Spec API)* with the `servicebinding.io` API group. - -[id="support-matrix"] -== Support matrix - -Some features in the following table are in link:https://access.redhat.com/support/offerings/techpreview[Technology Preview]. These experimental features are not intended for production use. - -In the table, features are marked with the following statuses: - -- *TP*: _Technology Preview_ - -- *GA*: _General Availability_ - -Note the following scope of support on the Red Hat Customer Portal for these features: - -.Support matrix -[options="header"] -|=== -|*{servicebinding-title}* 2+|*API Group and Support Status*|*OpenShift Versions* - -|*Version*|*`binding.operators.coreos.com`* |*`servicebinding.io`* | -|1.3.3 |GA |GA |4.9-4.12 -|1.3.1 |GA |GA |4.9-4.11 -|1.3 |GA |GA |4.9-4.11 -|1.2 |GA |GA |4.7-4.11 -|1.1.1 |GA |TP |4.7-4.10 -|1.1 |GA |TP |4.7-4.10 -|1.0.1 |GA |TP |4.7-4.9 -|1.0 |GA |TP |4.7-4.9 - -|=== - -[id="servicebinding-inclusive-language"] -== Making open source more inclusive - -Red Hat is committed to replacing problematic language in our code, documentation, and web properties. We are beginning with these four terms: master, slave, blacklist, and whitelist. Because of the enormity of this endeavor, these changes will be implemented gradually over several upcoming releases. For more details, see link:https://www.redhat.com/en/blog/making-open-source-more-inclusive-eradicating-problematic-language[Red Hat CTO Chris Wright's message]. 
-
-// Modules included, most to least recent
-include::modules/sbo-release-notes-1-3-3.adoc[leveloffset=+1]
-include::modules/sbo-release-notes-1-3-1.adoc[leveloffset=+1]
-include::modules/sbo-release-notes-1-3.adoc[leveloffset=+1]
-include::modules/sbo-release-notes-1-2.adoc[leveloffset=+1]
-include::modules/sbo-release-notes-1-1-1.adoc[leveloffset=+1]
-include::modules/sbo-release-notes-1-1.adoc[leveloffset=+1]
-include::modules/sbo-release-notes-1-0-1.adoc[leveloffset=+1]
-include::modules/sbo-release-notes-1-0.adoc[leveloffset=+1]
-
-[role="_additional-resources"]
-[id="additional-resources_release-notes-sbo"]
-== Additional resources
-* xref:../../applications/connecting_applications_to_services/understanding-service-binding-operator.adoc#understanding-service-binding-operator[Understanding Service Binding Operator].
diff --git a/applications/connecting_applications_to_services/snippets b/applications/connecting_applications_to_services/snippets
deleted file mode 120000
index 5a3f5add140e..000000000000
--- a/applications/connecting_applications_to_services/snippets
+++ /dev/null
@@ -1 +0,0 @@
-../../snippets/
\ No newline at end of file
diff --git a/applications/connecting_applications_to_services/understanding-service-binding-operator.adoc b/applications/connecting_applications_to_services/understanding-service-binding-operator.adoc
deleted file mode 100644
index 0e05be33061a..000000000000
--- a/applications/connecting_applications_to_services/understanding-service-binding-operator.adoc
+++ /dev/null
@@ -1,23 +0,0 @@
-:_mod-docs-content-type: ASSEMBLY
-[id="understanding-service-binding-operator"]
-= Understanding Service Binding Operator
-include::_attributes/common-attributes.adoc[]
-include::_attributes/servicebinding-document-attributes.adoc[]
-:context: understanding-service-binding-operator
-
-toc::[]
-
-[role="_abstract"]
-Application developers need access to backing services to build and connect workloads. Connecting workloads to backing services is always a challenge because each service provider suggests a different way to access its secrets and consume them in a workload. In addition, manually configuring and maintaining this binding between workloads and backing services makes the process tedious, inefficient, and error-prone.
-
-The {servicebinding-title} enables application developers to easily bind workloads together with Operator-managed backing services, without any manual procedures to configure the binding connection.
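The release notes earlier in this section state that the Operator's CRD supports both the `binding.operators.coreos.com` and the `servicebinding.io` API groups. As a point of comparison with the `binding.operators.coreos.com` example shown earlier in this document, the following is a hedged sketch of the spec API form; the version suffix is an assumption, and the exact field names should be verified against the API-differences module included below.

[source,yaml]
----
apiVersion: servicebinding.io/v1beta1   # version is an assumption
kind: ServiceBinding
metadata:
  name: spring-petclinic-pgcluster
  namespace: my-petclinic
spec:
  service:                              # single backing service reference
    apiVersion: postgres-operator.crunchydata.com/v1beta1
    kind: PostgresCluster
    name: hippo
  workload:                             # workload that receives the binding data
    apiVersion: apps/v1
    kind: Deployment
    name: spring-petclinic
----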
- -include::modules/sbo-service-binding-terminology.adoc[leveloffset=+1] -include::modules/sbo-about-service-binding-operator.adoc[leveloffset=+1] -include::modules/sbo-key-features.adoc[leveloffset=+1] -include::modules/sbo-api-differences.adoc[leveloffset=+1] - -[role="_additional-resources"] -[id="additional-resources_understanding-sbo"] -== Additional resources -* xref:../../applications/connecting_applications_to_services/getting-started-with-service-binding.adoc#getting-started-with-service-binding[Getting started with service binding] \ No newline at end of file diff --git a/applications/creating_applications/_attributes b/applications/creating_applications/_attributes deleted file mode 120000 index 20cc1dcb77bf..000000000000 --- a/applications/creating_applications/_attributes +++ /dev/null @@ -1 +0,0 @@ -../../_attributes/ \ No newline at end of file diff --git a/applications/creating_applications/creating-applications-using-cli.adoc b/applications/creating_applications/creating-applications-using-cli.adoc deleted file mode 100644 index b85e6e7390ce..000000000000 --- a/applications/creating_applications/creating-applications-using-cli.adoc +++ /dev/null @@ -1,22 +0,0 @@ -:_mod-docs-content-type: ASSEMBLY -[id="creating-applications-using-cli"] -= Creating applications by using the CLI -include::_attributes/common-attributes.adoc[] -:context: creating-applications-using-cli - -toc::[] - -You can create an {product-title} application from components that include -source or binary code, images, and templates by using the {product-title} -CLI. - -The set of objects created by `new-app` depends on the artifacts passed as -input: source repositories, images, or templates. - -include::modules/applications-create-using-cli-source-code.adoc[leveloffset=+1] - -include::modules/applications-create-using-cli-image.adoc[leveloffset=+1] - -include::modules/applications-create-using-cli-template.adoc[leveloffset=+1] - -include::modules/applications-create-using-cli-modify.adoc[leveloffset=+1] diff --git a/applications/creating_applications/creating-apps-from-installed-operators.adoc b/applications/creating_applications/creating-apps-from-installed-operators.adoc deleted file mode 100644 index 948b895e253b..000000000000 --- a/applications/creating_applications/creating-apps-from-installed-operators.adoc +++ /dev/null @@ -1,26 +0,0 @@ -:_mod-docs-content-type: ASSEMBLY -[id="creating-apps-from-installed-operators"] -= Creating applications from installed Operators -include::_attributes/common-attributes.adoc[] -:context: creating-apps-from-installed-operators - -toc::[] - -_Operators_ are a method of packaging, deploying, and managing a Kubernetes -application. You can create applications on {product-title} using Operators that -have been installed by a cluster administrator. - -This guide walks developers through an example of creating applications from an -installed Operator using the {product-title} web console. - -ifndef::openshift-rosa,openshift-rosa-hcp,openshift-dedicated[] -[role="_additional-resources"] -.Additional resources - -* See the -xref:../../operators/understanding/olm-what-operators-are.adoc#olm-what-operators-are[Operators] -guide for more on how Operators work and how the Operator Lifecycle Manager is -integrated in {product-title}. 
-endif::openshift-rosa,openshift-rosa-hcp,openshift-dedicated[]
-
-include::modules/olm-creating-etcd-cluster-from-operator.adoc[leveloffset=+1]
diff --git a/applications/creating_applications/images b/applications/creating_applications/images
deleted file mode 120000
index 5fa6987088da..000000000000
--- a/applications/creating_applications/images
+++ /dev/null
@@ -1 +0,0 @@
-../../images
\ No newline at end of file
diff --git a/applications/creating_applications/modules b/applications/creating_applications/modules
deleted file mode 120000
index 8b0e8540076d..000000000000
--- a/applications/creating_applications/modules
+++ /dev/null
@@ -1 +0,0 @@
-../../modules
\ No newline at end of file
diff --git a/applications/creating_applications/odc-creating-applications-using-developer-perspective.adoc b/applications/creating_applications/odc-creating-applications-using-developer-perspective.adoc
deleted file mode 100644
index aac14ab28613..000000000000
--- a/applications/creating_applications/odc-creating-applications-using-developer-perspective.adoc
+++ /dev/null
@@ -1,107 +0,0 @@
-:_mod-docs-content-type: ASSEMBLY
-[id="odc-creating-applications-using-developer-perspective"]
-= Creating applications by using the Developer perspective
-include::_attributes/common-attributes.adoc[]
-:context: odc-creating-applications-using-developer-perspective
-
-toc::[]
-
-The *Developer* perspective in the web console provides you with the following options from the *+Add* view to create applications and associated services and deploy them on {product-title}:
-
-* *Getting started resources*: Use these resources to help you get started with the Developer Console. You can choose to hide the header using the Options menu {kebab}.
-** *Creating applications using samples*: Use existing code samples to get started with creating applications on {product-title}.
-** *Build with guided documentation*: Follow the guided documentation to build applications and familiarize yourself with key concepts and terminologies.
-** *Explore new developer features*: Explore the new features and resources within the *Developer* perspective.
-
-* *Developer catalog*: Explore the Developer Catalog to select the required applications, services, or source-to-image builders, and then add them to your project.
-** *All Services*: Browse the catalog to discover services across {product-title}.
-** *Database*: Select the required database service and add it to your application.
-** *Operator Backed*: Select and deploy the required Operator-managed service.
-** *Helm chart*: Select the required Helm chart to simplify deployment of applications and services.
-** *Devfile*: Select a devfile from the *Devfile registry* to declaratively define a development environment.
-** *Event Source*: Select an event source to register interest in a class of events from a particular system.
-+
-[NOTE]
-====
-The Managed services option is also available if the RHOAS Operator is installed.
-====
-
-* *Git repository*: Import an existing codebase, Devfile, or Dockerfile from your Git repository using the *From Git*, *From Devfile*, or *From Dockerfile* options respectively, to build and deploy an application on {product-title}.
-
-* *Container images*: Use existing images from an image stream or registry to deploy them on {product-title}.
-
-* *Pipelines*: Use Tekton pipelines to create CI/CD pipelines for your software delivery process on {product-title}.
-
-* *Serverless*: Explore the *Serverless* options to create, build, and deploy stateless and serverless applications on {product-title}.
-** *Channel*: Create a Knative channel to create an event forwarding and persistence layer with in-memory and reliable implementations.
-
-* *Samples*: Explore the available sample applications to create, build, and deploy an application quickly.
-
-* *Quick Starts*: Explore the quick start options to create, import, and run applications with step-by-step instructions and tasks.
-
-* *From Local Machine*: Explore the *From Local Machine* tile to import or upload files on your local machine for building and deploying applications easily.
-** *Import YAML*: Upload a YAML file to create and define resources for building and deploying applications.
-** *Upload JAR file*: Upload a JAR file to build and deploy Java applications.
-
-* *Share my Project*: Use this option to add users to or remove users from a project and provide accessibility options to them.
-
-* *Helm Chart repositories*: Use this option to add Helm Chart repositories in a namespace.
-
-* *Re-ordering of resources*: Use these resources to re-order pinned resources added to your navigation pane. The drag-and-drop icon is displayed on the left side of the pinned resource when you hover over it in the navigation pane. The dragged resource can be dropped only in the section where it resides.
-
-ifdef::openshift-enterprise,openshift-webscale[]
-Note that certain options, such as *Pipelines*, *Event Source*, and *Import Virtual Machines*, are displayed only when the link:https://docs.openshift.com/pipelines/latest/install_config/installing-pipelines.html#op-installing-pipelines-operator-in-web-console_installing-pipelines[OpenShift Pipelines Operator], link:https://docs.openshift.com/serverless/1.28/install/install-serverless-operator.html#serverless-install-web-console_install-serverless-operator[{ServerlessOperatorName}], and xref:../../virt/install/installing-virt.adoc#virt-subscribing-cli_installing-virt[OpenShift Virtualization Operator] are installed, respectively.
-endif::[]
-// dedicated-admin cannot install the Serverless or Virtualization operators, cannot create namespace.
-// xref: ../../cicd/pipelines/installing-pipelines.adoc#op-installing-pipelines-operator-in-web-console_installing-pipelines[OpenShift Pipelines Operator]
-ifdef::openshift-rosa,openshift-rosa-hcp,openshift-dedicated[]
-Note that the *Pipelines* option is displayed only when the OpenShift Pipelines Operator is installed.
-endif::openshift-rosa,openshift-rosa-hcp,openshift-dedicated[]
-
-[id="prerequisites_odc-creating-applications-using-developer-perspective"]
-== Prerequisites
-
-To create applications using the *Developer* perspective, ensure that:
-
-ifndef::openshift-rosa,openshift-rosa-hcp,openshift-dedicated[]
-* You have xref:../../web_console/web-console.adoc#web-console[logged in to the web console].
-* You have created a project or have access to a project with the appropriate xref:../../authentication/using-rbac.adoc#default-roles_using-rbac[roles and permissions] to create applications and other workloads in {product-title}.
-endif::openshift-rosa,openshift-rosa-hcp,openshift-dedicated[]
-ifdef::openshift-rosa,openshift-rosa-hcp,openshift-dedicated[]
-* You have logged in to the web console.
-// * You have created a project or have access to a project with the appropriate xref: ../../authentication/using-rbac.adoc#default-roles_using-rbac[roles and permissions] to create applications and other workloads in {product-title}. -endif::openshift-rosa,openshift-rosa-hcp,openshift-dedicated[] - -// dedicated-admin cannot install the Serverless operator. This ifdef should cover this. -ifdef::openshift-enterprise,openshift-webscale[] - -To create serverless applications, in addition to the preceding prerequisites, ensure that: - -* You have link:https://docs.openshift.com/serverless/1.28/install/install-serverless-operator.html#install-serverless-operator[installed the {ServerlessOperatorName}]. -* You have link:https://docs.openshift.com/serverless/1.28/install/installing-knative-serving.html#installing-knative-serving[created a `KnativeServing` resource in the `knative-serving` namespace]. - -endif::[] - -include::modules/odc-creating-sample-applications.adoc[leveloffset=+1] - -include::modules/odc-using-quickstarts.adoc[leveloffset=+1] - -include::modules/odc-importing-codebase-from-git-to-create-application.adoc[leveloffset=+1] - -include::modules/odc-deploying-container-image.adoc[leveloffset=+1] - -include::modules/odc-deploying-java-applications.adoc[leveloffset=+1] - -include::modules/odc-using-the-devfile-registry.adoc[leveloffset=+1] - -include::modules/odc-using-the-developer-catalog-to-add-services-or-components.adoc[leveloffset=+1] - -[role="_additional-resources"] -[id="additional-resources_odc-creating-applications-using-developer-perspective"] -== Additional resources - -* For more information about Knative routing settings for {ServerlessProductName}, see link:https://docs.openshift.com/serverless/1.28/knative-serving/external-ingress-routing/routing-overview.html#routing-overview[Routing]. -* For more information about domain mapping settings for {ServerlessProductName}, see link:https://docs.openshift.com/serverless/1.28/knative-serving/config-custom-domains/serverless-custom-domains.html#serverless-custom-domains[Configuring a custom domain for a Knative service]. -* For more information about Knative autoscaling settings for {ServerlessProductName}, see link:https://docs.openshift.com/serverless/1.28/knative-serving/autoscaling/serverless-autoscaling-developer.html#serverless-autoscaling-developer[Autoscaling]. -* For more information about adding a new user to a project, see xref:../../applications/projects/working-with-projects.adoc#odc-providing-project-permissions-using-developer-perspective_projects[Working with projects]. -* For more information about creating a Helm Chart repository, see xref:../../applications/working_with_helm_charts/configuring-custom-helm-chart-repositories.adoc#odc-creating-helm-releases-using-developer-perspective_configuring-custom-helm-chart-repositories[Creating Helm Chart repositories]. 
diff --git a/applications/creating_applications/snippets b/applications/creating_applications/snippets
deleted file mode 120000
index 7bf6da9a51d0..000000000000
--- a/applications/creating_applications/snippets
+++ /dev/null
@@ -1 +0,0 @@
-../../snippets
\ No newline at end of file
diff --git a/applications/creating_applications/templates-using-ruby-on-rails.adoc b/applications/creating_applications/templates-using-ruby-on-rails.adoc
deleted file mode 100644
index 15260d01b3fd..000000000000
--- a/applications/creating_applications/templates-using-ruby-on-rails.adoc
+++ /dev/null
@@ -1,45 +0,0 @@
-:_mod-docs-content-type: ASSEMBLY
-[id="templates-using-ruby-on-rails"]
-= Creating applications using Ruby on Rails
-include::_attributes/common-attributes.adoc[]
-:context: templates-ruby-on-rails
-
-toc::[]
-
-Ruby on Rails is a web framework written in Ruby. This guide covers using Rails 4 on {product-title}.
-
-[WARNING]
-====
-Go through the whole tutorial to get an overview of all the steps necessary to run your application on {product-title}. If you experience a problem, try reading through the entire tutorial and then going back to your issue. It can also be useful to review your previous steps to ensure that all the steps were run correctly.
-====
-
-== Prerequisites
-
-* Basic Ruby and Rails knowledge.
-* Locally installed version of Ruby 2.0.0+, Rubygems, and Bundler.
-* Basic Git knowledge.
-ifndef::openshift-online[]
-* Running instance of {product-title} 4.
-endif::[]
-ifdef::openshift-online[]
-* Provisioned account in OpenShift Online.
-endif::[]
-* Make sure that an instance of {product-title} is running and is available. Also make sure that your `oc` CLI client is installed and the command is accessible from your command shell, so you can use it to log in using your email address and password.
-
-include::modules/templates-rails-setting-up-database.adoc[leveloffset=+1]
-
-include::modules/templates-rails-writing-application.adoc[leveloffset=+1]
-
-include::modules/templates-rails-creating-welcome-page.adoc[leveloffset=+2]
-
-include::modules/templates-rails-configuring-application.adoc[leveloffset=+2]
-
-include::modules/templates-rails-storing-application-in-git.adoc[leveloffset=+2]
-
-include::modules/templates-rails-deploying-application.adoc[leveloffset=+1]
-
-include::modules/templates-rails-creating-database-service.adoc[leveloffset=+2]
-
-include::modules/templates-rails-creating-frontend-service.adoc[leveloffset=+2]
-
-include::modules/templates-rails-creating-route-for-application.adoc[leveloffset=+2]
diff --git a/applications/creating_applications/using-templates.adoc b/applications/creating_applications/using-templates.adoc
deleted file mode 100644
index 01fb7298d52b..000000000000
--- a/applications/creating_applications/using-templates.adoc
+++ /dev/null
@@ -1,53 +0,0 @@
-:_mod-docs-content-type: ASSEMBLY
-[id="using-templates"]
-= Using templates
-include::_attributes/common-attributes.adoc[]
-:context: using-templates
-
-toc::[]
-
-The following sections provide an overview of templates, as well as how to use and create them.
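To ground the modules that follow, a minimal sketch of a template's shape: a parameter list plus an object list whose fields reference the parameters. The template name, parameter, and `Service` object below are illustrative placeholders, not a template shipped with {product-title}.

[source,yaml]
----
apiVersion: template.openshift.io/v1
kind: Template
metadata:
  name: example-template        # hypothetical template
parameters:
- name: APP_NAME                # substituted wherever ${APP_NAME} appears
  description: Name applied to the objects in this template
  value: example-app
objects:
- apiVersion: v1
  kind: Service
  metadata:
    name: ${APP_NAME}
  spec:
    ports:
    - port: 8080
      targetPort: 8080
    selector:
      app: ${APP_NAME}
----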
- -include::modules/templates-overview.adoc[leveloffset=+1] - -include::modules/templates-uploading.adoc[leveloffset=+1] - -include::modules/templates-creating-from-console.adoc[leveloffset=+1] - -include::modules/templates-using-the-cli.adoc[leveloffset=+1] - -include::modules/templates-cli-labels.adoc[leveloffset=+2] - -include::modules/templates-cli-parameters.adoc[leveloffset=+2] - -include::modules/templates-cli-generating-list-of-objects.adoc[leveloffset=+2] - -include::modules/templates-modifying-uploaded-template.adoc[leveloffset=+1] - -// cannot patch resource "templates" -ifndef::openshift-rosa,openshift-rosa-hcp,openshift-dedicated[] -include::modules/templates-using-instant-app-quickstart.adoc[leveloffset=+1] - -include::modules/templates-quickstart.adoc[leveloffset=+2] -endif::openshift-rosa,openshift-rosa-hcp,openshift-dedicated[] - -include::modules/templates-writing.adoc[leveloffset=+1] - -include::modules/templates-writing-description.adoc[leveloffset=+2] - -include::modules/templates-writing-labels.adoc[leveloffset=+2] - -include::modules/templates-writing-parameters.adoc[leveloffset=+2] - -include::modules/templates-writing-object-list.adoc[leveloffset=+2] - -include::modules/templates-marking-as-bindable.adoc[leveloffset=+2] - -include::modules/templates-exposing-object-fields.adoc[leveloffset=+2] - -include::modules/templates-waiting-for-readiness.adoc[leveloffset=+2] - -include::modules/templates-create-from-existing-object.adoc[leveloffset=+2] - - -//Add quick start and other relevant tutorials here. diff --git a/applications/deployments/_attributes b/applications/deployments/_attributes deleted file mode 120000 index 20cc1dcb77bf..000000000000 --- a/applications/deployments/_attributes +++ /dev/null @@ -1 +0,0 @@ -../../_attributes/ \ No newline at end of file diff --git a/applications/deployments/deployment-strategies.adoc b/applications/deployments/deployment-strategies.adoc deleted file mode 100644 index 0af104aa2bb0..000000000000 --- a/applications/deployments/deployment-strategies.adoc +++ /dev/null @@ -1,63 +0,0 @@ -:_mod-docs-content-type: ASSEMBLY -[id="deployment-strategies"] -= Using deployment strategies -include::_attributes/common-attributes.adoc[] -:context: deployment-strategies - -toc::[] - -_Deployment strategies_ are used to change or upgrade applications without downtime so that users barely notice a change. - -Because users generally access applications through a route handled by a router, deployment strategies can focus on `DeploymentConfig` object features or routing features. Strategies that focus on `DeploymentConfig` object features impact all routes that use the application. Strategies that use router features target individual routes. - -Most deployment strategies are supported through the `DeploymentConfig` object, and some additional strategies are supported through router features. - -[id="choosing-deployment-strategies"] -== Choosing a deployment strategy - -Consider the following when choosing a deployment strategy: - -- Long-running connections must be handled gracefully. -- Database conversions can be complex and must be done and rolled back along with the application. -- If the application is a hybrid of microservices and traditional components, downtime might be required to complete the transition. -- You must have the infrastructure to do this. -- If you have a non-isolated test environment, you can break both new and old versions. - -A deployment strategy uses readiness checks to determine if a new pod is ready for use. 
If a readiness check fails, the `DeploymentConfig` object retries running the pod until it times out. The default timeout is `10m`, a value set in `TimeoutSeconds` in `dc.spec.strategy.*params`.
-
-// Rolling strategies
-include::modules/deployments-rolling-strategy.adoc[leveloffset=+1]
-include::modules/deployments-canary-deployments.adoc[leveloffset=+2]
-// Creating rolling deployments
-include::modules/creating-rolling-deployments-CLI.adoc[leveloffset=+2]
-// Editing a deployment
-:context: rolling-strategy
-include::modules/odc-editing-deployments.adoc[leveloffset=+2]
-// Starting a deployment
-include::modules/odc-starting-rolling-deployment.adoc[leveloffset=+2]
-
-[role="_additional-resources"]
-.Additional resources
-* xref:../../applications/creating_applications/odc-creating-applications-using-developer-perspective.adoc#odc-creating-applications-using-developer-perspective[Creating and deploying applications on {product-title} using the *Developer* perspective]
-* xref:../../applications/odc-viewing-application-composition-using-topology-view.adoc#odc-viewing-application-composition-using-topology-view[Viewing the applications in your project, verifying their deployment status, and interacting with them in the *Topology* view]
-
-// Recreate strategies
-include::modules/deployments-recreate-strategy.adoc[leveloffset=+1]
-// Editing a deployment
-:context: recreate-strategy
-include::modules/odc-editing-deployments.adoc[leveloffset=+2]
-// Starting a deployment
-include::modules/odc-starting-recreate-deployment.adoc[leveloffset=+2]
-
-[role="_additional-resources"]
-.Additional resources
-* xref:../../applications/creating_applications/odc-creating-applications-using-developer-perspective.adoc#odc-creating-applications-using-developer-perspective[Creating and deploying applications on {product-title} using the *Developer* perspective]
-* xref:../../applications/odc-viewing-application-composition-using-topology-view.adoc#odc-viewing-application-composition-using-topology-view[Viewing the applications in your project, verifying their deployment status, and interacting with them in the *Topology* view]
-
-// Custom strategies
-include::modules/deployments-custom-strategy.adoc[leveloffset=+1]
-// Editing a deployment
-:context: custom-strategy
-include::modules/odc-editing-deployments.adoc[leveloffset=+2]
-
-include::modules/deployments-lifecycle-hooks.adoc[leveloffset=+1]
diff --git a/applications/deployments/images b/applications/deployments/images
deleted file mode 120000
index 5e67573196d8..000000000000
--- a/applications/deployments/images
+++ /dev/null
@@ -1 +0,0 @@
-../images
\ No newline at end of file
diff --git a/applications/deployments/managing-deployment-processes.adoc b/applications/deployments/managing-deployment-processes.adoc
deleted file mode 100644
index 030ba3f10325..000000000000
--- a/applications/deployments/managing-deployment-processes.adoc
+++ /dev/null
@@ -1,49 +0,0 @@
-:_mod-docs-content-type: ASSEMBLY
-[id="deployment-operations"]
-= Managing deployment processes
-include::_attributes/common-attributes.adoc[]
-:context: deployment-operations
-
-toc::[]
-
-[id="deploymentconfig-operations"]
-== Managing DeploymentConfig objects
-
-include::snippets/deployment-config-deprecated.adoc[]
-
-`DeploymentConfig` objects can be managed from the {product-title} web console's *Workloads* page or by using the `oc` CLI. The following procedures show CLI usage unless otherwise stated.
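As a reference point for the procedures that follow, a minimal sketch of a `DeploymentConfig` object, including where the rolling strategy timeout described earlier is set. The name, labels, and image are hypothetical placeholders.

[source,yaml]
----
apiVersion: apps.openshift.io/v1
kind: DeploymentConfig
metadata:
  name: example-dc          # hypothetical name
spec:
  replicas: 3
  selector:
    app: example
  strategy:
    type: Rolling
    rollingParams:
      timeoutSeconds: 600   # the 10m default mentioned earlier, in seconds
  template:
    metadata:
      labels:
        app: example
    spec:
      containers:
      - name: app
        image: registry.example.com/app:latest   # placeholder image
----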
- -include::modules/deployments-starting-deployment.adoc[leveloffset=+2] -include::modules/deployments-viewing-deployment.adoc[leveloffset=+2] -include::modules/deployments-retrying-deployment.adoc[leveloffset=+2] -include::modules/deployments-rolling-back.adoc[leveloffset=+2] -include::modules/deployments-exec-cmd-in-container.adoc[leveloffset=+2] -include::modules/deployments-viewing-logs.adoc[leveloffset=+2] -include::modules/deployments-triggers.adoc[leveloffset=+2] -include::modules/deployments-setting-triggers.adoc[leveloffset=+3] -include::modules/deployments-setting-resources.adoc[leveloffset=+2] - -// When the Nodes book is added to ROSA/OSD, check if this link is valid. -ifndef::openshift-rosa,openshift-rosa-hcp,openshift-dedicated[] -[role="_additional-resources"] -.Additional resources - -* For more information about resource limits and requests, see xref:../../nodes/clusters/nodes-cluster-resource-configure.adoc#nodes-cluster-resource-configure-about_nodes-cluster-resource-configure[Understanding managing application memory]. -endif::openshift-rosa,openshift-rosa-hcp,openshift-dedicated[] - -include::modules/deployments-scaling-manually.adoc[leveloffset=+2] -include::modules/deployments-accessing-private-repos.adoc[leveloffset=+2] - -ifdef::openshift-enterprise,openshift-webscale,openshift-origin[] -include::modules/deployments-assigning-pods-to-nodes.adoc[leveloffset=+2] -endif::[] - -ifndef::openshift-online[] -include::modules/deployments-running-pod-svc-acct.adoc[leveloffset=+2] -endif::[] - -//// -== Managing Deployments - -Need docs on managing Deployment objects. -//// diff --git a/applications/deployments/modules b/applications/deployments/modules deleted file mode 120000 index 464b823aca16..000000000000 --- a/applications/deployments/modules +++ /dev/null @@ -1 +0,0 @@ -../modules \ No newline at end of file diff --git a/applications/deployments/osd-config-custom-domains-applications.adoc b/applications/deployments/osd-config-custom-domains-applications.adoc deleted file mode 100644 index d24067185aa6..000000000000 --- a/applications/deployments/osd-config-custom-domains-applications.adoc +++ /dev/null @@ -1,17 +0,0 @@ -:_mod-docs-content-type: ASSEMBLY -[id="osd-config-custom-domains-applications"] -= Custom domains for applications -include::_attributes/attributes-openshift-dedicated.adoc[] -:context: osd-config-custom-domains-applications - -toc::[] - -[WARNING] -==== -Starting with {product-title} 4.14, the Custom Domain Operator is deprecated. To manage Ingress in {product-title} 4.14, use the Ingress Operator. The functionality is unchanged for {product-title} 4.13 and earlier versions. -==== - -You can configure a custom domain for your applications. Custom domains are specific wildcard domains that can be used with {product-title} applications. 
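For orientation before the modules below, a hedged sketch of the kind of resource the deprecated Custom Domain Operator consumes. The API group and version are assumptions, and the domain, TLS secret, and namespace are placeholders to verify against the configuration module.

[source,yaml]
----
apiVersion: managed.openshift.io/v1alpha1   # assumed API group/version
kind: CustomDomain
metadata:
  name: acme-apps                 # hypothetical name
spec:
  domain: apps.acme.example.com   # placeholder wildcard domain for applications
  certificate:
    name: acme-tls                # hypothetical TLS secret
    namespace: my-project
----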
-
-include::modules/osd-applications-config-custom-domains.adoc[leveloffset=+1]
-include::modules/osd-applications-renew-custom-domains.adoc[leveloffset=+1]
diff --git a/applications/deployments/rosa-config-custom-domains-applications.adoc b/applications/deployments/rosa-config-custom-domains-applications.adoc
deleted file mode 100644
index 8b89532e4da6..000000000000
--- a/applications/deployments/rosa-config-custom-domains-applications.adoc
+++ /dev/null
@@ -1,17 +0,0 @@
-:_mod-docs-content-type: ASSEMBLY
-[id="rosa-config-custom-domains-applications"]
-= Custom domains for applications
-include::_attributes/attributes-openshift-dedicated.adoc[]
-:context: rosa-config-custom-domains-applications
-
-toc::[]
-
-[WARNING]
-====
-Starting with {product-title} 4.14, the Custom Domain Operator is deprecated. To manage Ingress in {product-title} 4.14, use the Ingress Operator. The functionality is unchanged for {product-title} 4.13 and earlier versions.
-====
-
-You can configure a custom domain for your applications. Custom domains are specific wildcard domains that can be used with {product-title} applications.
-
-include::modules/rosa-applications-config-custom-domains.adoc[leveloffset=+1]
-include::modules/rosa-applications-renew-custom-domains.adoc[leveloffset=+1]
\ No newline at end of file
diff --git a/applications/deployments/route-based-deployment-strategies.adoc b/applications/deployments/route-based-deployment-strategies.adoc
deleted file mode 100644
index 95ae73f43f81..000000000000
--- a/applications/deployments/route-based-deployment-strategies.adoc
+++ /dev/null
@@ -1,44 +0,0 @@
-:_mod-docs-content-type: ASSEMBLY
-[id="route-based-deployment-strategies"]
-= Using route-based deployment strategies
-include::_attributes/common-attributes.adoc[]
-:context: route-based-deployment-strategies
-
-toc::[]
-
-Deployment strategies provide a way for the application to evolve. Some strategies use `Deployment` objects to make changes that are seen by users of all routes that resolve to the application. Other advanced strategies, such as the ones described in this section, use router features in conjunction with `Deployment` objects to impact specific routes.
-
-////
-This link keeps breaking Travis for some reason.
-
-[NOTE]
-====
-See
-xref:../../applications/deployments/deployment-strategies.adoc#deployment-strategies[Using deployment strategies]
-for more on the basic strategy types.
-====
-////
-
-The most common route-based strategy is to use a _blue-green deployment_. The new version (the green version) is brought up for testing and evaluation, while the users still use the stable version (the blue version). When ready, the users are switched to the green version. If a problem arises, you can switch back to the blue version.
-
-Alternatively, you can use an _A/B versions_ strategy in which both versions are active at the same time. With this strategy, some users can use _version A_, and other users can use _version B_. You can use this strategy to experiment with user interface changes or other features in order to get user feedback. You can also use it to verify proper operation in a production context where problems impact a limited number of users.
-
-A canary deployment tests the new version, but when a problem is detected, it quickly falls back to the previous version. This can be done with both of the preceding strategies.
-
-The route-based deployment strategies do not scale the number of pods in the services. To maintain desired performance characteristics, the deployment configurations might have to be scaled.
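The A/B pattern described above maps onto route traffic weights: the route's primary backend and one or more alternate backends split traffic by weight. A minimal sketch; the route name, host, and service names are hypothetical placeholders.

[source,yaml]
----
apiVersion: route.openshift.io/v1
kind: Route
metadata:
  name: ab-example                      # hypothetical route
spec:
  host: ab-example.apps.example.com     # placeholder host
  to:
    kind: Service
    name: app-v1        # "version A" receives most of the traffic
    weight: 90
  alternateBackends:
  - kind: Service
    name: app-v2        # "version B" receives a small share
    weight: 10
----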
To maintain desired performance characteristics the deployment configurations might have to be scaled. - -include::modules/deployments-proxy-shards.adoc[leveloffset=+1] -include::modules/deployments-n1-compatibility.adoc[leveloffset=+1] -include::modules/deployments-graceful-termination.adoc[leveloffset=+1] -include::modules/deployments-blue-green.adoc[leveloffset=+1] -include::modules/deployments-ab-testing.adoc[leveloffset=+1] -include::modules/deployments-ab-testing-lb.adoc[leveloffset=+2] - -ifndef::openshift-rosa-hcp[] -// Remove conditionals when Networking content is in ROSA HCP -[role="_additional-resources"] -[id="additional-resources_{context}"] -== Additional resources - -* xref:../../networking/routes/route-configuration.adoc#nw-route-specific-annotations_route-configuration[Route-specific annotations]. -endif::[] diff --git a/applications/deployments/snippets b/applications/deployments/snippets deleted file mode 120000 index 5a3f5add140e..000000000000 --- a/applications/deployments/snippets +++ /dev/null @@ -1 +0,0 @@ -../../snippets/ \ No newline at end of file diff --git a/applications/deployments/what-deployments-are.adoc b/applications/deployments/what-deployments-are.adoc deleted file mode 100644 index a8a935138714..000000000000 --- a/applications/deployments/what-deployments-are.adoc +++ /dev/null @@ -1,62 +0,0 @@ -:_mod-docs-content-type: ASSEMBLY -[id="what-deployments-are"] -= Understanding deployments -include::_attributes/common-attributes.adoc[] -:context: what-deployments-are - -toc::[] - -The `Deployment` and `DeploymentConfig` API objects in {product-title} provide two similar but different methods for fine-grained management over common user applications. They are composed of the following separate API objects: - -* A `Deployment` or `DeploymentConfig` object, either of which describes the desired state of a particular component of the application as a pod template. -* `Deployment` objects involve one or more _replica sets_, which contain a point-in-time record of the state of a deployment as a pod template. Similarly, `DeploymentConfig` objects involve one or more _replication controllers_, which preceded replica sets. -* One or more pods, which represent an instance of a particular version of an application. - -Use `Deployment` objects unless you need a specific feature or behavior provided by `DeploymentConfig` objects. - -include::snippets/deployment-config-deprecated.adoc[] - -//// -Update when converted: -[role="_additional-resources"] -.Additional resources - -xref:../../applications/deployments/advanced_deployment_strategies.adoc#graceful-termination[graceful shutdown] -xref:../../applications/basic_deployment_operations.adoc#triggers[Triggers] -xref:../../applications/deployment_strategies.adoc#strategies[strategies] -xref:../../applications/deployment_strategies.adoc#lifecycle-hooks[hooks] -xref:../../applications/basic_deployment_operations.adoc#rolling-back-a-deployment[rollbacks] -xref:../../applications/basic_deployment_operations.adoc#scaling[scaling] -xref:../../dev_guide/pod_autoscaling.adoc#dev-guide-pod-autoscaling[autoscaling] -//// - -[id="what-deployments-are-build-blocks"] -== Building blocks of a deployment - -Deployments and deployment configs are enabled by the use of native Kubernetes API objects `ReplicaSet` and `ReplicationController`, respectively, as their building blocks. - -Users do not have to manipulate replica sets, replication controllers, or pods owned by `Deployment` or `DeploymentConfig` objects. 
The deployment systems ensure changes are propagated appropriately. - -[TIP] -==== -If the existing deployment strategies are not suited for your use case and you must run manual steps during the lifecycle of your deployment, then you should consider creating a custom deployment strategy. -==== - -The following sections provide further details on these objects. - -include::modules/deployments-replicasets.adoc[leveloffset=+2] -include::modules/deployments-replicationcontrollers.adoc[leveloffset=+2] - -include::modules/deployments-kube-deployments.adoc[leveloffset=+1] -include::modules/deployments-deploymentconfigs.adoc[leveloffset=+1] - -include::modules/deployments-comparing-deploymentconfigs.adoc[leveloffset=+1] -//// -Update when converted: -[role="_additional-resources"] -.Additional resources - -- xref:../../dev_guide/managing_images.adoc#dev-guide-managing-images[Imagestreams] -- xref:../../dev_guide/deployments/deployment_strategies.adoc#lifecycle-hooks[Lifecycle hooks] -- xref:../../dev_guide/deployments/deployment_strategies.adoc#custom-strategy[Custom deployment strategies] -//// diff --git a/applications/idling-applications.adoc b/applications/idling-applications.adoc deleted file mode 100644 index 8f163c4463f5..000000000000 --- a/applications/idling-applications.adoc +++ /dev/null @@ -1,16 +0,0 @@ -:_mod-docs-content-type: ASSEMBLY -[id="idling-applications"] -= Idling applications -include::_attributes/common-attributes.adoc[] -:context: idling-applications - -toc::[] - -Cluster administrators can idle applications to reduce resource consumption. This is useful when the cluster is deployed on a public cloud where cost is related to resource consumption. - -If any scalable resources are not in use, {product-title} discovers and idles them by scaling their replicas to `0`. The next time network traffic is directed to the resources, the resources are unidled by scaling up the replicas, and normal operation continues. - -Applications are made of services, as well as other scalable resources, such as deployment configs. The action of idling an application involves idling all associated resources. - -include::modules/idle-idling-applications.adoc[leveloffset=+1] -include::modules/idle-unidling-applications.adoc[leveloffset=+1] diff --git a/applications/images b/applications/images deleted file mode 120000 index 5e67573196d8..000000000000 --- a/applications/images +++ /dev/null @@ -1 +0,0 @@ -../images \ No newline at end of file diff --git a/applications/index.adoc b/applications/index.adoc deleted file mode 100644 index 0b5e2da919ec..000000000000 --- a/applications/index.adoc +++ /dev/null @@ -1,49 +0,0 @@ -:_mod-docs-content-type: ASSEMBLY -[id="building-applications-overview"] -= Building applications overview -include::_attributes/common-attributes.adoc[] -:context: building-applications-overview - -toc::[] - -Using {product-title}, you can create, edit, delete, and manage applications using the web console or command-line interface (CLI). - -[id="working-on-a-project"] -== Working on a project - -Using projects, you can organize and manage applications in isolation. You can manage the entire project lifecycle, including xref:../applications/projects/working-with-projects.adoc#working-with-projects[creating, viewing, and deleting a project] in {product-title}. 
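-
-For example, the basic project lifecycle can also be driven from the CLI; the following is a minimal sketch with a placeholder project name:
-
-[source,terminal]
-----
-$ oc new-project my-project     # create a project
-$ oc get projects               # view the projects you can access
-$ oc delete project my-project  # delete the project when it is no longer needed
-----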
-
-After you create the project, you can xref:../applications/projects/working-with-projects.adoc#odc-providing-project-permissions-using-developer-perspective_projects[grant or revoke access to a project] and xref:../applications/projects/working-with-projects.adoc#odc-customizing-available-cluster-roles-using-the-web-console_projects[manage cluster roles] for users by using the *Developer* perspective. You can also xref:../applications/projects/configuring-project-creation.adoc#configuring-project-creation[edit the project configuration resource] while creating a project template that is used for automatic provisioning of new projects.
-
-ifndef::openshift-rosa,openshift-rosa-hcp,openshift-dedicated[]
-Using the CLI, you can xref:../applications/projects/creating-project-other-user.adoc#creating-project-other-user[create a project as a different user] by impersonating a request to the {product-title} API. When you make a request to create a new project, {product-title} uses an endpoint to provision the project according to a customizable template. As a cluster administrator, you can choose to xref:../applications/projects/configuring-project-creation.adoc#disabling-project-self-provisioning_configuring-project-creation[prevent an authenticated user group from self-provisioning new projects].
-endif::openshift-rosa,openshift-rosa-hcp,openshift-dedicated[]
-ifdef::openshift-rosa,openshift-rosa-hcp,openshift-dedicated[]
-As a user with dedicated administrator permissions, you can choose to xref:../applications/projects/configuring-project-creation.adoc#disabling-project-self-provisioning_configuring-project-creation[prevent an authenticated user group from self-provisioning new projects].
-endif::openshift-rosa,openshift-rosa-hcp,openshift-dedicated[]
-
-
-[id="working-on-application"]
-== Working on an application
-
-[id="creating-application"]
-=== Creating an application
-To create applications, you must have created a project or have access to a project with the appropriate roles and permissions. You can create an application by using either xref:../applications/creating_applications/odc-creating-applications-using-developer-perspective.adoc#odc-creating-applications-using-developer-perspective[the *Developer* perspective in the web console], xref:../applications/creating_applications/creating-apps-from-installed-operators.adoc#creating-apps-from-installed-operators[installed Operators], or xref:../applications/creating_applications/creating-applications-using-cli.adoc#creating-applications-using-cli[the {oc-first}]. You can source the applications to be added to the project from Git, JAR files, devfiles, or the developer catalog.
-
-You can also use components that include source or binary code, images, and templates to create an application by using the {oc-first}. With the {product-title} web console, you can create an application from an Operator installed by a cluster administrator.
-
-[id="maintaining-application"]
-=== Maintaining an application
-After you create the application, you can use the web console to xref:../applications/odc-monitoring-project-and-application-metrics-using-developer-perspective.adoc#odc-monitoring-project-and-application-metrics-using-developer-perspective[monitor your project or application metrics]. You can also xref:../applications/odc-editing-applications.adoc#odc-editing-applications[edit] or xref:../applications/odc-deleting-applications.adoc#odc-deleting-applications[delete] the application by using the web console.
-
-When the application is running, not all application resources are used. As a cluster administrator, you can choose to xref:../applications/idling-applications.adoc#idling-applications[idle these scalable resources] to reduce resource consumption.
-
-[id="deploying-application"]
-=== Deploying an application
-You can deploy your application using xref:../applications/deployments/what-deployments-are.adoc#what-deployments-are[`Deployment` or `DeploymentConfig`] objects and xref:../applications/deployments/managing-deployment-processes.adoc#deployment-operations[manage] them from the web console. You can create xref:../applications/deployments/deployment-strategies.adoc#deployment-strategies[deployment strategies] that help reduce downtime during a change or an upgrade to the application.
-
-You can also use xref:../applications/working_with_helm_charts/understanding-helm.adoc#understanding-helm[Helm], a software package manager that simplifies deployment of applications and services to {product-title} clusters.
-
-[id="redhat-marketplace"]
-== Using the Red Hat Marketplace
-The xref:../applications/red-hat-marketplace.adoc#red-hat-marketplace[Red Hat Marketplace] is an open cloud marketplace where you can discover and access certified software for container-based environments that run on public clouds and on-premise.
diff --git a/applications/modules b/applications/modules
deleted file mode 120000
index 464b823aca16..000000000000
--- a/applications/modules
+++ /dev/null
@@ -1 +0,0 @@
-../modules
\ No newline at end of file
diff --git a/applications/odc-deleting-applications.adoc b/applications/odc-deleting-applications.adoc
deleted file mode 100644
index 125ba1aa0fe9..000000000000
--- a/applications/odc-deleting-applications.adoc
+++ /dev/null
@@ -1,11 +0,0 @@
-:_mod-docs-content-type: ASSEMBLY
-[id="odc-deleting-applications"]
-= Deleting applications
-include::_attributes/common-attributes.adoc[]
-:context: odc-deleting-applications
-
-toc::[]
-
-You can delete applications created in your project.
-
-include::modules/odc-deleting-applications-using-developer-perspective.adoc[leveloffset=+1]
diff --git a/applications/odc-editing-applications.adoc b/applications/odc-editing-applications.adoc
deleted file mode 100644
index 9e310a369b23..000000000000
--- a/applications/odc-editing-applications.adoc
+++ /dev/null
@@ -1,26 +0,0 @@
-:_mod-docs-content-type: ASSEMBLY
-[id="odc-editing-applications"]
-= Editing applications
-include::_attributes/common-attributes.adoc[]
-:context: odc-editing-applications
-
-toc::[]
-
-You can use the *Topology* view to edit the configuration and the source code of the application you create.
-
-== Prerequisites
-// When the Authentication book is added to ROSA/OSD, check if this link is valid.
-ifndef::openshift-rosa,openshift-rosa-hcp,openshift-dedicated[]
-* You have the appropriate xref:../authentication/using-rbac.adoc#default-roles_using-rbac[roles and permissions] in a project to create and modify applications in {product-title}.
-endif::openshift-rosa,openshift-rosa-hcp,openshift-dedicated[]
-* You have xref:../applications/creating_applications/odc-creating-applications-using-developer-perspective.adoc#odc-creating-applications-using-developer-perspective[created and deployed an application on {product-title} using the *Developer* perspective].
-ifndef::openshift-rosa,openshift-rosa-hcp,openshift-dedicated[]
-* You have xref:../web_console/web-console.adoc#web-console[logged in to the web console] and have switched to xref:../web_console/web-console-overview.adoc#about-developer-perspective_web-console-overview[the *Developer* perspective].
-endif::openshift-rosa,openshift-rosa-hcp,openshift-dedicated[]
-ifdef::openshift-rosa,openshift-rosa-hcp,openshift-dedicated[]
-* You have logged in to the web console and have switched to the *Developer* perspective.
-endif::openshift-rosa,openshift-rosa-hcp,openshift-dedicated[]
-
-include::modules/odc-editing-source-code-using-developer-perspective.adoc[leveloffset=+1]
-
-include::modules/odc-editing-application-configuration-using-developer-perspective.adoc[leveloffset=+1]
diff --git a/applications/odc-exporting-applications.adoc b/applications/odc-exporting-applications.adoc
deleted file mode 100644
index f89cdf7e56ce..000000000000
--- a/applications/odc-exporting-applications.adoc
+++ /dev/null
@@ -1,40 +0,0 @@
-:_mod-docs-content-type: ASSEMBLY
-[id="odc-exporting-applications"]
-= Exporting applications
-include::_attributes/common-attributes.adoc[]
-:context: odc-exporting-applications
-
-toc::[]
-
-As a developer, you can export your application in the ZIP file format. Based on your needs, import the exported application to another project in the same cluster or a different cluster by using the *Import YAML* option in the *+Add* view. Exporting your application helps you to reuse your application resources and saves you time.
-
-[id="prerequisites_odc-exporting-applications"]
-== Prerequisites
-
-* You have installed the gitops-primer Operator from the OperatorHub.
-+
-[NOTE]
-====
-The *Export application* option is disabled in the *Topology* view even after you install the gitops-primer Operator.
-====
-
-* You have created an application in the *Topology* view to enable *Export application*.
-
-[id="odc-exporting-applications-procedure"]
-== Procedure
-
-. In the *Developer* perspective, perform one of the following steps:
-.. Navigate to the *+Add* view and click *Export application* in the *Application portability* tile.
-.. Navigate to the *Topology* view and click *Export application*.
-
-. Click *OK* in the *Export Application* dialog box. A notification opens to confirm that the export of resources from your project has started.
-
-. Optional steps that you might need to perform in the following scenarios:
-+
-* If you have started exporting an incorrect application, click *Export application* -> *Cancel Export*.
-* If your export is already in progress and you want to start a fresh export, click *Export application* -> *Restart Export*.
-* If you want to view logs associated with exporting an application, click *Export application* and the *View Logs* link.
-+
-image::export-application-dialog-box.png[]
-
-. After a successful export, click *Download* in the dialog box to download application resources in ZIP format onto your machine.
diff --git a/applications/odc-monitoring-project-and-application-metrics-using-developer-perspective.adoc b/applications/odc-monitoring-project-and-application-metrics-using-developer-perspective.adoc
deleted file mode 100644
index 7f6090467339..000000000000
--- a/applications/odc-monitoring-project-and-application-metrics-using-developer-perspective.adoc
+++ /dev/null
@@ -1,39 +0,0 @@
-:_mod-docs-content-type: ASSEMBLY
-[id="odc-monitoring-project-and-application-metrics-using-developer-perspective"]
-= Monitoring project and application metrics using the Developer perspective
-include::_attributes/common-attributes.adoc[]
-:context: monitoring-project-and-application-metrics-using-developer-perspective
-
-toc::[]
-
-
-The *Observe* view in the *Developer* perspective provides options to monitor your project or application metrics, such as CPU, memory, and bandwidth usage, and network-related information.
-
-[id="prerequisites_odc-monitoring-project-and-application-metrics-using-developer-perspective"]
-== Prerequisites
-
-* You have xref:../applications/creating_applications/odc-creating-applications-using-developer-perspective.adoc#odc-creating-applications-using-developer-perspective[created and deployed applications on {product-title}].
-ifndef::openshift-rosa,openshift-rosa-hcp,openshift-dedicated[]
-* You have xref:../web_console/web-console.adoc#web-console[logged in to the web console] and have switched to xref:../web_console/web-console-overview.adoc#about-developer-perspective_web-console-overview[the *Developer* perspective].
-endif::openshift-rosa,openshift-rosa-hcp,openshift-dedicated[]
-ifdef::openshift-rosa,openshift-rosa-hcp,openshift-dedicated[]
-* You have logged in to the web console and have switched to the *Developer* perspective.
-endif::openshift-rosa,openshift-rosa-hcp,openshift-dedicated[] - -include::modules/odc-monitoring-your-project-metrics.adoc[leveloffset=+1] - -include::modules/odc-monitoring-your-application-metrics.adoc[leveloffset=+1] - -include::modules/odc-image-vulnerabilities-breakdown.adoc[leveloffset=+1] - -include::modules/odc-monitoring-your-app-vulnerabilities.adoc[leveloffset=+1] - -ifdef::openshift-rosa,openshift-dedicated[] -[role="_additional-resources"] -[id="additional-resources-odc-monitoring-project-and-application-metrics-using-developer-perspective"] -== Additional resources -* xref:../observability/monitoring/monitoring-overview.adoc#monitoring-overview[Monitoring overview] -endif::openshift-rosa,openshift-dedicated[] -ifndef::openshift-rosa,openshift-rosa-hcp,openshift-dedicated[] -* xref:../observability/monitoring/about-ocp-monitoring/about-ocp-monitoring.adoc#about-ocp-monitoring[About {product-title} monitoring] -endif::openshift-rosa,openshift-rosa-hcp,openshift-dedicated[] \ No newline at end of file diff --git a/applications/odc-viewing-application-composition-using-topology-view.adoc b/applications/odc-viewing-application-composition-using-topology-view.adoc deleted file mode 100644 index 67f00d2f1bb1..000000000000 --- a/applications/odc-viewing-application-composition-using-topology-view.adoc +++ /dev/null @@ -1,46 +0,0 @@ -:_mod-docs-content-type: ASSEMBLY -[id="odc-viewing-application-composition-using-topology-view"] -= Viewing application composition by using the Topology view -include::_attributes/common-attributes.adoc[] -:context: viewing-application-composition-using-topology-view - -toc::[] - -The *Topology* view in the *Developer* perspective of the web console provides a visual representation of all the applications within a project, their build status, and the components and services associated with them. - -== Prerequisites -To view your applications in the *Topology* view and interact with them, ensure that: - -ifndef::openshift-rosa,openshift-rosa-hcp,openshift-dedicated[] -* You have xref:../web_console/web-console.adoc#web-console[logged in to the web console]. -* You have the appropriate xref:../authentication/using-rbac.adoc#default-roles_using-rbac[roles and permissions] in a project to create applications and other workloads in {product-title}. -* You are in xref:../web_console/web-console-overview.adoc#about-developer-perspective_web-console-overview[the *Developer* perspective]. -endif::openshift-rosa,openshift-rosa-hcp,openshift-dedicated[] -ifdef::openshift-rosa,openshift-rosa-hcp,openshift-dedicated[] -* You have logged in to the web console. -* You are in the *Developer* perspective. 
-endif::openshift-rosa,openshift-rosa-hcp,openshift-dedicated[] - -include::modules/odc-viewing-application-topology.adoc[leveloffset=+1] - -include::modules/odc-interacting-with-applications-and-components.adoc[leveloffset=+1] - -include::modules/odc-scaling-application-pods-and-checking-builds-and-routes.adoc[leveloffset=+1] - -include::modules/odc-adding-components-to-an-existing-project.adoc[leveloffset=+1] - -include::modules/odc-grouping-multiple-components.adoc[leveloffset=+1] - -include::modules/odc-adding-services-to-application.adoc[leveloffset=+1] - -include::modules/odc-removing-services-from-application.adoc[leveloffset=+1] - -include::modules/odc-labels-and-annotations-used-for-topology-view.adoc[leveloffset=+1] - -[role="_additional-resources"] -== Additional resources - -* See xref:../applications/creating_applications/odc-creating-applications-using-developer-perspective.adoc#odc-importing-codebase-from-git-to-create-application_odc-creating-applications-using-developer-perspective[Importing a codebase from Git to create an application] for more information on creating an application from Git. -ifndef::openshift-rosa,openshift-rosa-hcp,openshift-dedicated[] -* See xref:../applications/odc-exporting-applications.adoc#odc-exporting-applications[Exporting applications]. -endif::openshift-rosa,openshift-rosa-hcp,openshift-dedicated[] diff --git a/applications/projects/_attributes b/applications/projects/_attributes deleted file mode 120000 index 20cc1dcb77bf..000000000000 --- a/applications/projects/_attributes +++ /dev/null @@ -1 +0,0 @@ -../../_attributes/ \ No newline at end of file diff --git a/applications/projects/configuring-project-creation.adoc b/applications/projects/configuring-project-creation.adoc deleted file mode 100644 index 9b4242361304..000000000000 --- a/applications/projects/configuring-project-creation.adoc +++ /dev/null @@ -1,21 +0,0 @@ -:_mod-docs-content-type: ASSEMBLY -[id="configuring-project-creation"] -= Configuring project creation -include::_attributes/common-attributes.adoc[] -:context: configuring-project-creation - -toc::[] - -In {product-title}, _projects_ are used to group and isolate related objects. -When a request is made to create a new project using the web console or `oc -new-project` command, an endpoint in {product-title} is used to provision the -project according to a template, which can be customized. - -As -a cluster administrator, you can allow and configure how developers and service -accounts can create, or _self-provision_, their own projects. - -include::modules/about-project-creation.adoc[leveloffset=+1] -include::modules/modifying-template-for-new-projects.adoc[leveloffset=+1] -include::modules/disabling-project-self-provisioning.adoc[leveloffset=+1] -include::modules/customizing-project-request-message.adoc[leveloffset=+1] diff --git a/applications/projects/creating-project-other-user.adoc b/applications/projects/creating-project-other-user.adoc deleted file mode 100644 index 304ceebb8d1e..000000000000 --- a/applications/projects/creating-project-other-user.adoc +++ /dev/null @@ -1,13 +0,0 @@ -:_mod-docs-content-type: ASSEMBLY -[id="creating-project-other-user"] -= Creating a project as another user -include::_attributes/common-attributes.adoc[] -:context: creating-project-other-user - -toc::[] - -Impersonation allows you to create a project as a different user. 
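-
-For example, a user with the required impersonation permissions can create a project on behalf of another user from the CLI. The project and user names below are placeholders; the exact command and required groups are covered in the modules that follow:
-
-[source,terminal]
-----
-$ oc new-project demo-project --as=alice \
-    --as-group=system:authenticated --as-group=system:authenticated:oauth
-----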
- -include::modules/authentication-api-impersonation.adoc[leveloffset=+1] - -include::modules/impersonation-project-creation.adoc[leveloffset=+1] diff --git a/applications/projects/images b/applications/projects/images deleted file mode 120000 index 5e67573196d8..000000000000 --- a/applications/projects/images +++ /dev/null @@ -1 +0,0 @@ -../images \ No newline at end of file diff --git a/applications/projects/modules b/applications/projects/modules deleted file mode 120000 index 464b823aca16..000000000000 --- a/applications/projects/modules +++ /dev/null @@ -1 +0,0 @@ -../modules \ No newline at end of file diff --git a/applications/projects/snippets b/applications/projects/snippets deleted file mode 120000 index 5a3f5add140e..000000000000 --- a/applications/projects/snippets +++ /dev/null @@ -1 +0,0 @@ -../../snippets/ \ No newline at end of file diff --git a/applications/projects/working-with-projects.adoc b/applications/projects/working-with-projects.adoc deleted file mode 100644 index 6dea112749db..000000000000 --- a/applications/projects/working-with-projects.adoc +++ /dev/null @@ -1,89 +0,0 @@ -:_mod-docs-content-type: ASSEMBLY -[id="working-with-projects"] -= Working with projects -include::_attributes/common-attributes.adoc[] -:context: projects - -toc::[] - -A _project_ allows a community of users to organize and manage their content in -isolation from other communities. - -[NOTE] -==== -Projects starting with `openshift-` and `kube-` are -ifndef::openshift-dedicated,openshift-rosa,openshift-rosa-hcp[] -xref:../../authentication/using-rbac.adoc#rbac-default-projects_using-rbac[default projects]. -endif::openshift-dedicated,openshift-rosa,openshift-rosa-hcp[] -ifdef::openshift-dedicated,openshift-rosa,openshift-rosa-hcp[] -default projects. -endif::openshift-dedicated,openshift-rosa,openshift-rosa-hcp[] -These projects host cluster components that run as pods and other infrastructure components. As such, {product-title} does not allow you to create projects starting with `openshift-` or `kube-` using the `oc new-project` command. -ifndef::openshift-dedicated[] -Cluster administrators can create these projects using the `oc adm new-project` command. -endif::openshift-dedicated[] -ifdef::openshift-dedicated[] -For {product-title} clusters that use the Customer Cloud Subscription (CCS) model, users with `cluster-admin` privileges can create these projects using the `oc adm new-project` command. -endif::openshift-dedicated[] -==== - -ifndef::openshift-dedicated[] -include::snippets/default-projects.adoc[] -endif::openshift-dedicated[] -ifdef::openshift-dedicated[] -[NOTE] -==== -In {product-title} clusters that use the Customer Cloud Subscription (CCS) model, you cannot assign an SCC to pods created in one of the default namespaces: `default`, `kube-system`, `kube-public`, `openshift-node`, `openshift-infra`, and `openshift`. You cannot use these namespaces for running pods or services. You cannot create any SCCs for {product-title} clusters that use a Red Hat cloud account, because SCC resource creation requires `cluster-admin` privileges. -==== -endif::openshift-dedicated[] - -[id="working-with-projects-create-project"] -== Creating a project - -You can use the {product-title} web console or the {oc-first} to create a project in your cluster. 
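-
-For example, from the CLI a project is created with a single command; the display name and description are optional, and the values shown are placeholders:
-
-[source,terminal]
-----
-$ oc new-project my-project \
-    --display-name="My Project" \
-    --description="An example project"
-----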
-
-include::modules/creating-a-project-using-the-web-console.adoc[leveloffset=+2]
-
-// include modules/odc-creating-projects-using-developer-perspective.adoc[leveloffset=+2]
-
-.Additional resources
-
-* xref:../../applications/projects/working-with-projects.adoc#odc-customizing-available-cluster-roles-using-the-web-console_projects[Customizing the available cluster roles using the web console]
-
-include::modules/creating-a-project-using-the-CLI.adoc[leveloffset=+2]
-
-[id="working-with-projects-viewing-project"]
-== Viewing a project
-
-You can use the {product-title} web console or the {oc-first} to view a project in your cluster.
-
-include::modules/viewing-a-project-using-the-web-console.adoc[leveloffset=+2]
-
-include::modules/viewing-a-project-using-the-CLI.adoc[leveloffset=+2]
-
-include::modules/odc-providing-project-permissions-using-developer-perspective.adoc[leveloffset=+1]
-
-include::modules/odc-customizing-available-cluster-roles-using-the-web-console.adoc[leveloffset=+1]
-
-include::modules/adding-to-a-project.adoc[leveloffset=+1]
-
-[id="working-with-projects-viewing-project-status"]
-== Checking the project status
-
-You can use the {product-title} web console or the {oc-first} to view the status of your project.
-
-include::modules/checking-project-status-using-the-web-console.adoc[leveloffset=+2]
-
-include::modules/checking-project-status-using-the-CLI.adoc[leveloffset=+2]
-
-// The following text comes from deleting-a-project-using-the-CLI.adoc
-[id="working-with-projects-deleting-project"]
-== Deleting a project
-
-You can use the {product-title} web console or the {oc-first} to delete a project.
-
-When you delete a project, the server updates the project status from *Active* to *Terminating*. Then, the server clears all content from a project that is in the *Terminating* state before finally removing the project. While a project is in *Terminating* status, you cannot add new content to the project.
-
-include::modules/deleting-a-project-using-the-web-console.adoc[leveloffset=+2]
-
-include::modules/deleting-a-project-using-the-CLI.adoc[leveloffset=+2]
diff --git a/applications/pruning-objects.adoc b/applications/pruning-objects.adoc
deleted file mode 100644
index 5d08939f2543..000000000000
--- a/applications/pruning-objects.adoc
+++ /dev/null
@@ -1,71 +0,0 @@
-:_mod-docs-content-type: ASSEMBLY
-[id="pruning-objects"]
-= Pruning objects to reclaim resources
-include::_attributes/common-attributes.adoc[]
-:context: pruning-objects
-
-toc::[]
-
-Over time, API objects created in {product-title} can accumulate in the
-cluster's etcd data store through normal user operations, such as when building
-and deploying applications.
-
-ifndef::openshift-rosa,openshift-rosa-hcp,openshift-dedicated[]
-Cluster administrators can periodically prune older versions of objects from the
-cluster that are no longer required. For example, by pruning images you can delete
-older images and layers that are no longer in use, but are still taking up disk
-space.
-endif::openshift-rosa,openshift-rosa-hcp,openshift-dedicated[]
-ifdef::openshift-rosa,openshift-rosa-hcp,openshift-dedicated[]
-A user with the `dedicated-admin` role can periodically prune older versions of objects from the
-cluster that are no longer required. For example, by pruning images you can delete
-older images and layers that are no longer in use, but are still taking up disk
-space.
-endif::openshift-rosa,openshift-rosa-hcp,openshift-dedicated[] - -include::modules/pruning-basic-operations.adoc[leveloffset=+1] -include::modules/pruning-groups.adoc[leveloffset=+1] -include::modules/pruning-deployments.adoc[leveloffset=+1] -include::modules/pruning-builds.adoc[leveloffset=+1] - -ifndef::openshift-rosa,openshift-rosa-hcp,openshift-dedicated[] -[role="_additional-resources"] -.Additional resources -* xref:../cicd/builds/advanced-build-operations.adoc#builds-build-pruning-advanced-build-operations[Performing advanced builds -> Pruning builds] -endif::openshift-rosa,openshift-rosa-hcp,openshift-dedicated[] - -include::modules/pruning-images.adoc[leveloffset=+1] -//cannot create resource "serviceaccounts". cannot create resource "cronjobs" - -ifndef::openshift-rosa,openshift-rosa-hcp,openshift-dedicated[] -include::modules/pruning-images-manual.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources -* xref:../registry/accessing-the-registry.adoc#accessing-the-registry[Accessing the registry] -* xref:../registry/securing-exposing-registry.adoc#securing-exposing-registry[Exposing the registry] -* See -xref:../registry/configuring-registry-operator.adoc#configuring-registry-operator[Image -Registry Operator in {product-title}] for information on how to create a -registry route. -endif::openshift-rosa,openshift-rosa-hcp,openshift-dedicated[] - -// cannot patch resource "configs" - -ifndef::openshift-rosa,openshift-rosa-hcp,openshift-dedicated[] -include::modules/pruning-hard-pruning-registry.adoc[leveloffset=+1] -endif::openshift-rosa,openshift-rosa-hcp,openshift-dedicated[] - -include::modules/pruning-cronjobs.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources -// When the Operators book is added to ROSA/OSD, check if this link is valid. -ifndef::openshift-rosa,openshift-rosa-hcp,openshift-dedicated[] -* xref:../nodes/jobs/nodes-nodes-jobs.adoc#nodes-nodes-jobs_nodes-nodes-jobs[Running tasks in pods using jobs] -endif::openshift-rosa,openshift-rosa-hcp,openshift-dedicated[] -* xref:../applications/quotas/quotas-setting-across-multiple-projects.adoc#setting-quotas-across-multiple-projects[Resource quotas across multiple projects] -// When the Operators book is added to ROSA/OSD, check if this link is valid. 
-ifndef::openshift-rosa,openshift-rosa-hcp,openshift-dedicated[] -* xref:../authentication/using-rbac.adoc#using-rbac[Using RBAC to define and apply permissions] -endif::openshift-rosa,openshift-rosa-hcp,openshift-dedicated[] diff --git a/applications/quotas/_attributes b/applications/quotas/_attributes deleted file mode 120000 index 20cc1dcb77bf..000000000000 --- a/applications/quotas/_attributes +++ /dev/null @@ -1 +0,0 @@ -../../_attributes/ \ No newline at end of file diff --git a/applications/quotas/images b/applications/quotas/images deleted file mode 120000 index 5e67573196d8..000000000000 --- a/applications/quotas/images +++ /dev/null @@ -1 +0,0 @@ -../images \ No newline at end of file diff --git a/applications/quotas/modules b/applications/quotas/modules deleted file mode 120000 index 464b823aca16..000000000000 --- a/applications/quotas/modules +++ /dev/null @@ -1 +0,0 @@ -../modules \ No newline at end of file diff --git a/applications/quotas/quotas-setting-across-multiple-projects.adoc b/applications/quotas/quotas-setting-across-multiple-projects.adoc deleted file mode 100644 index a4a4330ca134..000000000000 --- a/applications/quotas/quotas-setting-across-multiple-projects.adoc +++ /dev/null @@ -1,17 +0,0 @@ -:_mod-docs-content-type: ASSEMBLY -[id="setting-quotas-across-multiple-projects"] -= Resource quotas across multiple projects -include::_attributes/common-attributes.adoc[] -:context: setting-quotas-across-multiple-projects - -toc::[] - -A multi-project quota, defined by a `ClusterResourceQuota` object, allows quotas to be shared across multiple projects. Resources used in each selected project are aggregated and that aggregate is used to limit resources across all the selected projects. - -This guide describes how cluster administrators can set and manage resource quotas across multiple projects. - -include::snippets/default-projects.adoc[] - -include::modules/quotas-selecting-projects.adoc[leveloffset=+1] -include::modules/quotas-viewing-clusterresourcequotas.adoc[leveloffset=+1] -include::modules/quotas-selection-granularity.adoc[leveloffset=+1] diff --git a/applications/quotas/quotas-setting-per-project.adoc b/applications/quotas/quotas-setting-per-project.adoc deleted file mode 100644 index 472879c2fede..000000000000 --- a/applications/quotas/quotas-setting-per-project.adoc +++ /dev/null @@ -1,22 +0,0 @@ -:_mod-docs-content-type: ASSEMBLY -[id="quotas-setting-per-project"] -= Resource quotas per project -include::_attributes/common-attributes.adoc[] -:context: quotas-setting-per-project - -toc::[] - -A _resource quota_, defined by a `ResourceQuota` object, provides constraints that limit aggregate resource consumption per project. It can limit the quantity of objects that can be created in a project by type, as well as the total amount of compute resources and storage that might be consumed by resources in that project. - -This guide describes how resource quotas work, how cluster administrators can set and manage resource quotas on a per project basis, and how developers and cluster administrators can view them. 
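-
-As a sketch of the shape of such a quota, the following `ResourceQuota` object constrains pod counts and aggregate compute resources in a single project. The name and limits are illustrative; the modules that follow describe the supported fields in detail:
-
-[source,yaml]
-----
-# Illustrative values only.
-apiVersion: v1
-kind: ResourceQuota
-metadata:
-  name: compute-quota
-spec:
-  hard:
-    pods: "10"            # at most 10 pods in the project
-    requests.cpu: "2"     # total CPU that pods can request
-    requests.memory: 4Gi  # total memory that pods can request
-    limits.memory: 8Gi    # total memory limit across all pods
-----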
- -include::modules/quotas-resources-managed.adoc[leveloffset=+1] -include::modules/quotas-scopes.adoc[leveloffset=+1] -include::modules/quotas-enforcement.adoc[leveloffset=+1] -include::modules/quotas-requests-vs-limits.adoc[leveloffset=+1] -include::modules/quotas-sample-resource-quotas-def.adoc[leveloffset=+1] -include::modules/quotas-creating-a-quota.adoc[leveloffset=+1] -include::modules/quotas-creating-object-count-quotas.adoc[leveloffset=+2] -include::modules/setting-resource-quota-for-extended-resources.adoc[leveloffset=+2] -include::modules/quotas-viewing-quotas.adoc[leveloffset=+1] -include::modules/quotas-requiring-explicit-quota.adoc[leveloffset=+1] diff --git a/applications/quotas/snippets b/applications/quotas/snippets deleted file mode 120000 index 5a3f5add140e..000000000000 --- a/applications/quotas/snippets +++ /dev/null @@ -1 +0,0 @@ -../../snippets/ \ No newline at end of file diff --git a/applications/red-hat-marketplace.adoc b/applications/red-hat-marketplace.adoc deleted file mode 100644 index ac0936d05c40..000000000000 --- a/applications/red-hat-marketplace.adoc +++ /dev/null @@ -1,11 +0,0 @@ -:_mod-docs-content-type: ASSEMBLY -[id="red-hat-marketplace"] -= Using the Red Hat Marketplace -include::_attributes/common-attributes.adoc[] -:context: red-hat-marketplace - -toc::[] - -The link:https://marketplace.redhat.com[Red Hat Marketplace] is an open cloud marketplace that makes it easy to discover and access certified software for container-based environments that run on public clouds and on-premise. - -include::modules/red-hat-marketplace-features.adoc[leveloffset=+1] diff --git a/applications/snippets b/applications/snippets deleted file mode 120000 index 9f5bc7e4dde0..000000000000 --- a/applications/snippets +++ /dev/null @@ -1 +0,0 @@ -../snippets \ No newline at end of file diff --git a/applications/working-with-quotas.adoc b/applications/working-with-quotas.adoc deleted file mode 100644 index 85715c12eb2b..000000000000 --- a/applications/working-with-quotas.adoc +++ /dev/null @@ -1,26 +0,0 @@ -:_mod-docs-content-type: ASSEMBLY -[id="working-with-quotas"] -= Working with quotas -include::_attributes/common-attributes.adoc[] -:context: working-with-quotas - -toc::[] - -A _resource quota_, defined by a ResourceQuota object, provides constraints that -limit aggregate resource consumption per project. It can limit the quantity of -objects that can be created in a project by type, as well as the total amount of -compute resources and storage that may be consumed by resources in that project. - -An _object quota count_ places a defined quota on all standard namespaced resource -types. When using a resource quota, an object is charged against the quota if it -exists in server storage. These types of quotas are useful to protect against -exhaustion of storage resources. - -This guide describes how resource quotas work and how developers can work with -and view them. 
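-
-For example, a developer can typically list the quotas that apply to a project and inspect current usage against the hard limits from the CLI; the project and quota names are placeholders:
-
-[source,terminal]
-----
-$ oc get resourcequota -n my-project
-$ oc describe resourcequota compute-quota -n my-project
-----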
- -include::modules/quotas-viewing-quotas.adoc[leveloffset=+1] -include::modules/quotas-resources-managed.adoc[leveloffset=+1] -include::modules/quotas-scopes.adoc[leveloffset=+1] -include::modules/quotas-enforcement.adoc[leveloffset=+1] -include::modules/quotas-requests-vs-limits.adoc[leveloffset=+1] diff --git a/applications/working_with_helm_charts/_attributes b/applications/working_with_helm_charts/_attributes deleted file mode 120000 index 20cc1dcb77bf..000000000000 --- a/applications/working_with_helm_charts/_attributes +++ /dev/null @@ -1 +0,0 @@ -../../_attributes/ \ No newline at end of file diff --git a/applications/working_with_helm_charts/configuring-custom-helm-chart-repositories.adoc b/applications/working_with_helm_charts/configuring-custom-helm-chart-repositories.adoc deleted file mode 100644 index 823d7ce3e2d2..000000000000 --- a/applications/working_with_helm_charts/configuring-custom-helm-chart-repositories.adoc +++ /dev/null @@ -1,67 +0,0 @@ -:_mod-docs-content-type: ASSEMBLY -[id="configuring-custom-helm-chart-repositories"] -= Configuring custom Helm chart repositories -include::_attributes/common-attributes.adoc[] -:context: configuring-custom-helm-chart-repositories - -toc::[] - -[role="_abstract"] -ifndef::openshift-rosa,openshift-rosa-hcp,openshift-dedicated[] -You can create Helm releases on an {product-title} cluster using the following methods: - -* The CLI. -* The *Developer* perspective of the web console. -endif::openshift-rosa,openshift-rosa-hcp,openshift-dedicated[] - -The *Developer Catalog*, in the *Developer* perspective of the web console, displays the Helm charts available in the cluster. By default, it lists the Helm charts from the Red Hat OpenShift Helm chart repository. For a list of the charts, see link:https://charts.openshift.io/index.yaml[the Red Hat `Helm index` file]. - -As a cluster administrator, you can add multiple cluster-scoped and namespace-scoped Helm chart repositories, separate from the default cluster-scoped Helm repository, and display the Helm charts from these repositories in the *Developer Catalog*. - -As a regular user or project member with the appropriate role-based access control (RBAC) permissions, you can add multiple namespace-scoped Helm chart repositories, apart from the default cluster-scoped Helm repository, and display the Helm charts from these repositories in the *Developer Catalog*. - -In the *Developer* perspective of the web console, you can use the *Helm* page to: - -* Create Helm Releases and Repositories using the *Create* button. - -* Create, update, or delete a cluster-scoped or namespace-scoped Helm chart repository. - -* View the list of the existing Helm chart repositories in the Repositories tab, which can also be easily distinguished as either cluster scoped or namespace scoped. - -// clusterroles.rbac.authorization.k8s.io is forbidden: user (groups=["dedicated-admins" "system:authenticated:oauth" "system:authenticated"]) is attempting to grant RBAC permissions not currently held: -ifndef::openshift-rosa,openshift-rosa-hcp,openshift-dedicated[] -include::modules/helm-installing-a-helm-chart-on-an-openshift-cluster.adoc[leveloffset=+1] -endif::openshift-rosa,openshift-rosa-hcp,openshift-dedicated[] - -include::modules/odc-creating-helm-releases-using-developer-perspective.adoc[leveloffset=+1] - -== Using Helm in the web terminal - -// ROSA/OSD users can access the web terminal, but the docs are not currently present in the ROSA/OSD books. 
-ifndef::openshift-rosa,openshift-rosa-hcp,openshift-dedicated[]
-You can use Helm by xref:../../web_console/web_terminal/odc-using-web-terminal.adoc#odc-access-web-terminal_odc-using-web-terminal[accessing the web terminal] in the *Developer* perspective of the web console.
-endif::openshift-rosa,openshift-rosa-hcp,openshift-dedicated[]
-ifdef::openshift-rosa,openshift-rosa-hcp,openshift-dedicated[]
-You can use Helm by accessing the web terminal in the *Developer* perspective of the web console.
-endif::openshift-rosa,openshift-rosa-hcp,openshift-dedicated[]
-
-include::modules/helm-creating-a-custom-helm-chart-on-openshift.adoc[leveloffset=+1]
-
-//cannot create resource "helmchartrepositories" in API group "helm.openshift.io"
-ifndef::openshift-rosa,openshift-rosa-hcp,openshift-dedicated[]
-include::modules/helm-adding-helm-chart-repositories.adoc[leveloffset=+1]
-endif::openshift-rosa,openshift-rosa-hcp,openshift-dedicated[]
-//projecthelmchartrepositories.helm.openshift.io "azure-sample-repo" is forbidden: User cannot get resource "projecthelmchartrepositories" in API group "helm.openshift.io"
-ifndef::openshift-rosa,openshift-rosa-hcp,openshift-dedicated[]
-include::modules/helm-adding-namespace-scoped-helm-chart-repositories.adoc[leveloffset=+1]
-endif::openshift-rosa,openshift-rosa-hcp,openshift-dedicated[]
-//cannot create resource "helmchartrepositories" in API group "helm.openshift.io"
-ifndef::openshift-rosa,openshift-rosa-hcp,openshift-dedicated[]
-include::modules/helm-creating-credentials-and-certificates-to-add-helm-repositories.adoc[leveloffset=+1]
-endif::openshift-rosa,openshift-rosa-hcp,openshift-dedicated[]
-
-include::modules/helm-filtering-helm-charts-by-certification-level.adoc[leveloffset=+1]
-ifndef::openshift-rosa,openshift-rosa-hcp,openshift-dedicated[]
-//cannot patch resource "helmchartrepositories" in API group "helm.openshift.io"
-include::modules/helm-disabling-helm-chart-repositories.adoc[leveloffset=+1]
-endif::openshift-rosa,openshift-rosa-hcp,openshift-dedicated[]
diff --git a/applications/working_with_helm_charts/images b/applications/working_with_helm_charts/images
deleted file mode 120000
index 5fa6987088da..000000000000
--- a/applications/working_with_helm_charts/images
+++ /dev/null
@@ -1 +0,0 @@
-../../images
\ No newline at end of file
diff --git a/applications/working_with_helm_charts/installing-helm.adoc b/applications/working_with_helm_charts/installing-helm.adoc
deleted file mode 100644
index 3ac0a5d8f744..000000000000
--- a/applications/working_with_helm_charts/installing-helm.adoc
+++ /dev/null
@@ -1,116 +0,0 @@
-:_mod-docs-content-type: ASSEMBLY
-[id="installing-helm"]
-= Installing Helm
-include::_attributes/common-attributes.adoc[]
-:context: installing-helm
-
-toc::[]
-
-The following section describes how to install Helm on different platforms using the CLI.
-
-You can also find the URL to the latest binaries from the {product-title} web console by clicking the *?* icon in the upper-right corner and selecting *Command Line Tools*.
-
-.Prerequisites
-* You have installed Go, version 1.13 or higher.
-
-== On Linux
-
-ifndef::openshift-rosa,openshift-rosa-hcp,openshift-dedicated[]
-. Download the Helm binary and add it to your path:
-
-* Linux (x86_64, amd64)
-+
-[source,terminal]
-----
-# curl -L https://mirror.openshift.com/pub/openshift-v4/clients/helm/latest/helm-linux-amd64 -o /usr/local/bin/helm
-----
-
-* Linux on {ibm-z-name} and {ibm-linuxone-name} (s390x)
-+
-[source,terminal]
-----
-# curl -L https://mirror.openshift.com/pub/openshift-v4/clients/helm/latest/helm-linux-s390x -o /usr/local/bin/helm
-----
-
-* Linux on {ibm-power-name} (ppc64le)
-+
-[source,terminal]
-----
-# curl -L https://mirror.openshift.com/pub/openshift-v4/clients/helm/latest/helm-linux-ppc64le -o /usr/local/bin/helm
-----
-endif::openshift-rosa,openshift-rosa-hcp,openshift-dedicated[]
-ifdef::openshift-rosa,openshift-rosa-hcp,openshift-dedicated[]
-. Download the Linux x86_64 or Linux amd64 Helm binary and add it to your path:
-+
-[source,terminal]
-----
-# curl -L https://mirror.openshift.com/pub/openshift-v4/clients/helm/latest/helm-linux-amd64 -o /usr/local/bin/helm
-----
-endif::openshift-rosa,openshift-rosa-hcp,openshift-dedicated[]
-
-. Make the binary file executable:
-+
-[source,terminal]
-----
-# chmod +x /usr/local/bin/helm
-----
-
-. Check the installed version:
-+
-[source,terminal]
-----
-$ helm version
-----
-+
-.Example output
-[source,terminal]
-----
-version.BuildInfo{Version:"v3.0", GitCommit:"b31719aab7963acf4887a1c1e6d5e53378e34d93", GitTreeState:"clean", GoVersion:"go1.13.4"}
-----
-
-== On Windows 7/8
-
-. Download the latest link:https://mirror.openshift.com/pub/openshift-v4/clients/helm/latest/helm-windows-amd64.exe[`.exe` file] and put it in a directory of your preference.
-. Right-click *Start* and click *Control Panel*.
-. Select *System and Security* and then click *System*.
-. From the menu on the left, select *Advanced systems settings* and click *Environment Variables* at the bottom.
-. Select *Path* from the *Variable* section and click *Edit*.
-. Click *New* and type the path to the folder with the `.exe` file into the field or click *Browse* and select the directory, and click *OK*.
-
-== On Windows 10
-
-. Download the latest link:https://mirror.openshift.com/pub/openshift-v4/clients/helm/latest/helm-windows-amd64.exe[`.exe` file] and put it in a directory of your preference.
-. Click *Search* and type `env` or `environment`.
-. Select *Edit environment variables for your account*.
-. Select *Path* from the *Variable* section and click *Edit*.
-. Click *New* and type the path to the directory with the `.exe` file into the field or click *Browse* and select the directory, and click *OK*.
-
-
-== On macOS
-. Download the Helm binary and add it to your path:
-+
-[source,terminal]
-----
-# curl -L https://mirror.openshift.com/pub/openshift-v4/clients/helm/latest/helm-darwin-amd64 -o /usr/local/bin/helm
-----
-
-
-. Make the binary file executable:
-+
-[source,terminal]
-----
-# chmod +x /usr/local/bin/helm
-----
-
-. Check the installed version:
-+
-[source,terminal]
-----
-$ helm version
-----
-+
-.Example output
-[source,terminal]
-----
-version.BuildInfo{Version:"v3.0", GitCommit:"b31719aab7963acf4887a1c1e6d5e53378e34d93", GitTreeState:"clean", GoVersion:"go1.13.4"}
-----
diff --git a/applications/working_with_helm_charts/modules b/applications/working_with_helm_charts/modules
deleted file mode 120000
index 8b0e8540076d..000000000000
--- a/applications/working_with_helm_charts/modules
+++ /dev/null
@@ -1 +0,0 @@
-../../modules
\ No newline at end of file
diff --git a/applications/working_with_helm_charts/odc-working-with-helm-releases.adoc b/applications/working_with_helm_charts/odc-working-with-helm-releases.adoc
deleted file mode 100644
index 351e17669223..000000000000
--- a/applications/working_with_helm_charts/odc-working-with-helm-releases.adoc
+++ /dev/null
@@ -1,25 +0,0 @@
-:_mod-docs-content-type: ASSEMBLY
-[id="odc-working-with-helm-releases"]
-= Working with Helm releases
-include::_attributes/common-attributes.adoc[]
-:context: working-with-helm-releases
-
-toc::[]
-
-You can use the *Developer* perspective in the web console to update, roll back, or delete a Helm release.
-
-== Prerequisites
-
-ifndef::openshift-rosa,openshift-rosa-hcp,openshift-dedicated[]
-* You have logged in to the web console and have switched to xref:../../web_console/web-console-overview.adoc#about-developer-perspective_web-console-overview[the *Developer* perspective].
-endif::openshift-rosa,openshift-rosa-hcp,openshift-dedicated[]
-ifdef::openshift-rosa,openshift-rosa-hcp,openshift-dedicated[]
-* You have logged in to the web console and have switched to the *Developer* perspective.
-endif::openshift-rosa,openshift-rosa-hcp,openshift-dedicated[]
-
-
-include::modules/odc-upgrading-helm-release.adoc[leveloffset=+1]
-
-include::modules/odc-rolling-back-helm-release.adoc[leveloffset=+1]
-
-include::modules/odc-deleting-helm-release.adoc[leveloffset=+1]
diff --git a/applications/working_with_helm_charts/snippets b/applications/working_with_helm_charts/snippets
deleted file mode 120000
index 5a3f5add140e..000000000000
--- a/applications/working_with_helm_charts/snippets
+++ /dev/null
@@ -1 +0,0 @@
-../../snippets/
\ No newline at end of file
diff --git a/applications/working_with_helm_charts/understanding-helm.adoc b/applications/working_with_helm_charts/understanding-helm.adoc
deleted file mode 100644
index 65ee3b1c079e..000000000000
--- a/applications/working_with_helm_charts/understanding-helm.adoc
+++ /dev/null
@@ -1,46 +0,0 @@
-:_mod-docs-content-type: ASSEMBLY
-[id="understanding-helm"]
-= Understanding Helm
-include::_attributes/common-attributes.adoc[]
-:context: understanding-helm
-
-toc::[]
-
-[role="_abstract"]
-Helm is a software package manager that simplifies deployment of applications and services to {product-title} clusters.
-
-Helm uses a packaging format called _charts_.
-A Helm chart is a collection of files that describes the {product-title} resources.
-
-Installing a chart in a cluster creates a running instance of the chart known as a _release_.
-
-Each time a chart is installed, or a release is upgraded or rolled back, an incremental revision is created.
-
-
-== Key features
-
-Helm provides the ability to:
-
-* Search through a large collection of charts stored in the chart repository.
-* Modify existing charts.
-* Create your own charts with {product-title} or Kubernetes resources.
-* Package and share your applications as charts.
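-
-A minimal terminal sketch of that chart workflow follows; the repository and chart names are placeholders:
-
-[source,terminal]
-----
-$ helm repo add openshift-charts https://charts.openshift.io/  # add a chart repository
-$ helm search repo openshift-charts                            # search its charts
-$ helm create my-chart                                         # scaffold your own chart
-$ helm package my-chart                                        # package the chart for sharing
-----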
- -// No tech preview in ROSA/OSD, added ifndef in case this note gets un-commented. -ifndef::openshift-rosa,openshift-rosa-hcp,openshift-dedicated[] -//[NOTE] -//==== -// In {product-title} 4.10 and 4.11, Helm is disabled for the xref: ../../web_console/web-console.adoc#multi-cluster-about_web-console[Multicluster Console] (Technology Preview). -//==== -endif::openshift-rosa,openshift-rosa-hcp,openshift-dedicated[] - -== Red Hat Certification of Helm charts for OpenShift - -You can choose to verify and certify your Helm charts by Red Hat for all the components you will be deploying on the Red Hat {product-title}. Charts go through an automated Red Hat OpenShift certification workflow that guarantees security compliance as well as best integration and experience with the platform. Certification assures the integrity of the chart and ensures that the Helm chart works seamlessly on Red Hat OpenShift clusters. - -[role="_additional-resources"] -== Additional resources -* For more information on how to certify your Helm charts as a Red Hat partner, see link:https://redhat-connect.gitbook.io/partner-guide-for-red-hat-openshift-and-container/helm-chart-certification/overview[Red Hat Certification of Helm charts for OpenShift]. -* For more information on OpenShift and Container certification guides for Red Hat partners, see link:https://access.redhat.com/documentation/en-us/red_hat_software_certification/8.51/html-single/red_hat_software_certification_workflow_guide/index#con_container-certification_openshift-sw-cert-workflow-introduction-to-redhat-openshift-operator-certification[Partner Guide for OpenShift and Container Certification]. -* For a list of the charts, see link:https://charts.openshift.io/index.yaml[the Red Hat `Helm index` file]. -* You can view the available charts at the link:https://marketplace.redhat.com/en-us/documentation/access-red-hat-marketplace[Red Hat Marketplace]. For more information, see xref:../../applications/red-hat-marketplace.adoc#red-hat-marketplace[Using the Red Hat Marketplace]. diff --git a/applications_and_projects/working-with-projects.adoc b/applications_and_projects/working-with-projects.adoc deleted file mode 100644 index e69de29bb2d1..000000000000 diff --git a/architecture/admission-plug-ins.adoc b/architecture/admission-plug-ins.adoc deleted file mode 100644 index 700c382dd6d3..000000000000 --- a/architecture/admission-plug-ins.adoc +++ /dev/null @@ -1,36 +0,0 @@ -:_mod-docs-content-type: ASSEMBLY -[id="admission-plug-ins"] -= Admission plugins -include::_attributes/common-attributes.adoc[] -:context: admission-plug-ins - -toc::[] - -Admission plugins are used to help regulate how {product-title} functions. 
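-
-Many of the webhook-based plugins described below are configured through the standard Kubernetes admission registration API. The following trimmed sketch of a validating webhook configuration uses placeholder names, namespaces, and paths:
-
-[source,yaml]
-----
-# Illustrative sketch; a real configuration also needs a CA bundle and a
-# running webhook server behind the referenced service.
-apiVersion: admissionregistration.k8s.io/v1
-kind: ValidatingWebhookConfiguration
-metadata:
-  name: example-validating-webhook
-webhooks:
-- name: validate.example.com
-  clientConfig:
-    service:
-      namespace: example-ns
-      name: example-webhook-server
-      path: /validate
-  rules:
-  - apiGroups: ["apps"]
-    apiVersions: ["v1"]
-    operations: ["CREATE", "UPDATE"]
-    resources: ["deployments"]
-  admissionReviewVersions: ["v1"]
-  sideEffects: None
-  failurePolicy: Fail
-----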
- -// Concept modules -include::modules/admission-plug-ins-about.adoc[leveloffset=+1] - -include::modules/admission-plug-ins-default.adoc[leveloffset=+1] - -include::modules/admission-webhooks-about.adoc[leveloffset=+1] - -include::modules/admission-webhook-types.adoc[leveloffset=+1] - -// user (groups=["dedicated-admins" "system:authenticated:oauth" "system:authenticated"]) is attempting to grant RBAC permissions not currently held, clusterroles.rbac.authorization.k8s.io "system:openshift:online:my-webhook-server" not found, cannot get resource "rolebindings", cannot create resource "apiservices", cannot create resource "validatingwebhookconfigurations" -ifndef::openshift-rosa,openshift-dedicated[] -// Procedure module -include::modules/configuring-dynamic-admission.adoc[leveloffset=+1] -endif::openshift-rosa,openshift-dedicated[] - -[role="_additional-resources"] -[id="admission-plug-ins-additional-resources"] -== Additional resources - -ifndef::openshift-rosa,openshift-dedicated[] -* xref:../networking/networking_operators/sr-iov-operator/configuring-sriov-operator.adoc#configuring-sriov-operator_configuring-sriov-operator[Configuring the SR-IOV Network Operator] - -* xref:../nodes/scheduling/nodes-scheduler-taints-tolerations.adoc#nodes-scheduler-taints-tolerations_dedicating_nodes-scheduler-taints-tolerations[Controlling pod placement using node taints] -endif::openshift-rosa,openshift-dedicated[] - -* xref:../nodes/pods/nodes-pods-priority.adoc#admin-guide-priority-preemption-names_nodes-pods-priority[Pod priority names] diff --git a/architecture/architecture-installation.adoc b/architecture/architecture-installation.adoc deleted file mode 100644 index 9f0ee0f76946..000000000000 --- a/architecture/architecture-installation.adoc +++ /dev/null @@ -1,39 +0,0 @@ -:_mod-docs-content-type: ASSEMBLY -[id="architecture-installation"] -= Installation and update -include::_attributes/common-attributes.adoc[] -:context: architecture-installation - -toc::[] - -include::modules/installation-overview.adoc[leveloffset=+1] - -include::modules/supported-platforms-for-openshift-clusters.adoc[leveloffset=+2] - -include::modules/installation-process.adoc[leveloffset=+2] - -[role="_additional-resources"] -.Additional resources - -* xref:../scalability_and_performance/recommended-performance-scale-practices/recommended-etcd-practices.adoc#recommended-etcd-practices[Recommended etcd practices] - -* xref:../scalability_and_performance/recommended-performance-scale-practices/recommended-control-plane-practices.adoc#master-node-sizing_recommended-control-plane-practices[Control plane node sizing] - -[discrete] -=== Installation scope - -The scope of the {product-title} installation program is intentionally narrow. It is designed for simplicity and ensured success. You can complete many more configuration tasks after installation completes. - -[role="_additional-resources"] -.Additional resources - -* See xref:../post_installation_configuration/cluster-tasks.adoc#available_cluster_customizations[Available cluster customizations] for details about {product-title} configuration resources. 
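-
-For installer-provisioned infrastructure, that narrow scope means the whole flow is typically driven by one command; a minimal sketch, with the asset directory as a placeholder:
-
-[source,terminal]
-----
-$ openshift-install create cluster --dir <installation_directory> --log-level=info
-----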
- -include::modules/update-service-overview.adoc[leveloffset=+1] - -include::modules/unmanaged-operators.adoc[leveloffset=+1] - -[id="architecture-installation-next-steps"] -== Next steps - -* xref:../installing/overview/installing-preparing.adoc#installing-preparing[Selecting a cluster installation method and preparing it for users] diff --git a/architecture/architecture-rhcos.adoc b/architecture/architecture-rhcos.adoc deleted file mode 100644 index 1954f535e300..000000000000 --- a/architecture/architecture-rhcos.adoc +++ /dev/null @@ -1,11 +0,0 @@ -:_mod-docs-content-type: ASSEMBLY -[id="architecture-rhcos"] -= {op-system-first} -include::_attributes/common-attributes.adoc[] -:context: architecture-rhcos - -toc::[] - -include::modules/rhcos-about.adoc[leveloffset=+1] -include::modules/ignition-config-viewing.adoc[leveloffset=+1] -include::modules/digging-into-machine-config.adoc[leveloffset=+1] diff --git a/architecture/architecture.adoc b/architecture/architecture.adoc deleted file mode 100644 index 1f1426cb951b..000000000000 --- a/architecture/architecture.adoc +++ /dev/null @@ -1,40 +0,0 @@ -:_mod-docs-content-type: ASSEMBLY -[id="architecture"] -= {product-title} architecture -include::_attributes/common-attributes.adoc[] -:context: architecture - -toc::[] - -include::modules/architecture-platform-introduction.adoc[leveloffset=+1] - -include::modules/architecture-kubernetes-introduction.adoc[leveloffset=+2] - -include::modules/architecture-container-application-benefits.adoc[leveloffset=+2] - -include::modules/architecture-platform-benefits.adoc[leveloffset=+2] -//// -== User facing components -* Workloads (Deployments, Jobs, ReplicaSets, etc) -* Operator Lifecycle Manager -* xref:../cicd/builds/understanding-image-builds.adoc[Builds] - The build component -provides an API and infrastructure for producing new container images using a -variety of techniques including industry standard Dockerfiles and publishing -them to either the cluster image registry, or an external registry. It also -provides integration with Jenkins based pipeline continuous integration -workflows. -* xref:../registry/index.adoc[Image Registry] - -The image registry provides a scalable repository for storing and retrieving -container images that are produced by and run on the cluster. Image access is -integrated with the cluster's role-based access controls and user authentication -system. -* xref:../openshift_images/images-understand.adoc[Image -streams] - The imagestream API provides an abstraction over container images -that exist in registries. It allows workloads to reference an image indirectly, -retains a history of the images that have been referenced, and allows -notification when an image is updated with a new version. -//// - -ifndef::openshift-dedicated,openshift-rosa[] -include::modules/cluster-entitlements.adoc[leveloffset=+2] -endif::openshift-dedicated,openshift-rosa[] diff --git a/architecture/argocd.adoc b/architecture/argocd.adoc deleted file mode 100644 index ede48546c22b..000000000000 --- a/architecture/argocd.adoc +++ /dev/null @@ -1,25 +0,0 @@ -:_mod-docs-content-type: ASSEMBLY -[id="argocd"] -= Using ArgoCD with {product-title} -include::_attributes/common-attributes.adoc[] - -:context: argocd - -toc::[] - -[id="argocd-what"] -== What does ArgoCD do? - -ArgoCD is a declarative continuous delivery tool that leverages GitOps to maintain cluster resources. 
-ArgoCD is implemented as a controller that continuously monitors application definitions and configurations defined in a Git repository and compares the specified state of those configurations with their live state on the cluster. Configurations that deviate from their specified state in the Git repository are classified as OutOfSync. ArgoCD reports these differences and allows administrators to automatically or manually resync configurations to the defined state.
-
-ArgoCD enables you to deliver global custom resources, like the resources that are used to configure {product-title} clusters.
-
-[id="argocd-support"]
-== Statement of support
-
-Red Hat does not provide support for this tool. To obtain support for ArgoCD, see link:https://argoproj.github.io/argo-cd/SUPPORT/[Support] in the ArgoCD documentation.
-
-[id="argocd-documentation"]
-== ArgoCD documentation
-
-For more information about using ArgoCD, see the link:https://argoproj.github.io/argo-cd/[ArgoCD documentation].
diff --git a/architecture/cicd_gitops.adoc b/architecture/cicd_gitops.adoc
deleted file mode 100644
index d99084f9ae1e..000000000000
--- a/architecture/cicd_gitops.adoc
+++ /dev/null
@@ -1,60 +0,0 @@
-:_mod-docs-content-type: ASSEMBLY
-[id="cicd_gitops"]
-= The CI/CD methodology and practice
-include::_attributes/common-attributes.adoc[]
-:context: cicd_gitops
-
-toc::[]
-
-Using a _continuous integration/continuous delivery_ (CI/CD) methodology enables you to regularly deliver applications to customers by introducing automation into the stages of application development, from integration and testing phases to delivery and deployment. The CI/CD process is often referred to as a "CI/CD pipeline." The main concepts attributed to CI/CD are continuous integration, continuous delivery, and continuous deployment.
-
-[id="cicd_admin"]
-== CI/CD for cluster administration and application configuration management
-
-_Continuous integration_ is an automation process for developers. Code changes to an application are regularly built, tested, and merged to a shared repository.
-
-_Continuous delivery_ and _continuous deployment_ are closely related concepts that are sometimes used interchangeably and refer to automation of the pipeline.
-Continuous delivery uses automation to ensure that a developer's changes to an application are tested and sent to a repository, where an operations team can deploy them to a production environment. Continuous deployment enables the release of changes, starting from the repository and ending in production. Continuous deployment speeds up application delivery and prevents the operations team from getting overloaded.
-
-[id="cicd_gitops_methodology"]
-== The GitOps methodology and practice
-
-_GitOps_ is a set of practices that use Git pull requests to manage infrastructure and application configurations. The Git repository in GitOps is the only source of truth for system and application configuration. The repository contains the entire state of the system so that the trail of changes to the system state is visible and auditable. GitOps enables you to implement a DevOps methodology.
-
-You can use GitOps tooling to create repeatable and predictable processes for managing and recreating {product-title} clusters and applications. By using GitOps, you can address the issues of infrastructure and application configuration sprawl.
-It simplifies the propagation of infrastructure and application configuration changes across multiple clusters by defining your infrastructure and application definitions as “code.” Implementing GitOps for your cluster configuration files can make automated installation easier and allow you to configure automated cluster customizations. You can apply the core principles of developing and maintaining software in a Git repository to the creation and management of your cluster and application configuration files.
-
-By using {product-title} to automate both your cluster configuration and container development process, you can pick and choose where and when to adopt GitOps practices. Using a CI pipeline that pairs with your GitOps strategy and execution plan is ideal. {product-title} provides the flexibility to choose when and how you integrate this methodology into your business practices and pipelines.
-
-GitOps works well with {product-title} because you can both declaratively configure clusters and store the state of the cluster configuration in Git. For more information, see xref:../post_installation_configuration/cluster-tasks.adoc#available_cluster_customizations[Available cluster customizations].
-
-[id="cicd_gitops_cluster_administration"]
-=== GitOps for single-cluster and multi-cluster administration
-
-Whether you need one or more independent or cooperative {product-title} clusters, you can use a GitOps strategy to manage the following tasks:
-
-* Ensure that the clusters have similar states for configuration, monitoring, or storage.
-* Recover or recreate clusters from a known state.
-* Create clusters with a known state.
-* Apply or revert configuration changes to multiple {product-title} clusters.
-* Associate templated configuration with different environments.
-
-[id="cicd_gitops_application_configuration"]
-=== GitOps for application configuration management
-
-You can also use GitOps practices to manage application configuration. This practice ensures consistency in applications when you deploy them to different clusters in different environments, like development, stage, and production. Managing application configuration with GitOps is also beneficial when you must deploy applications across multiple clusters, whether in the cloud or on-premises, for availability and scalability purposes.
-
-You can use a GitOps strategy to:
-
-* Promote applications across clusters, from stage to production.
-* Apply or revert application changes to multiple {product-title} clusters.
-
-[id="cicd_gitops_integrators"]
-=== GitOps technology providers and integrators
-
-There are several community offerings and third-party vendors that provide a high level of integration with {product-title}.
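-
-As an illustration of the pattern that these integrations implement, the following minimal sketch shows a declarative application definition in the style of the ArgoCD `Application` resource. The repository URL, path, and names are hypothetical:
-
-[source,yaml]
-----
-apiVersion: argoproj.io/v1alpha1
-kind: Application
-metadata:
-  name: example-app # hypothetical application name
-  namespace: argocd
-spec:
-  project: default
-  source: # the Git repository that holds the desired state
-    repoURL: https://github.com/example/config-repo.git # hypothetical repository
-    targetRevision: main
-    path: manifests/
-  destination: # the cluster and namespace to keep in sync
-    server: https://kubernetes.default.svc
-    namespace: example-app
-  syncPolicy:
-    automated: {} # resync automatically when the repository changes
-----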
- -You can integrate GitOps into {product-title} with the following community partners and third-party integrators: - -* xref:../architecture/argocd.adoc#argocd[ArgoCD] diff --git a/architecture/control-plane.adoc b/architecture/control-plane.adoc deleted file mode 100644 index fb2fa4b8e096..000000000000 --- a/architecture/control-plane.adoc +++ /dev/null @@ -1,74 +0,0 @@ -:_mod-docs-content-type: ASSEMBLY -[id="control-plane"] -= Control plane architecture -include::_attributes/common-attributes.adoc[] -include::_attributes/attributes-openshift-dedicated.adoc[] -:context: control-plane - -toc::[] - -The _control plane_, which is composed of control plane machines, manages the {product-title} cluster. -The control plane machines manage workloads on the compute machines, which are also known as worker machines. -The cluster itself manages all upgrades to the machines by the actions of the Cluster Version Operator (CVO), -ifndef::openshift-dedicated,openshift-rosa[] -the Machine Config Operator, -endif::openshift-dedicated,openshift-rosa[] -and a set of individual Operators. - -ifdef::openshift-rosa[] -:FeatureName: This control plane architecture -include::snippets/rosa-classic-support.adoc[] -endif::openshift-rosa[] - -// This module does not apply to OSD/ROSA -ifndef::openshift-dedicated,openshift-rosa[] -include::modules/architecture-machine-config-pools.adoc[leveloffset=+1] -endif::openshift-dedicated,openshift-rosa[] - -ifndef::openshift-dedicated,openshift-rosa[] -[role="_additional-resources"] -.Additional resources -* xref:../machine_configuration/index.adoc#machine-config-drift-detection_machine-config-overview[Understanding configuration drift detection] -endif::openshift-dedicated,openshift-rosa[] - -include::modules/architecture-machine-roles.adoc[leveloffset=+1] - -// This additional resource does not apply to OSD/ROSA -ifndef::openshift-dedicated,openshift-rosa[] -[role="_additional-resources"] -.Additional resources -* xref:../hosted_control_planes/index.adoc#hcp-overview[{hcp-capital} overview] -endif::openshift-dedicated,openshift-rosa[] - -include::modules/operators-overview.adoc[leveloffset=+1] - -ifdef::openshift-enterprise,openshift-webscale,openshift-origin[] -include::modules/arch-cluster-operators.adoc[leveloffset=+2] - -[role="_additional-resources"] -.Additional resources -* xref:../operators/operator-reference.adoc#cluster-operators-ref[Cluster Operators reference] -endif::[] - -include::modules/arch-olm-operators.adoc[leveloffset=+2] - -[role="_additional-resources"] -.Additional resources -* For more details on running add-on Operators in {product-title}, see the _Operators_ guide sections on xref:../operators/understanding/olm/olm-understanding-olm.adoc#olm-understanding-olm[Operator Lifecycle Manager (OLM)] and xref:../operators/understanding/olm-understanding-operatorhub.adoc#olm-understanding-operatorhub[OperatorHub]. -* For more details on the Operator SDK, see xref:../operators/operator_sdk/osdk-about.adoc#osdk-about[Developing Operators]. 
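-
-To illustrate how an add-on Operator is requested through OLM, the following minimal sketch shows a `Subscription` object. The Operator name and channel are hypothetical; OperatorHub in the web console generates objects of this shape for you:
-
-[source,yaml]
-----
-apiVersion: operators.coreos.com/v1alpha1
-kind: Subscription
-metadata:
-  name: example-operator # hypothetical Operator name
-  namespace: openshift-operators
-spec:
-  channel: stable # update channel to track
-  name: example-operator
-  source: redhat-operators # catalog source that provides the Operator
-  sourceNamespace: openshift-marketplace
-----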
- -include::modules/etcd-overview.adoc[leveloffset=+1] - -// These modules only apply to ROSA/OSD -ifdef::openshift-dedicated,openshift-rosa[] -include::modules/cpmso-feat-auto-update.adoc[leveloffset=+1] -include::modules/cpmso-control-plane-recovery.adoc[leveloffset=+1] -endif::openshift-dedicated,openshift-rosa[] - -// These xrefs do not apply to OSD/ROSA -ifndef::openshift-dedicated,openshift-rosa[] -[role="_additional-resources"] -.Additional resources -* xref:../scalability_and_performance/recommended-performance-scale-practices/recommended-etcd-practices.adoc#recommended-etcd-practices[Recommended etcd practices] -* xref:../backup_and_restore/control_plane_backup_and_restore/backing-up-etcd.adoc#backing-up-etcd[Backing up etcd] -endif::openshift-dedicated,openshift-rosa[] \ No newline at end of file diff --git a/architecture/images b/architecture/images deleted file mode 120000 index 5e67573196d8..000000000000 --- a/architecture/images +++ /dev/null @@ -1 +0,0 @@ -../images \ No newline at end of file diff --git a/architecture/index.adoc b/architecture/index.adoc deleted file mode 100644 index eb419440bbc9..000000000000 --- a/architecture/index.adoc +++ /dev/null @@ -1,105 +0,0 @@ -:_mod-docs-content-type: ASSEMBLY -[id="architecture-overview"] -= Architecture overview -include::_attributes/common-attributes.adoc[] -ifdef::openshift-dedicated,openshift-rosa[] -include::_attributes/attributes-openshift-dedicated.adoc[] -endif::openshift-dedicated,openshift-rosa[] -:context: architecture-overview - -toc::[] - -{product-title} is a cloud-based Kubernetes container platform. -The foundation of {product-title} is based on Kubernetes and therefore shares the same technology. -To learn more about {product-title} and Kubernetes, see xref:../architecture/architecture.adoc#architecture[product architecture]. - -include::modules/openshift-architecture-common-terms.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -// Topic not included in the OSD/ROSA docs -ifndef::openshift-dedicated,openshift-rosa[] -* For more information on networking, see xref:../networking/understanding-networking.adoc#understanding-networking[{product-title} networking]. -endif::openshift-dedicated,openshift-rosa[] -* For more information on storage, see xref:../storage/index.adoc#index[{product-title} storage]. -* For more information on authentication, see xref:../authentication/index.adoc#index[{product-title} authentication]. -* For more information on Operator Lifecycle Manager (OLM), see xref:../operators/understanding/olm/olm-understanding-olm.adoc#olm-understanding-olm[OLM]. -// Topic not included in the OSD/ROSA docs -ifndef::openshift-dedicated,openshift-rosa[] -* For more information on over-the-air (OTA) updates, see xref:../updating/understanding_updates/intro-to-updates.adoc#understanding-openshift-updates[Introduction to OpenShift updates]. 
-endif::openshift-dedicated,openshift-rosa[]
-
-ifdef::openshift-dedicated,openshift-rosa[]
-include::modules/sd-vs-ocp.adoc[leveloffset=+1]
-endif::openshift-dedicated,openshift-rosa[]
-
-ifndef::openshift-dedicated,openshift-rosa[]
-[id="about-installation-and-updates"]
-== About installation and updates
-
-As a cluster administrator, you can use the {product-title} xref:../architecture/architecture-installation.adoc#architecture-installation[installation program] to install and deploy a cluster by using one of the following methods:
-
-* Installer-provisioned infrastructure
-* User-provisioned infrastructure
-endif::openshift-dedicated,openshift-rosa[]
-
-[id="about-control-planes"]
-== About the control plane
-
-The xref:../architecture/control-plane.adoc#control-plane[control plane] manages the worker nodes and the pods in your cluster. You can configure nodes with the use of machine config pools (MCPs).
-MCPs are groups of machines, such as control plane machines or user workload (worker) machines, that are grouped based on the resources that they handle.
-{product-title} assigns different roles to hosts. These roles define the function of a machine in a cluster.
-The cluster contains definitions for the standard control plane and worker role types.
-
-You can use Operators to package, deploy, and manage services on the control plane.
-Operators are important components in {product-title} because they provide the following services:
-
-* Perform health checks
-* Provide ways to watch applications
-* Manage over-the-air updates
-* Ensure applications stay in the specified state
-
-ifndef::openshift-dedicated,openshift-rosa[]
-[role="_additional-resources"]
-.Additional resources
-
-* xref:../hosted_control_planes/index.adoc#hcp-overview[{hcp-capital} overview]
-endif::openshift-dedicated,openshift-rosa[]
-
-[id="about-containerized-applications-for-developers"]
-== About containerized applications for developers
-
-As a developer, you can use different tools, methods, and formats to xref:../architecture/understanding-development.adoc#understanding-development[develop your containerized application] based on your unique requirements, for example:
-
-* Use various build-tool, base-image, and registry options to build a simple container application.
-* Use supporting components such as OperatorHub and templates to develop your application.
-* Package and deploy your application as an Operator.
-
-You can also create a Kubernetes manifest and store it in a Git repository.
-Kubernetes works on basic units called pods. A pod is a single instance of a running process in your cluster. Pods can contain one or more containers.
-You can create a service by grouping a set of pods and their access policies.
-Services provide permanent internal IP addresses and host names for other applications to use as pods are created and destroyed. Kubernetes defines workloads based on the type of your application.
-
-ifndef::openshift-dedicated,openshift-rosa[]
-[id="coreos-and-ignition"]
-== About {op-system-first} and Ignition
-
-As a cluster administrator, you can perform the following {op-system-first} tasks:
-
-** Learn about the next generation of xref:../architecture/architecture-rhcos.adoc#architecture-rhcos[single-purpose container operating system technology].
-
-** Choose how to configure {op-system-first}.
-** Choose how to deploy {op-system-first}:
-*** Installer-provisioned deployment
-*** User-provisioned deployment
-
-The {product-title} installation program creates the Ignition configuration files that you need to deploy your cluster.
-{op-system-first} uses Ignition during the initial configuration to perform common disk tasks, such as partitioning, formatting, writing files, and configuring users.
-During the first boot, Ignition reads its configuration from the installation media or the location that you specify and applies the configuration to the machines.
-
-You can learn how xref:../architecture/architecture-rhcos.adoc#architecture-rhcos[Ignition works], review the Ignition process for a {op-system-first} machine in an {product-title} cluster, view Ignition configuration files, and change Ignition configuration after an installation.
-endif::openshift-dedicated,openshift-rosa[]
-
-[id="about-admission-plug-ins"]
-== About admission plugins
-You can use xref:../architecture/admission-plug-ins.adoc#admission-plug-ins[admission plugins] to regulate how {product-title} functions. After a resource request is authenticated and authorized, admission plugins intercept the resource request to the master API to validate resource requests and to ensure that scaling policies are adhered to. Admission plugins are used to enforce security policies, resource limitations, configuration requirements, and other settings.
diff --git a/architecture/mce-overview-ocp.adoc b/architecture/mce-overview-ocp.adoc
deleted file mode 100644
index 94ec6e1854de..000000000000
--- a/architecture/mce-overview-ocp.adoc
+++ /dev/null
@@ -1,35 +0,0 @@
-:_mod-docs-content-type: ASSEMBLY
-[id="mce-overview-ocp"]
-= About the {mce}
-include::_attributes/common-attributes.adoc[]
-:context: mce-overview-ocp
-
-toc::[]
-
-One of the challenges of scaling Kubernetes environments is managing the lifecycle of a growing fleet. To meet that challenge, you can use the {mce-short}. The operator delivers full lifecycle capabilities for managed {product-title} clusters and partial lifecycle management for other Kubernetes distributions. It is available in two ways:
-
-* As a standalone operator that you install as part of your {product-title} or {oke} subscription
-* As part of link:https://docs.redhat.com/en/documentation/red_hat_advanced_cluster_management_for_kubernetes[Red Hat Advanced Cluster Management for Kubernetes]
-
-[id="mce-on-ocp"]
-== Cluster management with multicluster engine on {product-title}
-
-When you enable multicluster engine on {product-title}, you gain the following capabilities:
-
-* xref:../hosted_control_planes/index.adoc#hcp-overview[{hcp-capital}], which is a feature that is based on the HyperShift project. With a centralized hosted control plane, you can operate {product-title} clusters in a hyperscale manner.
-* Hive, which provisions self-managed {product-title} clusters to the hub and completes the initial configurations for those clusters.
-* klusterlet agent, which registers managed clusters to the hub.
-* Infrastructure Operator, which manages the deployment of the Assisted Service to orchestrate on-premises bare metal and vSphere installations of {product-title}, such as {sno} on bare metal.
-The Infrastructure Operator includes xref:../edge_computing/ztp-deploying-far-edge-clusters-at-scale.adoc#ztp-challenges-of-far-edge-deployments_ztp-deploying-far-edge-clusters-at-scale[{ztp-first}], which fully automates cluster creation for bare metal and vSphere provisioning, using GitOps workflows to manage deployments and configuration changes.
-* Open cluster management, which provides resources to manage Kubernetes clusters.
-
-The multicluster engine is included with your {product-title} support subscription and is delivered separately from the core payload. To start using multicluster engine, you deploy the {product-title} cluster and then install the operator. For more information, see link:https://docs.redhat.com/en/documentation/red_hat_advanced_cluster_management_for_kubernetes/2.13/html/clusters/cluster_mce_overview#mce-install-intro[Installing and upgrading multicluster engine operator].
-
-[id="mce-on-rhacm"]
-== Cluster management with Red Hat Advanced Cluster Management
-
-If you need cluster management capabilities beyond what {product-title} with multicluster engine can provide, consider Red Hat Advanced Cluster Management. The multicluster engine is an integral part of Red Hat Advanced Cluster Management and is enabled by default.
-
-[id="mce-additional-resources-ocp"]
-== Additional resources
-
-For the complete documentation for multicluster engine, see link:https://docs.redhat.com/en/documentation/red_hat_advanced_cluster_management_for_kubernetes/2.13/html/clusters/cluster_mce_overview[Cluster lifecycle with multicluster engine documentation], which is part of the product documentation for Red Hat Advanced Cluster Management.
\ No newline at end of file
diff --git a/architecture/modules b/architecture/modules
deleted file mode 120000
index 464b823aca16..000000000000
--- a/architecture/modules
+++ /dev/null
@@ -1 +0,0 @@
-../modules
\ No newline at end of file
diff --git a/architecture/nvidia-gpu-architecture-overview.adoc b/architecture/nvidia-gpu-architecture-overview.adoc
deleted file mode 100644
index 11d53f8200ed..000000000000
--- a/architecture/nvidia-gpu-architecture-overview.adoc
+++ /dev/null
@@ -1,104 +0,0 @@
-:_mod-docs-content-type: ASSEMBLY
-[id="nvidia-gpu-architecture-overview"]
-= NVIDIA GPU architecture overview
-include::_attributes/common-attributes.adoc[]
-:context: nvidia-gpu-architecture-overview
-
-toc::[]
-
-NVIDIA supports the use of graphics processing unit (GPU) resources on {product-title}. {product-title} is a security-focused and hardened Kubernetes platform developed and supported by Red Hat for deploying and managing Kubernetes clusters at scale. {product-title} includes enhancements to Kubernetes so that users can easily configure and use NVIDIA GPU resources to accelerate workloads.
-
-The NVIDIA GPU Operator leverages the Operator framework within {product-title} to manage the full lifecycle of NVIDIA software components required to run GPU-accelerated workloads.
-
-These components include the NVIDIA drivers (to enable CUDA), the Kubernetes device plugin for GPUs, the NVIDIA Container Toolkit, automatic node tagging using GPU feature discovery (GFD), DCGM-based monitoring, and others.
-
-[NOTE]
-====
-The NVIDIA GPU Operator is supported only by NVIDIA. For more information about obtaining support from NVIDIA, see link:https://access.redhat.com/solutions/5174941[Obtaining Support from NVIDIA].
-==== - -include::modules/nvidia-gpu-prerequisites.adoc[leveloffset=+1] - -// New enablement modules -ifndef::openshift-dedicated,openshift-rosa[] -include::modules/nvidia-gpu-enablement.adoc[leveloffset=+1] - -include::modules/nvidia-gpu-bare-metal.adoc[leveloffset=+2] -[role="_additional-resources"] -.Additional resources -* link:https://docs.nvidia.com/ai-enterprise/deployment-guide-openshift-on-bare-metal/0.1.0/on-bare-metal.html[Red Hat OpenShift on Bare Metal Stack] - -include::modules/nvidia-gpu-virtualization.adoc[leveloffset=+2] -[role="_additional-resources"] -.Additional resources -* link:https://docs.nvidia.com/datacenter/cloud-native/gpu-operator/latest/openshift/openshift-virtualization.html[NVIDIA GPU Operator with OpenShift Virtualization] - -include::modules/nvidia-gpu-vsphere.adoc[leveloffset=+2] -[role="_additional-resources"] -.Additional resources -* link:https://docs.nvidia.com/datacenter/cloud-native/gpu-operator/latest/openshift/nvaie-with-ocp.html#openshift-container-platform-on-vmware-vsphere-with-nvidia-vgpus[OpenShift Container Platform on VMware vSphere with NVIDIA vGPUs] - -include::modules/nvidia-gpu-kvm.adoc[leveloffset=+2] -[role="_additional-resources"] -.Additional resources -* link:https://computingforgeeks.com/how-to-deploy-openshift-container-platform-on-kvm/[How To Deploy OpenShift Container Platform 4.13 on KVM] - -include::modules/nvidia-gpu-csps.adoc[leveloffset=+2] -[role="_additional-resources"] -.Additional resources -* link:https://docs.nvidia.com/ai-enterprise/deployment-guide-cloud/0.1.0/aws-redhat-openshift.html[Red Hat Openshift in the Cloud] -endif::openshift-dedicated,openshift-rosa[] - -// Include this module at a higher leveloffset for OSD/ROSA. -ifdef::openshift-dedicated,openshift-rosa[] -include::modules/nvidia-gpu-csps.adoc[leveloffset=+1] -[role="_additional-resources"] -.Additional resources -* link:https://docs.nvidia.com/ai-enterprise/deployment-guide-cloud/0.1.0/aws-redhat-openshift.html[Red Hat Openshift in the Cloud] -endif::openshift-dedicated,openshift-rosa[] - -ifndef::openshift-dedicated,openshift-rosa[] -include::modules/nvidia-gpu-red-hat-device-edge.adoc[leveloffset=+2] -[role="_additional-resources"] -.Additional resources -* link:https://cloud.redhat.com/blog/how-to-accelerate-workloads-with-nvidia-gpus-on-red-hat-device-edge[How to accelerate workloads with NVIDIA GPUs on Red Hat Device Edge] -endif::openshift-dedicated,openshift-rosa[] - -// TELCODOCS-1092 GPU sharing methods -include::modules/nvidia-gpu-sharing-methods.adoc[leveloffset=+1] -.Additional resources -* link:https://developer.nvidia.com/blog/improving-gpu-utilization-in-kubernetes/[Improving GPU Utilization] - -include::modules/nvidia-gpu-cuda-streams.adoc[leveloffset=+2] -.Additional resources -* link:https://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#asynchronous-concurrent-execution[Asynchronous Concurrent Execution] - -include::modules/nvidia-gpu-time-slicing.adoc[leveloffset=+2] - -include::modules/nvidia-gpu-cuda-mps.adoc[leveloffset=+2] -.Additional resources -* link:https://docs.nvidia.com/deploy/mps/index.html[CUDA MPS] - -include::modules/nvidia-gpu-mig-gpu.adoc[leveloffset=+2] -.Additional resources -* link:https://docs.nvidia.com/datacenter/tesla/mig-user-guide/[NVIDIA Multi-Instance GPU User Guide] - -include::modules/nvidia-gpu-virtualization-with-gpu.adoc[leveloffset=+2] -.Additional resources -* link:https://www.nvidia.com/en-us/data-center/virtual-solutions/[Virtual GPUs] - 
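-
-To show how a workload consumes a GPU after these components are in place, the following minimal sketch requests one GPU through the extended resource that the NVIDIA device plugin advertises. The pod name and image are hypothetical:
-
-[source,yaml]
-----
-apiVersion: v1
-kind: Pod
-metadata:
-  name: gpu-example # hypothetical name
-spec:
-  restartPolicy: Never
-  containers:
-  - name: cuda-workload
-    image: nvcr.io/nvidia/cuda:12.2.0-base-ubi8 # hypothetical CUDA image
-    resources:
-      limits:
-        nvidia.com/gpu: 1 # request a single GPU
-----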
-
-include::modules/nvidia-gpu-features.adoc[leveloffset=+1]
-[role="_additional-resources"]
-.Additional resources
-
-* link:https://docs.nvidia.com/ngc/ngc-deploy-on-premises/nvidia-certified-systems/index.html[NVIDIA-Certified Systems]
-* link:https://docs.nvidia.com/ai-enterprise/index.html#deployment-guides[NVIDIA AI Enterprise]
-* link:https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/overview.html#[NVIDIA Container Toolkit]
-* link:https://docs.nvidia.com/datacenter/cloud-native/openshift/latest/enable-gpu-monitoring-dashboard.html[Enabling the GPU Monitoring Dashboard]
-* link:https://docs.nvidia.com/datacenter/cloud-native/openshift/latest/mig-ocp.html[MIG Support in OpenShift Container Platform]
-* link:https://docs.nvidia.com/datacenter/cloud-native/openshift/latest/time-slicing-gpus-in-openshift.html[Time-slicing NVIDIA GPUs in OpenShift]
-* link:https://docs.nvidia.com/datacenter/cloud-native/openshift/latest/mirror-gpu-ocp-disconnected.html[Deploy GPU Operators in a disconnected or airgapped environment]
-// Topic not available in OSD/ROSA
-ifndef::openshift-dedicated,openshift-rosa[]
-* xref:../hardware_enablement/psap-node-feature-discovery-operator.adoc[Node Feature Discovery Operator]
-endif::openshift-dedicated,openshift-rosa[]
diff --git a/architecture/ocm-overview-ocp.adoc b/architecture/ocm-overview-ocp.adoc
deleted file mode 100644
index 8ad963fc5d45..000000000000
--- a/architecture/ocm-overview-ocp.adoc
+++ /dev/null
@@ -1,57 +0,0 @@
-:_mod-docs-content-type: ASSEMBLY
-[id="ocm-overview-ocp"]
-= Red Hat OpenShift Cluster Manager
-include::_attributes/attributes-openshift-dedicated.adoc[]
-:context: ocm-overview-ocp
-toc::[]
-
-{cluster-manager-first} is a managed service where you can install, modify, operate, and upgrade your Red Hat OpenShift clusters. This service allows you to work with all of your organization’s clusters from a single dashboard.
-
-{cluster-manager} guides you to install {OCP}, Red Hat OpenShift Service on AWS (ROSA), and {product-short-name} clusters. It is also responsible for managing {OCP} clusters after self-installation, as well as your ROSA and {product-short-name} clusters.
-
-You can use {cluster-manager} to perform the following actions:
-
-* Create new clusters
-* View cluster details and metrics
-* Manage your clusters with tasks such as scaling, changing node labels, networking, and authentication
-* Manage access control
-* Monitor clusters
-* Schedule upgrades
-
-include::modules/ocm-accessing.adoc[leveloffset=+1]
-
-[id="ocm-general-actions-ocp"]
-== General actions
-
-On the top right of the cluster page, there are some actions that a user can perform on the entire cluster:
-
-* **Open console** launches a web console so that the cluster owner can issue commands to the cluster.
-* **Actions** drop-down menu allows the cluster owner to change the display name of the cluster, change the number of load balancers and the amount of persistent storage on the cluster, if applicable, manually set the node count, and delete the cluster.
-* **Refresh** icon forces a refresh of the cluster.
-
-[id="ocm-cluster-tabs-ocp"]
-== Cluster tabs
-
-Selecting an active, installed cluster shows tabs associated with that cluster.
The following tabs display after the cluster's installation completes: - -* Overview -* Access control -* Add-ons -* Networking -* Insights Advisor -* Machine pools -* Support -* Settings - -include::modules/ocm-overview-tab.adoc[leveloffset=+2] -include::modules/ocm-accesscontrol-tab.adoc[leveloffset=+2] -include::modules/ocm-addons-tab.adoc[leveloffset=+2] -include::modules/ocm-insightsadvisor-tab.adoc[leveloffset=+2] -include::modules/ocm-machinepools-tab.adoc[leveloffset=+2] -include::modules/ocm-support-tab.adoc[leveloffset=+2] -include::modules/ocm-settings-tab.adoc[leveloffset=+2] - -[id="ocm-additional-resources-ocp"] -== Additional resources - -* For the complete documentation for {cluster-manager}, see link:https://access.redhat.com/documentation/en-us/openshift_cluster_manager/2022/html-single/managing_clusters/index[{cluster-manager} documentation]. diff --git a/architecture/osd-architecture-models-gcp.adoc b/architecture/osd-architecture-models-gcp.adoc deleted file mode 100644 index 54565a1d07f8..000000000000 --- a/architecture/osd-architecture-models-gcp.adoc +++ /dev/null @@ -1,26 +0,0 @@ -:_mod-docs-content-type: ASSEMBLY -[id="osd-architecture-models-gcp"] -= {product-title} on {GCP} architecture models -include::_attributes/attributes-openshift-dedicated.adoc[] -:context: osd-architecture-models-gcp - -toc::[] - -With {product-title} on {GCP}, you can create clusters that are accessible over public or private networks. - -include::modules/osd-gcp-architecture.adoc[leveloffset=+1] -include::modules/osd-understanding-private-service-connect.adoc[leveloffset=+1] -include::modules/private-service-connect-psc-architecture.adoc[leveloffset=+2] -include::modules/osd-private-psc-architecture-model-gcp.adoc[leveloffset=+1] -include::modules/osd-private-architecture-model-gcp.adoc[leveloffset=+1] -include::modules/osd-public-architecture-model-gcp.adoc[leveloffset=+1] - -[role="_additional-resources"] -[id="osd-architecture-models-additional-resources"] -== Additional resources - -* xref:../osd_gcp_clusters/creating-a-gcp-psc-enabled-private-cluster.adoc#creating-a-gcp-psc-enabled-private-cluster[Private Service Connect overview] - -* xref:../osd_gcp_clusters/creating-a-gcp-cluster-with-workload-identity-federation.adoc#osd-creating-a-cluster-on-gcp-with-workload-identity-federation[Creating a cluster on GCP with Workload Identity Federation authentication] - -* xref:../osd_gcp_clusters/creating-a-gcp-cluster-sa.adoc#osd-creating-a-cluster-on-gcp-sa[Creating a cluster on GCP with Service Account authentication] diff --git a/architecture/rosa-architecture-models.adoc b/architecture/rosa-architecture-models.adoc deleted file mode 100644 index 7e1030c1a0fd..000000000000 --- a/architecture/rosa-architecture-models.adoc +++ /dev/null @@ -1,30 +0,0 @@ -:_mod-docs-content-type: ASSEMBLY -[id="rosa-architecture-models"] -= Architecture models -include::_attributes/attributes-openshift-dedicated.adoc[] -include::_attributes/common-attributes.adoc[] -:context: rosa-architecture-models - -toc::[] - -{product-rosa} (ROSA) has the following cluster topologies: - -* Hosted control plane (HCP) - The control plane is hosted in a Red{nbsp}Hat account and the worker nodes are deployed in the customer's AWS account. -* Classic - The control plane and the worker nodes are deployed in the customer's AWS account. 
-
-include::modules/rosa-hcp-classic-comparison.adoc[leveloffset=+1]
-
-.Additional resources
-
-* xref:../rosa_architecture/rosa_policy_service_definition/rosa-hcp-service-definition.adoc#rosa-sdpolicy-regions-az_rosa-hcp-service-definition[Regions and availability zones]
-
-* xref:../rosa_architecture/rosa_policy_service_definition/rosa-policy-process-security.adoc#rosa-policy-security-regulation-compliance_rosa-policy-process-security[Security and regulation compliance]
-
-include::modules/rosa-hcp-architecture.adoc[leveloffset=+1]
-include::modules/rosa-architecture.adoc[leveloffset=+1]
-include::modules/osd-aws-privatelink-architecture.adoc[leveloffset=+2]
-include::modules/rosa-architecture-local-zones.adoc[leveloffset=+2]
-
-.Additional resources
-
-* xref:../rosa_cluster_admin/rosa_nodes/rosa-nodes-machinepools-configuring.adoc[Configuring machine pools in Local Zones]
diff --git a/architecture/snippets b/architecture/snippets
deleted file mode 120000
index 9f5bc7e4dde0..000000000000
--- a/architecture/snippets
+++ /dev/null
@@ -1 +0,0 @@
-../snippets
\ No newline at end of file
diff --git a/architecture/understanding-development.adoc b/architecture/understanding-development.adoc
deleted file mode 100644
index 28e8b1afffef..000000000000
--- a/architecture/understanding-development.adoc
+++ /dev/null
@@ -1,396 +0,0 @@
-:_mod-docs-content-type: ASSEMBLY
-[id="understanding-development"]
-= Understanding {product-title} development
-include::_attributes/common-attributes.adoc[]
-:context: understanding-development
-
-toc::[]
-
-To fully leverage the capability of containers when developing and running
-enterprise-quality applications, ensure your environment is supported by tools
-that allow containers to be:
-
-* Created as discrete microservices that can be connected to other
-containerized, and non-containerized, services. For example, you might want to
-join your application with a database or attach a monitoring application to it.
-
-* Resilient, so if a server crashes or needs to go down for maintenance or to be
-decommissioned, containers can start on another machine.
-
-* Automated to pick up code changes automatically and then start and deploy new
-versions of themselves.
-
-* Scaled up, or replicated, to have more instances serving clients as demand
-increases and then spun down to fewer instances as demand declines.
-
-* Run in different ways, depending on the type of application. For example, one
-application might run once a month to produce a report and then exit. Another
-application might need to run constantly and be highly available to clients.
-
-* Managed so you can watch the state of your application and react when
-something goes wrong.
-
-Containers’ widespread acceptance, and the resulting requirements for tools and
-methods to make them enterprise-ready, resulted in many options for working with them.
-
-The rest of this section explains options for
-assets you can create when you build and deploy containerized Kubernetes
-applications in {product-title}. It also describes which approaches you might
-use for different kinds of applications and development requirements.
-
-[id="developing-containerized-applications"]
-== About developing containerized applications
-
-You can approach application development with containers in many ways, and
-different approaches might be more appropriate for different situations.
-To illustrate some of this variety, the series of approaches that is presented
-starts with developing a single container and ultimately deploys that container
-as a mission-critical application for a large enterprise. These approaches
-show different tools, formats, and methods that you can employ with containerized
-application development. This topic describes:
-
-* Building a simple container and storing it in a registry
-* Creating a Kubernetes manifest and saving it to a Git repository
-* Making an Operator to share your application with others
-
-[id="building-simple-container"]
-== Building a simple container
-
-You have an idea for an application and you want to containerize it.
-
-First, you require a tool for building a container, like buildah or docker,
-and a file that describes what goes in your container, which is typically a
-link:https://docs.docker.com/engine/reference/builder/[Dockerfile].
-
-Next, you require a location to push the resulting container image so you can
-pull it to run anywhere you want it to run. This location is a container
-registry.
-
-Some examples of each of these components are installed by default on most
-Linux operating systems, except for the Dockerfile, which you provide yourself.
-
-The following diagram displays the process of building and pushing an image:
-
-.Create a simple containerized application and push it to a registry
-image::create-push-app.png[Creating and pushing a containerized application]
-
-If you use a computer that runs {op-system-base-full} as the operating
-system, the process of creating a containerized application requires the
-following steps:
-
-. Install container build tools: {op-system-base} contains a set of tools that includes
-podman, buildah, and skopeo that you use to build and manage containers.
-. Create a Dockerfile to combine the base image and software: Information about
-building your container goes into a file that is named `Dockerfile`. In that
-file, you identify the base image you build from, the software packages you
-install, and the software you copy into the container. You also identify
-parameter values like network ports that you expose outside the container and
-volumes that you mount inside the container. Put your Dockerfile and the
-software you want to containerize in a directory on your {op-system-base} system.
-. Run buildah or docker build: Run the `buildah build-using-dockerfile` or
-the `docker build` command to pull your chosen base image to the local system and
-create a container image that is stored locally. You can also build container images
-without a Dockerfile by using buildah.
-. Tag and push to a registry: Add a tag to your new container image that
-identifies the location of the registry in which you want to store and share
-your container. Then push that image to the registry by running the
-`podman push` or `docker push` command.
-. Pull and run the image: From any system that has a container client tool,
-such as podman or docker, run a command that identifies your new image.
-For example, run the `podman run <image_name>` or `docker run <image_name>`
-command. Here `<image_name>` is the name of your new container image, which
-resembles `quay.io/myrepo/myapp:latest`. The registry might require credentials
-to push and pull images.
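-
-If you prefer to run this same build-and-push flow inside the cluster instead of on a workstation, you can express it declaratively. The following minimal sketch shows an {product-title} `BuildConfig` object that uses the Docker strategy; the Git repository and image names are hypothetical:
-
-[source,yaml]
-----
-apiVersion: build.openshift.io/v1
-kind: BuildConfig
-metadata:
-  name: myapp # hypothetical application name
-spec:
-  source: # where the Dockerfile and application source live
-    git:
-      uri: https://github.com/example/myapp.git # hypothetical repository
-  strategy:
-    dockerStrategy: {} # build with the Dockerfile at the repository root
-  output:
-    to: # push the result to the internal image registry
-      kind: ImageStreamTag
-      name: myapp:latest
-----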
-
-ifdef::openshift-origin,openshift-enterprise,openshift-webscale[]
-For more details on the process of building container images, pushing them to
-registries, and running them, see
-xref:../cicd/builds/custom-builds-buildah.adoc#custom-builds-buildah[Custom image builds with Buildah].
-endif::openshift-origin,openshift-enterprise,openshift-webscale[]
-
-[id="container-build-tool-options"]
-=== Container build tool options
-
-Building and managing containers with buildah, podman, and skopeo results in industry-standard container images that include features specifically tuned for deploying containers in {product-title} or other Kubernetes environments. These tools are daemonless and can run without root privileges, so they require less overhead to run.
-
-[IMPORTANT]
-====
-Support for Docker Container Engine as a container runtime is deprecated in Kubernetes 1.20 and will be removed in a future release. However, Docker-produced images will continue to work in your cluster with all runtimes, including CRI-O. For more information, see the link:https://kubernetes.io/blog/2020/12/02/dont-panic-kubernetes-and-docker/[Kubernetes blog announcement].
-====
-
-When you ultimately run your containers in {product-title}, you use the
-link:https://cri-o.io/[CRI-O] container engine. CRI-O runs on every worker and
-control plane machine in an {product-title} cluster, but CRI-O is not yet supported as
-a standalone runtime outside of {product-title}.
-
-[id="base-image-options"]
-=== Base image options
-
-The base image you choose to build your application on contains a set of
-software that resembles a Linux system to your application. When you build your
-own image, your software is placed into that file system and sees that file
-system as though it were looking at its operating system. Choosing this base
-image has a major impact on how secure, efficient, and upgradeable your container
-is in the future.
-
-Red Hat provides a new set of base images referred to as
-link:https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux_atomic_host/7/html-single/getting_started_with_containers/index#using_red_hat_base_container_images_standard_and_minimal[Red Hat Universal Base Images] (UBI).
-These images are based on Red Hat Enterprise Linux and are similar to base
-images that Red Hat has offered in the past, with one major difference: they
-are freely redistributable without a Red Hat subscription. As a result, you can
-build your application on UBI images without having to worry about how they
-are shared or the need to create different images for different environments.
-
-These UBI images have standard, init, and minimal versions. You can also use the
-link:https://access.redhat.com/documentation/en-us/red_hat_software_collections/3/html-single/using_red_hat_software_collections_container_images/index[Red Hat Software Collections]
-images as a foundation for applications that rely on specific runtime
-environments such as Node.js, Perl, or Python. Special versions of some of
-these runtime base images are referred to as Source-to-Image (S2I) images. With
-S2I images, you can insert your code into a base image environment that is ready
-to run that code.
-
-S2I images are available for you to use directly from the {product-title} web UI. In the Developer perspective, navigate to the *+Add* view and in the *Developer Catalog* tile, view all of the available services in the Developer Catalog.
-
-.Choose S2I base images for apps that need specific runtimes
-image::developer-catalog.png[{product-title} Developer Catalog]
-
-[id="understanding-development-registry-options"]
-=== Registry options
-
-Container registries are where you store container images so you can share them
-with others and make them available to the platform where they ultimately run.
-You can select large, public container registries that offer free accounts or
-premium versions that offer more storage and special features. You can also
-install your own registry that can be exclusive to your organization or
-selectively shared with others.
-
-To get Red Hat images and certified partner images, you can draw from the
-Red Hat Registry. The Red Hat Registry is represented by two locations:
-`registry.access.redhat.com`, which is unauthenticated and deprecated, and
-`registry.redhat.io`, which requires authentication. You can learn about the Red
-Hat and partner images in the Red Hat Registry from the
-link:https://catalog.redhat.com/software/containers/explore[Container images section of the Red Hat Ecosystem Catalog].
-Besides listing Red Hat container images, it also shows extensive information
-about the contents and quality of those images, including health scores that are
-based on applied security updates.
-
-Large, public registries include link:https://hub.docker.com/[Docker Hub] and
-link:https://quay.io/[Quay.io]. The Quay.io registry is owned and managed by Red
-Hat. Many of the components used in {product-title} are stored in Quay.io,
-including container images and the Operators that are used to deploy
-{product-title} itself. Quay.io also offers the means of storing other types of
-content, including Helm charts.
-
-If you want your own private container registry, {product-title} itself
-includes a private container registry that is installed with {product-title}
-and runs on its cluster. Red Hat also offers a private version of the Quay.io
-registry called link:https://access.redhat.com/products/red-hat-quay[Red Hat Quay].
-Red Hat Quay includes geo replication, Git build triggers, Clair image scanning,
-and many other features.
-
-All of the registries mentioned here can require credentials to download images
-from those registries. Some of those credentials are presented on a cluster-wide
-basis from {product-title}, while other credentials can be assigned to individuals.
-
-[id="creating-kubernetes-manifest-openshift"]
-== Creating a Kubernetes manifest for {product-title}
-
-While the container image is the basic building block for a containerized
-application, more information is required to manage and deploy that application
-in a Kubernetes environment such as {product-title}. The typical next steps after
-you create an image are to:
-
-* Understand the different resources you work with in Kubernetes manifests
-* Make some decisions about what kind of an application you are running
-* Gather supporting components
-* Create a manifest and store that manifest in a Git repository so you can store
-it in a source versioning system, audit it, track it, promote and deploy it
-to the next environment, roll it back to earlier versions, if necessary, and
-share it with others
-
-[id="understanding-kubernetes-pods"]
-=== About Kubernetes pods and services
-
-While the container image is the basic unit with docker, the basic units that
-Kubernetes works with are called
-link:https://kubernetes.io/docs/concepts/workloads/pods/pod-overview/[pods].
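-
-For orientation, the following minimal sketch shows what a pod manifest looks like; the names are hypothetical and the image echoes the example built earlier:
-
-[source,yaml]
-----
-apiVersion: v1
-kind: Pod
-metadata:
-  name: myapp-pod # hypothetical name
-  labels:
-    app: myapp # label that a service can select on
-spec:
-  containers:
-  - name: myapp
-    image: quay.io/myrepo/myapp:latest # the image pushed to the registry earlier
-    ports:
-    - containerPort: 8080 # hypothetical port the application listens on
-----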
-
-Pods represent the next step in building out an application. A pod can contain
-one or more containers. The key is that the pod is the single unit
-that you deploy, scale, and manage.
-
-Scalability and namespaces are probably the main items to consider when determining
-what goes in a pod. For ease of deployment, you might want to deploy a container
-in a pod and include its own logging and monitoring container in the pod. Later,
-when you run the pod and need to scale up an additional instance, those other
-containers are scaled up with it. For namespaces, containers in a pod share the
-same network interfaces, shared storage volumes, and resource limitations,
-such as memory and CPU, which makes it easier to manage the contents of the pod
-as a single unit. Containers in a pod can also communicate with each other by
-using standard inter-process communications, such as System V semaphores or
-POSIX shared memory.
-
-While individual pods represent a scalable unit in Kubernetes, a
-link:https://kubernetes.io/docs/concepts/services-networking/service/[service]
-provides a means of grouping together a set of pods to create a complete, stable
-application that can complete tasks such as load balancing. A service is also
-more permanent than a pod because the service remains available from the same
-IP address until you delete it. When the service is in use, it is requested by
-name and the {product-title} cluster resolves that name into the IP addresses
-and ports where you can reach the pods that compose the service.
-
-By their nature, containerized applications are separated from the operating
-systems where they run and, by extension, their users. Part of your Kubernetes
-manifest describes how to expose the application to internal and external
-networks by defining
-link:https://kubernetes.io/docs/concepts/services-networking/network-policies/[network policies]
-that allow fine-grained control over communication with your containerized
-applications. To connect incoming requests for HTTP, HTTPS, and other services
-from outside your cluster to services inside your cluster, you can use an
-link:https://kubernetes.io/docs/concepts/services-networking/ingress/[`Ingress`]
-resource.
-
-If your container requires on-disk storage instead of database storage, which
-might be provided through a service, you can add
-link:https://kubernetes.io/docs/concepts/storage/volumes/[volumes]
-to your manifests to make that storage available to your pods. You can configure
-the manifests to create persistent volumes (PVs) or dynamically create volumes that
-are added to your `Pod` definitions.
-
-After you define a group of pods that compose your application, you can define
-those pods in
-link:https://kubernetes.io/docs/concepts/workloads/controllers/deployment/[`Deployment`]
-and
-// This xref points to a topic that is not currently included in the OSD/ROSA docs.
-ifndef::openshift-dedicated,openshift-rosa[]
-xref:../applications/deployments/what-deployments-are.adoc#what-deployments-are[`DeploymentConfig`] objects.
-endif::openshift-dedicated,openshift-rosa[]
-ifdef::openshift-dedicated,openshift-rosa[]
-`DeploymentConfig` objects.
-endif::openshift-dedicated,openshift-rosa[]
-
-[id="application-types"]
-=== Application types
-
-Next, consider how your application type influences how to run it.
-
-Kubernetes defines different types of workloads that are appropriate for
-different kinds of applications.
-To determine the appropriate workload for your
-application, consider if the application is:
-
-* Meant to run to completion and be done. An example is an application that
-starts up to produce a report and exits when the report is complete. The
-application might then not run again for a month. Suitable {product-title}
-objects for these types of applications include
-link:https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/[`Job`]
-and https://kubernetes.io/docs/concepts/workloads/controllers/cron-jobs/[`CronJob`] objects,
-as shown in the sketch after this list.
-
-* Expected to run continuously. For long-running applications, you can write a
-// This xref points to a topic that is not currently included in the OSD/ROSA docs.
-ifndef::openshift-dedicated,openshift-rosa[]
-xref:../applications/deployments/what-deployments-are.adoc#deployments-kube-deployments[deployment].
-endif::openshift-dedicated,openshift-rosa[]
-ifdef::openshift-dedicated,openshift-rosa[]
-deployment.
-endif::openshift-dedicated,openshift-rosa[]
-
-* Required to be highly available. If your application requires high
-availability, then you want to size your deployment to have more than one
-instance. A `Deployment` or `DeploymentConfig` object can incorporate a
-link:https://kubernetes.io/docs/concepts/workloads/controllers/replicaset/[replica set]
-for that type of application. With replica sets, pods run across multiple nodes
-to make sure the application is always available, even if a worker goes down.
-* Needed to run on every node. Some types of Kubernetes applications are intended
-to run in the cluster itself on every master or worker node. DNS and monitoring
-applications are examples of applications that need to run continuously on every
-node. You can run this type of application as a
-link:https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/[daemon set].
-You can also run a daemon set on a subset of nodes, based on node labels.
-* In need of life-cycle management. When you want to hand off your application so
-that others can use it, consider creating an
-link:https://www.openshift.com/learn/topics/operators[Operator]. Operators let you build in
-intelligence, so that your application can handle tasks like backups and upgrades automatically.
-Coupled with the Operator Lifecycle Manager (OLM), cluster managers can expose
-Operators to selected namespaces so that users in the cluster can run them.
-* Subject to identity or numbering requirements. An application might have identity
-requirements or numbering requirements. For example, you might be
-required to run exactly three instances of the application and to name the
-instances `0`, `1`, and `2`. A
-https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/[stateful set]
-is suitable for this application. Stateful sets are most useful for applications
-that require independent storage, such as databases and ZooKeeper clusters.
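-
-As referenced in the first item of the preceding list, a run-to-completion workload can be expressed as a `CronJob`. The following minimal sketch runs a hypothetical report generator once a month; the schedule, name, and image are placeholders:
-
-[source,yaml]
-----
-apiVersion: batch/v1
-kind: CronJob
-metadata:
-  name: monthly-report # hypothetical name
-spec:
-  schedule: "0 0 1 * *" # run at midnight on the first day of each month
-  jobTemplate:
-    spec:
-      template:
-        spec:
-          restartPolicy: OnFailure # do not restart after the job succeeds
-          containers:
-          - name: report
-            image: quay.io/myrepo/report:latest # hypothetical image
-----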
-
-[id="supporting-components"]
-=== Available supporting components
-
-The application you write might need supporting components, like a database or
-a logging component. To fulfill that need, you might be able to obtain the
-required component from the following catalogs that are available in the
-{product-title} web console:
-
-* OperatorHub, which is available in each {product-title} {product-version}
-cluster. The OperatorHub makes Operators available from Red Hat,
-certified Red Hat partners, and community members to the cluster operator. The
-cluster operator can make those Operators available in all or selected
-namespaces in the cluster, so developers can launch them and configure them
-with their applications.
-* Templates, which are useful for a one-off type of application, where the
-lifecycle of a component is not important after it is installed. A template provides an easy
-way to get started developing a Kubernetes application with minimal overhead.
-A template can be a list of resource definitions, which could be `Deployment`,
-`Service`, `Route`, or other objects. If you want to change names or resources,
-you can set these values as parameters in the template.
-
-You can configure the supporting Operators and
-templates to meet the specific needs of your development team and then make them
-available in the namespaces in which your developers work. Many people add
-shared templates to the `openshift` namespace because it is accessible from all
-other namespaces.
-
-[id="applying-manifest"]
-=== Applying the manifest
-
-Kubernetes manifests let you create a more complete picture of the components
-that make up your Kubernetes applications. You write these manifests as YAML
-files and deploy them by applying them to the cluster, for example, by running
-the `oc apply` command.
-
-[id="manifest-next-steps"]
-=== Next steps
-
-At this point, consider ways to automate your container development process.
-Ideally, you have some sort of CI pipeline that builds the images and pushes
-them to a registry. In particular, a GitOps pipeline integrates your container
-development with the Git repositories that you use to store the software that
-is required to build your applications.
-
-The workflow to this point might look like this:
-
-* Day 1: You write some YAML. You then run the `oc apply` command to apply that
-YAML to the cluster and test that it works.
-* Day 2: You put your YAML container configuration file into your own Git
-repository. From there, people who want to install that app, or help you improve
-it, can pull down the YAML and apply it to their cluster to run the app.
-* Day 3: Consider writing an Operator for your application.
-
-[id="develop-for-operators"]
-== Develop for Operators
-
-Packaging and deploying your application as an Operator might be preferred
-if you make your application available for others to run. As noted earlier,
-Operators add a lifecycle component to your application that acknowledges that
-the job of running an application is not complete as soon as it is installed.
-
-When you create an application as an Operator, you can build in your own
-knowledge of how to run and maintain the application. You can build in features
-for upgrading the application, backing it up, scaling it, or keeping track of
-its state. If you configure the application correctly, maintenance tasks,
-like updating the Operator, can happen automatically and invisibly to the
-Operator's users.
-
-An example of a useful Operator is one that is set up to automatically back up
-data at particular times. Having an Operator manage an application's backup at
-set times can save a system administrator from remembering to do it.
-
-Any application maintenance that has traditionally been completed manually,
-like backing up data or rotating certificates, can be completed automatically
-with an Operator.
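-
-To make this life-cycle idea concrete, the following sketch shows what a custom resource managed by such an Operator might look like. The API group, kind, and fields are entirely hypothetical; a real Operator defines its own schema:
-
-[source,yaml]
-----
-apiVersion: myapp.example.com/v1alpha1 # hypothetical API group
-kind: MyApp # hypothetical kind defined by the Operator
-metadata:
-  name: production-instance
-spec:
-  replicas: 3 # the Operator keeps three instances running
-  version: "2.4" # the Operator handles upgrades to this version
-  backup:
-    schedule: "0 2 * * *" # the Operator backs up data at 02:00 daily
-----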
[id="develop-for-operators"] -== Develop for Operators - -Packaging and deploying your application as an Operator might be preferable -if you make your application available for others to run. As noted earlier, -Operators add a lifecycle component to your application that acknowledges that -the job of running an application is not complete as soon as it is installed. - -When you create an application as an Operator, you can build in your own -knowledge of how to run and maintain the application. You can build in features -for upgrading the application, backing it up, scaling it, or keeping track of -its state. If you configure the application correctly, maintenance tasks, -like updating the Operator, can happen automatically and invisibly to the -Operator's users. - -An example of a useful Operator is one that is set up to automatically back up -data at particular times. Having an Operator manage an application's backup at -set times can save a system administrator from remembering to do it. - -Any application maintenance that has traditionally been completed manually, -like backing up data or rotating certificates, can be completed automatically -with an Operator. diff --git a/authentication/assuming-an-aws-iam-role-for-a-service-account.adoc b/authentication/assuming-an-aws-iam-role-for-a-service-account.adoc deleted file mode 100644 index 3b6f55d07d8f..000000000000 --- a/authentication/assuming-an-aws-iam-role-for-a-service-account.adoc +++ /dev/null @@ -1,42 +0,0 @@ -:_mod-docs-content-type: ASSEMBLY -[id="assuming-an-aws-iam-role-for-a-service-account"] -= Assuming an AWS IAM role for a service account -include::_attributes/common-attributes.adoc[] -ifdef::openshift-rosa,openshift-dedicated,openshift-rosa-hcp[] -include::_attributes/attributes-openshift-dedicated.adoc[] -endif::openshift-rosa,openshift-dedicated,openshift-rosa-hcp[] -:context: assuming-an-aws-iam-role-for-a-service-account - -toc::[] - -[role="_abstract"] -ifdef::openshift-rosa,openshift-rosa-hcp[] -In {product-title} clusters that use the AWS Security Token Service (STS), the OpenShift API server can be enabled to project signed service account tokens that can be used to assume an AWS Identity and Access Management (IAM) role in a pod. If the assumed IAM role has the required AWS permissions, the pods can authenticate against the AWS API using temporary STS credentials to perform AWS operations. -endif::openshift-rosa,openshift-rosa-hcp[] - -You can use the pod identity webhook to project service account tokens to assume an AWS Identity and Access Management (IAM) role for your own workloads. If the assumed IAM role has the required AWS permissions, the pods can run AWS SDK operations by using temporary STS credentials. - -include::modules/how-service-accounts-assume-aws-iam-roles-in-sre-owned-projects.adoc[leveloffset=+1] -include::modules/understanding-pod-identity-webhook-workflow-in-user-defined-projects.adoc[leveloffset=+1] -include::modules/assuming-an-aws-iam-role-in-your-own-pods.adoc[leveloffset=+1] -include::modules/setting-up-an-aws-iam-role-a-service-account.adoc[leveloffset=+2] -include::modules/creating-a-service-account-in-your-project.adoc[leveloffset=+2] -include::modules/creating-an-example-aws-sdk-container-image.adoc[leveloffset=+2] -include::modules/deploying-a-pod-that-includes-an-aws-sdk.adoc[leveloffset=+2] -include::modules/verifying-the-assumed-iam-role-in-your-pod.adoc[leveloffset=+2] - -[role="_additional-resources"] -[id="additional-resources_assuming-an-aws-iam-role-for-a-service-account"] -== Additional resources - -* For more information about using AWS IAM roles with service accounts, see link:https://docs.aws.amazon.com/eks/latest/userguide/iam-roles-for-service-accounts.html[IAM roles for service accounts] in the AWS documentation. - -* For information about AWS IAM role delegation, see link:https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_create_for-service.html[Creating a role to delegate permissions to an AWS service] in the AWS documentation. - -* For details about AWS SDKs, see link:https://docs.aws.amazon.com/sdkref/latest/guide/overview.html[AWS SDKs and Tools Reference Guide] in the AWS documentation. - -* For more information about installing and using the AWS Boto3 SDK for Python, see the link:https://boto3.amazonaws.com/v1/documentation/api/latest/index.html[AWS Boto3 documentation]. - -ifdef::openshift-rosa,openshift-dedicated,openshift-rosa-hcp[] -* For general information about webhook admission plugins for OpenShift, see link:https://docs.openshift.com/container-platform/4.18/architecture/admission-plug-ins.html#admission-webhooks-about_admission-plug-ins[Webhook admission plugins] in the OpenShift Container Platform documentation. -endif::openshift-rosa,openshift-dedicated,openshift-rosa-hcp[]
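The service-account side of the flow that the modules above describe reduces to an annotation on the service account. The following sketch assumes the EKS-style `eks.amazonaws.com/role-arn` annotation that the pod identity webhook consumes; the role ARN and names are hypothetical placeholders, so confirm the exact key against the setup module above.

[source,yaml]
----
# A sketch only: the role ARN and names are hypothetical placeholders.
apiVersion: v1
kind: ServiceAccount
metadata:
  name: aws-sdk-app          # hypothetical service account
  namespace: my-project      # hypothetical project
  annotations:
    # The pod identity webhook matches this annotation and injects the
    # projected token and AWS credential environment variables into pods.
    eks.amazonaws.com/role-arn: arn:aws:iam::123456789012:role/my-pod-role
----

Pods that run with this service account receive a projected, signed token that the AWS SDK exchanges for temporary STS credentials, provided that the IAM role's trust policy allows the cluster's OIDC provider to assume it.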
diff --git a/authentication/bound-service-account-tokens.adoc b/authentication/bound-service-account-tokens.adoc deleted file mode 100644 index 791036d2c566..000000000000 --- a/authentication/bound-service-account-tokens.adoc +++ /dev/null @@ -1,30 +0,0 @@ -:_mod-docs-content-type: ASSEMBLY -[id="bound-service-account-tokens"] -= Using bound service account tokens -include::_attributes/common-attributes.adoc[] -:context: bound-service-account-tokens - -toc::[] - -You can use bound service account tokens, which improve the ability to integrate with cloud provider identity and access management (IAM) services, such as AWS IAM or Google Cloud Platform IAM. - -// About bound service account tokens -include::modules/bound-sa-tokens-about.adoc[leveloffset=+1] - -// Configuring bound service account tokens using volume projection -include::modules/bound-sa-tokens-configuring.adoc[leveloffset=+1] - -// Creating bound service account tokens outside the pod -include::modules/bound-sa-tokens-configuring-externally.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -// This xref target does not exist in the OSD/ROSA docs. -ifndef::openshift-dedicated,openshift-rosa,openshift-rosa-hcp[] -* xref:../nodes/nodes/nodes-nodes-rebooting.adoc#nodes-nodes-rebooting-gracefully_nodes-nodes-rebooting[Rebooting a node gracefully] -endif::openshift-dedicated,openshift-rosa,openshift-rosa-hcp[] - -* xref:../authentication/understanding-and-creating-service-accounts.adoc#service-accounts-managing_understanding-service-accounts[Creating service accounts] - -// TODO: Verify distros: openshift-enterprise,openshift-webscale,openshift-origin
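The volume projection that the configuration modules above cover comes down to a pod spec fragment like the following sketch; the names, image, audience, and expiration values are illustrative assumptions rather than required values.

[source,yaml]
----
apiVersion: v1
kind: Pod
metadata:
  name: sdk-pod                      # hypothetical pod
spec:
  serviceAccountName: build-robot    # hypothetical service account
  containers:
  - name: app
    image: registry.example.com/app:latest   # placeholder image
    volumeMounts:
    - name: bound-token
      mountPath: /var/run/secrets/tokens
      readOnly: true
  volumes:
  - name: bound-token
    projected:
      sources:
      - serviceAccountToken:
          path: token
          audience: openshift        # illustrative audience value
          expirationSeconds: 3600    # the kubelet rotates the token as it nears expiry
----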
diff --git a/authentication/configuring-internal-oauth.adoc b/authentication/configuring-internal-oauth.adoc deleted file mode 100644 index 2d37dfb0e118..000000000000 --- a/authentication/configuring-internal-oauth.adoc +++ /dev/null @@ -1,23 +0,0 @@ -:_mod-docs-content-type: ASSEMBLY -[id="configuring-internal-oauth"] -= Configuring the internal OAuth server -include::_attributes/common-attributes.adoc[] -:context: configuring-internal-oauth - -toc::[] - -include::modules/oauth-server-overview.adoc[leveloffset=+1] - -include::modules/oauth-internal-tokens.adoc[leveloffset=+1] - -include::modules/oauth-internal-options.adoc[leveloffset=+1] - -include::modules/oauth-configuring-internal-oauth.adoc[leveloffset=+1] - -include::modules/oauth-configuring-token-inactivity-timeout.adoc[leveloffset=+1] - -include::modules/oauth-customizing-the-oauth-server-URL.adoc[leveloffset=+1] - -include::modules/oauth-server-metadata.adoc[leveloffset=+1] - -include::modules/oauth-troubleshooting-api-events.adoc[leveloffset=+1] diff --git a/authentication/configuring-ldap-failover.adoc b/authentication/configuring-ldap-failover.adoc deleted file mode 100644 index 5558ccb42d60..000000000000 --- a/authentication/configuring-ldap-failover.adoc +++ /dev/null @@ -1,19 +0,0 @@ -:_mod-docs-content-type: ASSEMBLY -[id="configuring-ldap-failover"] -= Configuring LDAP failover -include::_attributes/common-attributes.adoc[] -:context: sssd-ldap-failover - -toc::[] - -include::modules/ldap-failover-overview.adoc[] - -include::modules/ldap-failover-prereqs.adoc[leveloffset=+1] - -include::modules/ldap-failover-generate-certs.adoc[leveloffset=+1] - -include::modules/ldap-failover-configure-sssd.adoc[leveloffset=+1] - -include::modules/ldap-failover-configure-apache.adoc[leveloffset=+1] - -include::modules/ldap-failover-configure-openshift.adoc[leveloffset=+1] diff --git a/authentication/configuring-oauth-clients.adoc b/authentication/configuring-oauth-clients.adoc deleted file mode 100644 index f41836a8276b..000000000000 --- a/authentication/configuring-oauth-clients.adoc +++ /dev/null @@ -1,23 +0,0 @@ -:_mod-docs-content-type: ASSEMBLY -[id="configuring-oauth-clients"] -= Configuring OAuth clients -include::_attributes/common-attributes.adoc[] -:context: configuring-oauth-clients - -toc::[] - -Several OAuth clients are created by default in {product-title}. You can also register and configure additional OAuth clients. - -// Default OAuth clients -include::modules/oauth-default-clients.adoc[leveloffset=+1] - -// Register an additional OAuth client -include::modules/oauth-register-additional-client.adoc[leveloffset=+1] - -// Configuring token inactivity timeout for OAuth clients -include::modules/oauth-configuring-token-inactivity-timeout-clients.adoc[leveloffset=+1] - -[role="_additional-resources"] -== Additional resources - -* xref:../rest_api/oauth_apis/oauthclient-oauth-openshift-io-v1.adoc#oauthclient-oauth-openshift-io-v1[OAuthClient [oauth.openshift.io/v1]] diff --git a/authentication/dedicated-understanding-authentication.adoc b/authentication/dedicated-understanding-authentication.adoc deleted file mode 100644 index 23927ffdaa5f..000000000000 --- a/authentication/dedicated-understanding-authentication.adoc +++ /dev/null @@ -1,39 +0,0 @@ -:_mod-docs-content-type: ASSEMBLY -[id="understanding-identity-provider"] -= Understanding identity provider configuration -include::_attributes/common-attributes.adoc[] -:context: understanding-identity-provider - -toc::[] - -include::modules/identity-provider-parameters.adoc[leveloffset=+1] - -[id="supported-identity-providers"] -== Supported identity providers - -You can configure the following types of identity providers: - -[cols="2a,8a",options="header"] -|=== - -|Identity provider -|Description - -|xref:../authentication/identity_providers/configuring-ldap-identity-provider.adoc#configuring-ldap-identity-provider[LDAP] -|Configure the `ldap` identity provider to validate user names and passwords -against an LDAPv3 server, using simple bind authentication. - -|xref:../authentication/identity_providers/configuring-github-identity-provider.adoc#configuring-github-identity-provider[GitHub or GitHub Enterprise] -|Configure a `github` identity provider to validate user names and passwords -against GitHub or GitHub Enterprise's OAuth authentication server. - -|xref:../authentication/identity_providers/configuring-google-identity-provider.adoc#configuring-google-identity-provider[Google] -|Configure a `google` identity provider using -link:https://developers.google.com/identity/protocols/OpenIDConnect[Google's OpenID Connect integration]. - -|xref:../authentication/identity_providers/configuring-oidc-identity-provider.adoc#configuring-oidc-identity-provider[OpenID Connect] -|Configure an `oidc` identity provider to integrate with an OpenID Connect -identity provider using an -link:http://openid.net/specs/openid-connect-core-1_0.html#CodeFlowAuth[Authorization Code Flow]. - -|===
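The identity provider types in the preceding table are configured as entries in the cluster `OAuth` resource. As a minimal sketch for the LDAP row, with hypothetical placeholder URL, bind identity, and secret names:

[source,yaml]
----
apiVersion: config.openshift.io/v1
kind: OAuth
metadata:
  name: cluster
spec:
  identityProviders:
  - name: ldapidp                   # hypothetical provider name
    mappingMethod: claim
    type: LDAP
    ldap:
      url: "ldaps://ldap.example.com/ou=users,dc=example,dc=com?uid"  # placeholder
      insecure: false
      bindDN: "cn=reader,dc=example,dc=com"    # placeholder bind identity
      bindPassword:
        name: ldap-bind-password               # secret holding the bind password
      ca:
        name: ca-config-map                    # config map with the CA bundle
      attributes:
        id: ["dn"]
        preferredUsername: ["uid"]
        name: ["cn"]
        email: ["mail"]
----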
diff --git a/authentication/identity_providers/_attributes b/authentication/identity_providers/_attributes deleted file mode 120000 index 20cc1dcb77bf..000000000000 --- a/authentication/identity_providers/_attributes +++ /dev/null @@ -1 +0,0 @@ -../../_attributes/ \ No newline at end of file diff --git a/authentication/identity_providers/configuring-basic-authentication-identity-provider.adoc b/authentication/identity_providers/configuring-basic-authentication-identity-provider.adoc deleted file mode 100644 index 70691f0be455..000000000000 --- a/authentication/identity_providers/configuring-basic-authentication-identity-provider.adoc +++ /dev/null @@ -1,31 +0,0 @@ -:_mod-docs-content-type: ASSEMBLY -[id="configuring-basic-authentication-identity-provider"] -= Configuring a basic authentication identity provider -include::_attributes/common-attributes.adoc[] -:context: configuring-basic-authentication-identity-provider - -toc::[] - -Configure the `basic-authentication` identity provider for users to log in to {product-title} with credentials validated against a remote identity provider. Basic authentication is a generic back-end integration mechanism. - -include::modules/identity-provider-overview.adoc[leveloffset=+1] - -include::modules/identity-provider-about-basic-authentication.adoc[leveloffset=+1] - -include::modules/identity-provider-secret-tls.adoc[leveloffset=+1] - -include::modules/identity-provider-config-map.adoc[leveloffset=+1] - -include::modules/identity-provider-basic-authentication-CR.adoc[leveloffset=+1] - -// Included here so that it is associated with the above module -[role="_additional-resources"] -.Additional resources - -* See xref:../../authentication/understanding-identity-provider.adoc#identity-provider-parameters_understanding-identity-provider[Identity provider parameters] for information on parameters, such as `mappingMethod`, that are common to all identity providers. - -include::modules/identity-provider-add.adoc[leveloffset=+1] - -include::modules/example-apache-httpd-configuration.adoc[leveloffset=+1] - -include::modules/identity-provider-basic-authentication-troubleshooting.adoc[leveloffset=+1] diff --git a/authentication/identity_providers/configuring-github-identity-provider.adoc b/authentication/identity_providers/configuring-github-identity-provider.adoc deleted file mode 100644 index e170b8ff627b..000000000000 --- a/authentication/identity_providers/configuring-github-identity-provider.adoc +++ /dev/null @@ -1,40 +0,0 @@ -:_mod-docs-content-type: ASSEMBLY -[id="configuring-github-identity-provider"] -= Configuring a GitHub or GitHub Enterprise identity provider -include::_attributes/common-attributes.adoc[] -:context: configuring-github-identity-provider - -toc::[] - -Configure the `github` identity provider to validate user names and passwords against GitHub or GitHub Enterprise's OAuth authentication server. OAuth facilitates a token exchange flow between {product-title} and GitHub or GitHub Enterprise. - -You can use the GitHub integration to connect to either GitHub or GitHub Enterprise. For GitHub Enterprise integrations, you must provide the `hostname` of your instance and can optionally provide a `ca` certificate bundle to use in requests to the server. - -[NOTE] -==== -The following steps apply to both GitHub and GitHub Enterprise unless noted.
-==== - -ifdef::openshift-origin,openshift-enterprise,openshift-webscale[] -include::modules/identity-provider-overview.adoc[leveloffset=+1] -endif::openshift-origin,openshift-enterprise,openshift-webscale[] - -include::modules/identity-provider-github-about.adoc[leveloffset=+1] - -include::modules/identity-provider-registering-github.adoc[leveloffset=+1] - -ifdef::openshift-enterprise,openshift-webscale,openshift-origin[] -include::modules/identity-provider-secret.adoc[leveloffset=+1] - -include::modules/identity-provider-config-map.adoc[leveloffset=+1] - -include::modules/identity-provider-github-CR.adoc[leveloffset=+1] - -// Included here so that it is associated with the above module -[role="_additional-resources"] -.Additional resources - -* See xref:../../authentication/understanding-identity-provider.adoc#identity-provider-parameters_understanding-identity-provider[Identity provider parameters] for information on parameters, such as `mappingMethod`, that are common to all identity providers. - -include::modules/identity-provider-add.adoc[leveloffset=+1] -endif::[] diff --git a/authentication/identity_providers/configuring-gitlab-identity-provider.adoc b/authentication/identity_providers/configuring-gitlab-identity-provider.adoc deleted file mode 100644 index 75f8dc914a08..000000000000 --- a/authentication/identity_providers/configuring-gitlab-identity-provider.adoc +++ /dev/null @@ -1,29 +0,0 @@ -:_mod-docs-content-type: ASSEMBLY -[id="configuring-gitlab-identity-provider"] -= Configuring a GitLab identity provider -include::_attributes/common-attributes.adoc[] -:context: configuring-gitlab-identity-provider - -toc::[] - -Configure the `gitlab` identity provider using link:https://gitlab.com/[GitLab.com] or any other GitLab instance as an identity provider. - -ifdef::openshift-origin,openshift-enterprise,openshift-webscale[] -include::modules/identity-provider-overview.adoc[leveloffset=+1] -endif::openshift-origin,openshift-enterprise,openshift-webscale[] - -include::modules/identity-provider-gitlab-about.adoc[leveloffset=+1] - -include::modules/identity-provider-secret.adoc[leveloffset=+1] - -include::modules/identity-provider-config-map.adoc[leveloffset=+1] - -include::modules/identity-provider-gitlab-CR.adoc[leveloffset=+1] - -// Included here so that it is associated with the above module -[role="_additional-resources"] -.Additional resources - -* See xref:../../authentication/understanding-identity-provider.adoc#identity-provider-parameters_understanding-identity-provider[Identity provider parameters] for information on parameters, such as `mappingMethod`, that are common to all identity providers. - -include::modules/identity-provider-add.adoc[leveloffset=+1] diff --git a/authentication/identity_providers/configuring-google-identity-provider.adoc b/authentication/identity_providers/configuring-google-identity-provider.adoc deleted file mode 100644 index b447062ec2b5..000000000000 --- a/authentication/identity_providers/configuring-google-identity-provider.adoc +++ /dev/null @@ -1,29 +0,0 @@ -:_mod-docs-content-type: ASSEMBLY -[id="configuring-google-identity-provider"] -= Configuring a Google identity provider -include::_attributes/common-attributes.adoc[] -:context: configuring-google-identity-provider - -toc::[] - -Configure the `google` identity provider using the link:https://developers.google.com/identity/protocols/OpenIDConnect[Google OpenID Connect integration]. 
- -ifdef::openshift-origin,openshift-enterprise,openshift-webscale[] -include::modules/identity-provider-overview.adoc[leveloffset=+1] -endif::openshift-origin,openshift-enterprise,openshift-webscale[] - -include::modules/identity-provider-google-about.adoc[leveloffset=+1] - -ifdef::openshift-enterprise,openshift-webscale,openshift-origin[] -include::modules/identity-provider-secret.adoc[leveloffset=+1] - -include::modules/identity-provider-google-CR.adoc[leveloffset=+1] - -// Included here so that it is associated with the above module -[role="_additional-resources"] -.Additional resources - -* See xref:../../authentication/understanding-identity-provider.adoc#identity-provider-parameters_understanding-identity-provider[Identity provider parameters] for information on parameters, such as `mappingMethod`, that are common to all identity providers. - -include::modules/identity-provider-add.adoc[leveloffset=+1] -endif::[] diff --git a/authentication/identity_providers/configuring-htpasswd-identity-provider.adoc b/authentication/identity_providers/configuring-htpasswd-identity-provider.adoc deleted file mode 100644 index 2a7a2882f7c8..000000000000 --- a/authentication/identity_providers/configuring-htpasswd-identity-provider.adoc +++ /dev/null @@ -1,52 +0,0 @@ -:_mod-docs-content-type: ASSEMBLY -[id="configuring-htpasswd-identity-provider"] -= Configuring an htpasswd identity provider -include::_attributes/common-attributes.adoc[] -:context: configuring-htpasswd-identity-provider - -toc::[] - -Configure the `htpasswd` identity provider to allow users to log in to {product-title} with credentials from an htpasswd file. - -To define an htpasswd identity provider, perform the following tasks: - -. xref:../../authentication/identity_providers/configuring-htpasswd-identity-provider.adoc#creating-htpasswd-file[Create an `htpasswd` file] to store the user and password information. -. xref:../../authentication/identity_providers/configuring-htpasswd-identity-provider.adoc#identity-provider-creating-htpasswd-secret_{context}[Create -a secret] to represent the `htpasswd` file. -. xref:../../authentication/identity_providers/configuring-htpasswd-identity-provider.adoc#identity-provider-htpasswd-CR_{context}[Define an htpasswd identity provider resource] that references the secret. -. xref:../../authentication/identity_providers/configuring-htpasswd-identity-provider.adoc#add-identity-provider_{context}[Apply the resource] to -the default OAuth configuration to add the identity provider. 
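Taken together, steps 2 and 3 of the preceding list produce an `OAuth` configuration of roughly the following shape; the provider and secret names here are hypothetical placeholders.

[source,yaml]
----
apiVersion: config.openshift.io/v1
kind: OAuth
metadata:
  name: cluster
spec:
  identityProviders:
  - name: my_htpasswd_provider   # hypothetical name shown on the login page
    mappingMethod: claim
    type: HTPasswd
    htpasswd:
      fileData:
        name: htpass-secret      # secret created from your htpasswd file
----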
- -ifdef::openshift-origin,openshift-enterprise,openshift-webscale[] -include::modules/identity-provider-overview.adoc[leveloffset=+1] -endif::openshift-origin,openshift-enterprise,openshift-webscale[] - -include::modules/identity-provider-htpasswd-about.adoc[leveloffset=+1] - -[id="creating-htpasswd-file"] -== Creating the htpasswd file - -See one of the following sections for instructions about how to create the htpasswd file: - -* xref:../../authentication/identity_providers/configuring-htpasswd-identity-provider.adoc#identity-provider-creating-htpasswd-file-linux_configuring-htpasswd-identity-provider[Creating an htpasswd file using Linux] -* xref:../../authentication/identity_providers/configuring-htpasswd-identity-provider.adoc#identity-provider-creating-htpasswd-file-windows_configuring-htpasswd-identity-provider[Creating an htpasswd file using Windows] - -include::modules/identity-provider-creating-htpasswd-file-linux.adoc[leveloffset=+2] - -include::modules/identity-provider-creating-htpasswd-file-windows.adoc[leveloffset=+2] - -include::modules/identity-provider-htpasswd-secret.adoc[leveloffset=+1] - -include::modules/identity-provider-htpasswd-CR.adoc[leveloffset=+1] - -// Included here so that it is associated with the above module -[role="_additional-resources"] -.Additional resources - -* See xref:../../authentication/understanding-identity-provider.adoc#identity-provider-parameters_understanding-identity-provider[Identity provider parameters] for information on parameters, such as `mappingMethod`, that are common to all identity providers. - -include::modules/identity-provider-add.adoc[leveloffset=+1] - -include::modules/identity-provider-htpasswd-update-users.adoc[leveloffset=+1] - -include::modules/identity-provider-configuring-using-web-console.adoc[leveloffset=+1] diff --git a/authentication/identity_providers/configuring-keystone-identity-provider.adoc b/authentication/identity_providers/configuring-keystone-identity-provider.adoc deleted file mode 100644 index a53b1779cf06..000000000000 --- a/authentication/identity_providers/configuring-keystone-identity-provider.adoc +++ /dev/null @@ -1,27 +0,0 @@ -:_mod-docs-content-type: ASSEMBLY -[id="configuring-keystone-identity-provider"] -= Configuring a Keystone identity provider -include::_attributes/common-attributes.adoc[] -:context: configuring-keystone-identity-provider - -toc::[] - -Configure the `keystone` identity provider to integrate your {product-title} cluster with Keystone to enable shared authentication with an OpenStack Keystone v3 server configured to store users in an internal database. This configuration allows users to log in to {product-title} with their Keystone credentials. - -include::modules/identity-provider-overview.adoc[leveloffset=+1] - -include::modules/identity-provider-keystone-about.adoc[leveloffset=+1] - -include::modules/identity-provider-secret-tls.adoc[leveloffset=+1] - -include::modules/identity-provider-config-map.adoc[leveloffset=+1] - -include::modules/identity-provider-keystone-CR.adoc[leveloffset=+1] - -// Included here so that it is associated with the above module -[role="_additional-resources"] -.Additional resources - -* See xref:../../authentication/understanding-identity-provider.adoc#identity-provider-parameters_understanding-identity-provider[Identity provider parameters] for information on parameters, such as `mappingMethod`, that are common to all identity providers. 
- -include::modules/identity-provider-add.adoc[leveloffset=+1] diff --git a/authentication/identity_providers/configuring-ldap-identity-provider.adoc b/authentication/identity_providers/configuring-ldap-identity-provider.adoc deleted file mode 100644 index dce6d697af01..000000000000 --- a/authentication/identity_providers/configuring-ldap-identity-provider.adoc +++ /dev/null @@ -1,31 +0,0 @@ -:_mod-docs-content-type: ASSEMBLY -[id="configuring-ldap-identity-provider"] -= Configuring an LDAP identity provider -include::_attributes/common-attributes.adoc[] -:context: configuring-ldap-identity-provider - -toc::[] - -Configure the `ldap` identity provider to validate user names and passwords against an LDAPv3 server, using simple bind authentication. - -ifdef::openshift-origin,openshift-enterprise,openshift-webscale[] -include::modules/identity-provider-overview.adoc[leveloffset=+1] -endif::openshift-origin,openshift-enterprise,openshift-webscale[] - -include::modules/identity-provider-about-ldap.adoc[leveloffset=+1] - -ifdef::openshift-enterprise,openshift-webscale,openshift-origin[] -include::modules/identity-provider-ldap-secret.adoc[leveloffset=+1] - -include::modules/identity-provider-config-map.adoc[leveloffset=+1] - -include::modules/identity-provider-ldap-CR.adoc[leveloffset=+1] - -// Included here so that it is associated with the above module -[role="_additional-resources"] -.Additional resources - -* See xref:../../authentication/understanding-identity-provider.adoc#identity-provider-parameters_understanding-identity-provider[Identity provider parameters] for information on parameters, such as `mappingMethod`, that are common to all identity providers. - -include::modules/identity-provider-add.adoc[leveloffset=+1] -endif::[] diff --git a/authentication/identity_providers/configuring-oidc-identity-provider.adoc b/authentication/identity_providers/configuring-oidc-identity-provider.adoc deleted file mode 100644 index 9482be8b009f..000000000000 --- a/authentication/identity_providers/configuring-oidc-identity-provider.adoc +++ /dev/null @@ -1,37 +0,0 @@ -:_mod-docs-content-type: ASSEMBLY -[id="configuring-oidc-identity-provider"] -= Configuring an OpenID Connect identity provider -include::_attributes/common-attributes.adoc[] -:context: configuring-oidc-identity-provider - -toc::[] - -Configure the `oidc` identity provider to integrate with an OpenID Connect identity provider using an link:http://openid.net/specs/openid-connect-core-1_0.html#CodeFlowAuth[Authorization Code Flow]. 
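The modules that follow assemble an `OAuth` entry of roughly the following shape; the client ID, secret name, and issuer URL are hypothetical placeholders.

[source,yaml]
----
apiVersion: config.openshift.io/v1
kind: OAuth
metadata:
  name: cluster
spec:
  identityProviders:
  - name: oidcidp                        # hypothetical provider name
    mappingMethod: claim
    type: OpenID
    openID:
      clientID: my-client-id             # placeholder client ID
      clientSecret:
        name: oidc-client-secret         # secret holding the client secret
      issuer: https://idp.example.com    # must serve OIDC discovery metadata
      claims:
        preferredUsername:
        - preferred_username
        name:
        - name
        email:
        - email
----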
- -ifdef::openshift-origin,openshift-enterprise,openshift-webscale[] -include::modules/identity-provider-overview.adoc[leveloffset=+1] -endif::openshift-origin,openshift-enterprise,openshift-webscale[] - -include::modules/identity-provider-oidc-about.adoc[leveloffset=+1] - -ifdef::openshift-enterprise[] -include::modules/identity-provider-oidc-supported.adoc[leveloffset=+1] -endif::openshift-enterprise[] - -ifdef::openshift-enterprise,openshift-webscale,openshift-origin[] -include::modules/identity-provider-secret.adoc[leveloffset=+1] - -include::modules/identity-provider-config-map.adoc[leveloffset=+1] - -include::modules/identity-provider-oidc-CR.adoc[leveloffset=+1] - -// Included here so that it is associated with the above module -[role="_additional-resources"] -.Additional resources - -* See xref:../../authentication/understanding-identity-provider.adoc#identity-provider-parameters_understanding-identity-provider[Identity provider parameters] for information on parameters, such as `mappingMethod`, that are common to all identity providers. - -include::modules/identity-provider-add.adoc[leveloffset=+1] - -include::modules/identity-provider-configuring-using-web-console.adoc[leveloffset=+1] -endif::[] diff --git a/authentication/identity_providers/configuring-request-header-identity-provider.adoc b/authentication/identity_providers/configuring-request-header-identity-provider.adoc deleted file mode 100644 index 2cea646b461b..000000000000 --- a/authentication/identity_providers/configuring-request-header-identity-provider.adoc +++ /dev/null @@ -1,37 +0,0 @@ -:_mod-docs-content-type: ASSEMBLY -[id="configuring-request-header-identity-provider"] -= Configuring a request header identity provider -include::_attributes/common-attributes.adoc[] -:context: configuring-request-header-identity-provider - -toc::[] - -Configure the `request-header` identity provider to identify users from request header values, such as `X-Remote-User`. It is typically used in combination with an authenticating proxy, which sets the request header value. - -include::modules/identity-provider-overview.adoc[leveloffset=+1] - -include::modules/identity-provider-about-request-header.adoc[leveloffset=+1] - -include::modules/identity-provider-config-map.adoc[leveloffset=+1] - -include::modules/identity-provider-request-header-CR.adoc[leveloffset=+1] - -// Included here so that it is associated with the above module -[role="_additional-resources"] -.Additional resources - -* See xref:../../authentication/understanding-identity-provider.adoc#identity-provider-parameters_understanding-identity-provider[Identity provider parameters] for information on parameters, such as `mappingMethod`, that are common to all identity providers. - -include::modules/identity-provider-add.adoc[leveloffset=+1] - -[id="example-apache-auth-config-using-request-header"] -== Example Apache authentication configuration using request header - -This example configures an Apache authentication proxy for {product-title} -using the request header identity provider.
- -[discrete] -include::modules/identity-provider-apache-custom-proxy-configuration.adoc[leveloffset=+2] - -[discrete] -include::modules/identity-provider-configuring-apache-request-header.adoc[leveloffset=+2] diff --git a/authentication/identity_providers/images b/authentication/identity_providers/images deleted file mode 120000 index 847b03ed0541..000000000000 --- a/authentication/identity_providers/images +++ /dev/null @@ -1 +0,0 @@ -../../images/ \ No newline at end of file diff --git a/authentication/identity_providers/modules b/authentication/identity_providers/modules deleted file mode 120000 index 36719b9de743..000000000000 --- a/authentication/identity_providers/modules +++ /dev/null @@ -1 +0,0 @@ -../../modules/ \ No newline at end of file diff --git a/authentication/identity_providers/snippets b/authentication/identity_providers/snippets deleted file mode 120000 index 5a3f5add140e..000000000000 --- a/authentication/identity_providers/snippets +++ /dev/null @@ -1 +0,0 @@ -../../snippets/ \ No newline at end of file diff --git a/authentication/images b/authentication/images deleted file mode 120000 index 5e67573196d8..000000000000 --- a/authentication/images +++ /dev/null @@ -1 +0,0 @@ -../images \ No newline at end of file diff --git a/authentication/impersonating-system-admin.adoc b/authentication/impersonating-system-admin.adoc deleted file mode 100644 index c9ea3d2cfee5..000000000000 --- a/authentication/impersonating-system-admin.adoc +++ /dev/null @@ -1,15 +0,0 @@ -:_mod-docs-content-type: ASSEMBLY -[id="impersonating-system-admin"] -= Impersonating the system:admin user -include::_attributes/common-attributes.adoc[] -:context: impersonating-system-admin - -toc::[] - -include::modules/authentication-api-impersonation.adoc[leveloffset=+1] - -include::modules/impersonation-system-admin-user.adoc[leveloffset=+1] - -include::modules/impersonation-system-admin-group.adoc[leveloffset=+1] - -include::modules/unauthenticated-users-cluster-role-binding.adoc[leveloffset=+1] \ No newline at end of file diff --git a/authentication/index.adoc b/authentication/index.adoc deleted file mode 100644 index 54d98755b7c7..000000000000 --- a/authentication/index.adoc +++ /dev/null @@ -1,99 +0,0 @@ -[id="overview-of-authentication-authorization"] -= Overview of authentication and authorization -include::_attributes/common-attributes.adoc[] -:context: overview-of-authentication-authorization - -toc::[] - -include::modules/authentication-authorization-common-terms.adoc[leveloffset=+1] - -[id="authentication-overview"] -== About authentication in {product-title} -To control access to an {product-title} cluster, -ifndef::openshift-dedicated,openshift-rosa,openshift-rosa-hcp[] -a cluster administrator -endif::openshift-dedicated,openshift-rosa,openshift-rosa-hcp[] -ifdef::openshift-dedicated,openshift-rosa,openshift-rosa-hcp[] -an administrator with the `dedicated-admin` role -endif::openshift-dedicated,openshift-rosa,openshift-rosa-hcp[] -can configure xref:../authentication/understanding-authentication.adoc#understanding-authentication[user authentication] and ensure that only approved users access the cluster. - -To interact with an {product-title} cluster, users must first authenticate to the {product-title} API. You can authenticate by providing an xref:../authentication/understanding-authentication.adoc#rbac-api-authentication_understanding-authentication[OAuth access token or an X.509 client certificate] in your requests to the {product-title} API.
- -[NOTE] -==== -If you do not present a valid access token or certificate, your request is unauthenticated and you receive an HTTP 401 error. -==== - -ifdef::openshift-dedicated,openshift-rosa,openshift-rosa-hcp[] -An administrator can configure authentication by configuring an identity provider. You can define any xref:../authentication/sd-configuring-identity-providers.adoc#understanding-idp-supported_sd-configuring-identity-providers[supported identity provider in {product-title}] and add it to your cluster. -endif::openshift-dedicated,openshift-rosa,openshift-rosa-hcp[] - -ifndef::openshift-dedicated,openshift-rosa,openshift-rosa-hcp[] -An administrator can configure authentication through the following tasks: - -* Configuring an identity provider: You can define any xref:../authentication/understanding-identity-provider.adoc#supported-identity-providers[supported identity provider in {product-title}] and add it to your cluster. - -* xref:../authentication/configuring-internal-oauth.adoc#configuring-internal-oauth[Configuring the internal OAuth server]: The {product-title} control plane includes a built-in OAuth server that determines the user's identity from the configured identity provider and creates an access token. You can configure the token duration and inactivity timeout, and customize the internal OAuth server URL. -+ -[NOTE] -==== -Users can xref:../authentication/managing-oauth-access-tokens.adoc#managing-oauth-access-tokens[view and manage OAuth tokens owned by them]. -==== - -* Registering an OAuth client: {product-title} includes several xref:../authentication/configuring-oauth-clients.adoc#oauth-default-clients_configuring-oauth-clients[default OAuth clients]. You can xref:../authentication/configuring-oauth-clients.adoc#oauth-register-additional-client_configuring-oauth-clients[register and configure additional OAuth clients]. -+ -[NOTE] -==== -When users send a request for an OAuth token, they must specify either a default or custom OAuth client that receives and uses the token. -==== - -* Managing cloud provider credentials using the xref:../authentication/managing_cloud_provider_credentials/about-cloud-credential-operator.adoc#about-cloud-credential-operator[Cloud Credential Operator]: Cluster components use cloud provider credentials to get permissions required to perform cluster-related tasks. -* Impersonating a system admin user: You can grant cluster administrator permissions to a user by xref:../authentication/impersonating-system-admin.adoc#impersonating-system-admin[impersonating a system admin user]. -endif::openshift-dedicated,openshift-rosa,openshift-rosa-hcp[] - -[id="authorization-overview"] -== About authorization in {product-title} -Authorization involves determining whether the identified user has permissions to perform the requested action. - -Administrators can define permissions and assign them to users by using the xref:../authentication/using-rbac.adoc#authorization-overview_using-rbac[RBAC objects, such as rules, roles, and bindings]. To understand how authorization works in {product-title}, see xref:../authentication/using-rbac.adoc#evaluating-authorization_using-rbac[Evaluating authorization]. - -You can also control access to an {product-title} cluster through xref:../authentication/using-rbac.adoc#rbac-projects-namespaces_using-rbac[projects and namespaces].
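To make those RBAC objects concrete, the following sketch shows a namespaced role and a binding that grants it to a single user; all names are hypothetical placeholders.

[source,yaml]
----
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: pod-reader            # hypothetical role
  namespace: my-project       # hypothetical namespace
rules:
- apiGroups: [""]
  resources: ["pods"]
  verbs: ["get", "list", "watch"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: read-pods
  namespace: my-project
subjects:
- kind: User
  name: alice                 # hypothetical user
  apiGroup: rbac.authorization.k8s.io
roleRef:
  kind: Role
  name: pod-reader
  apiGroup: rbac.authorization.k8s.io
----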
- -Along with controlling user access to a cluster, you can also control the actions a pod can perform and the resources it can access by using xref:../authentication/managing-security-context-constraints.adoc#managing-pod-security-policies[security context constraints (SCCs)]. - -You can manage authorization for {product-title} through the following tasks: - -* Viewing xref:../authentication/using-rbac.adoc#viewing-local-roles_using-rbac[local] and xref:../authentication/using-rbac.adoc#viewing-cluster-roles_using-rbac[cluster] roles and bindings. - -* Creating a xref:../authentication/using-rbac.adoc#creating-local-role_using-rbac[local role] and assigning it to a user or group. - -ifndef::openshift-dedicated,openshift-rosa,openshift-rosa-hcp[] -* Creating a cluster role and assigning it to a user or group: {product-title} includes a set of xref:../authentication/using-rbac.adoc#default-roles_using-rbac[default cluster roles]. You can create additional xref:../authentication/using-rbac.adoc#creating-cluster-role_using-rbac[cluster roles] and xref:../authentication/using-rbac.adoc#adding-roles_using-rbac[add them to a user or group]. -endif::openshift-dedicated,openshift-rosa,openshift-rosa-hcp[] -ifdef::openshift-dedicated,openshift-rosa,openshift-rosa-hcp[] -* Assigning a cluster role to a user or group: {product-title} includes a set of xref:../authentication/using-rbac.adoc#default-roles_using-rbac[default cluster roles]. You can xref:../authentication/using-rbac.adoc#adding-roles_using-rbac[add them to a user or group]. -endif::openshift-dedicated,openshift-rosa,openshift-rosa-hcp[] - -ifndef::openshift-dedicated,openshift-rosa,openshift-rosa-hcp[] -* Creating a cluster-admin user: By default, your cluster has only one cluster administrator called `kubeadmin`. You can xref:../authentication/using-rbac.adoc#creating-cluster-admin_using-rbac[create another cluster administrator]. Before creating a cluster administrator, ensure that you have configured an identity provider. -+ -[NOTE] -==== -After creating the cluster admin user, xref:../authentication/remove-kubeadmin.adoc#removing-kubeadmin_removing-kubeadmin[delete the existing kubeadmin user] to improve cluster security. -==== -endif::openshift-dedicated,openshift-rosa,openshift-rosa-hcp[] - -ifdef::openshift-rosa,openshift-rosa-hcp[] -* Creating cluster-admin and dedicated-admin users: The user who created the {product-title} cluster can grant access to other xref:../authentication/using-rbac.adoc#rosa-create-cluster-admins_using-rbac[`cluster-admin`] and xref:../authentication/using-rbac.adoc#rosa-create-dedicated-cluster-admins_using-rbac[`dedicated-admin`] users. -endif::openshift-rosa,openshift-rosa-hcp[] - -ifdef::openshift-dedicated[] -* Granting administrator privileges to users: You can xref:../authentication/using-rbac.adoc#osd-grant-admin-privileges_using-rbac[grant `dedicated-admin` privileges to users]. -endif::openshift-dedicated[] - -* Creating service accounts: xref:../authentication/understanding-and-creating-service-accounts.adoc#service-accounts-overview_understanding-service-accounts[Service accounts] provide a flexible way to control API access without sharing a regular user’s credentials. A user can xref:../authentication/understanding-and-creating-service-accounts.adoc#service-accounts-managing_understanding-service-accounts[create and use a service account in applications] and also use a service account as xref:../authentication/using-service-accounts-as-oauth-client.adoc#using-service-accounts-as-oauth-client[an OAuth client].
- -* xref:../authentication/tokens-scoping.adoc#tokens-scoping[Scoping tokens]: A scoped token is a token that identifies as a specific user and that can perform only specific operations. You can create scoped tokens to delegate some of your permissions to another user or a service account. - -* Syncing LDAP groups: You can manage user groups in one place by xref:../authentication/ldap-syncing.adoc#ldap-syncing[syncing the groups stored in an LDAP server] with the {product-title} user groups. diff --git a/authentication/ldap-syncing.adoc b/authentication/ldap-syncing.adoc deleted file mode 100644 index a1bbda824cee..000000000000 --- a/authentication/ldap-syncing.adoc +++ /dev/null @@ -1,75 +0,0 @@ -:_mod-docs-content-type: ASSEMBLY -[id="ldap-syncing"] -= Syncing LDAP groups -include::_attributes/common-attributes.adoc[] -:context: ldap-syncing-groups - -toc::[] - -ifdef::openshift-enterprise,openshift-webscale,openshift-origin[] -As an administrator, -endif::[] -ifdef::openshift-dedicated,openshift-rosa,openshift-rosa-hcp[] -As an administrator with the `dedicated-admin` role, -endif::openshift-dedicated,openshift-rosa,openshift-rosa-hcp[] -you can use groups to manage users, change -their permissions, and enhance collaboration. Your organization may have already -created user groups and stored them in an LDAP server. {product-title} can sync -those LDAP records with internal {product-title} records, enabling you to manage -your groups in one place. {product-title} currently supports group sync with -LDAP servers using three common schemas for defining group membership: RFC 2307, -Active Directory, and augmented Active Directory. - -ifndef::openshift-dedicated,openshift-rosa,openshift-rosa-hcp[] -For more information on configuring LDAP, see -xref:../authentication/identity_providers/configuring-ldap-identity-provider.adoc#configuring-ldap-identity-provider[Configuring an LDAP identity provider]. -endif::openshift-dedicated,openshift-rosa,openshift-rosa-hcp[] - -ifdef::openshift-dedicated,openshift-rosa,openshift-rosa-hcp[] -For more information on configuring LDAP, see -xref:../authentication/sd-configuring-identity-providers.adoc#config-ldap-idp_sd-configuring-identity-providers[Configuring an LDAP identity provider]. -endif::openshift-dedicated,openshift-rosa,openshift-rosa-hcp[] - -ifdef::openshift-enterprise,openshift-webscale,openshift-origin[] -[NOTE] -==== -You must have `cluster-admin` privileges to sync groups. -==== -endif::[] -ifdef::openshift-dedicated,openshift-rosa,openshift-rosa-hcp[] -[NOTE] -==== -You must have `dedicated-admin` privileges to sync groups. -==== -endif::openshift-dedicated,openshift-rosa,openshift-rosa-hcp[] - -include::modules/ldap-syncing-about.adoc[leveloffset=+1] -include::modules/ldap-syncing-config-rfc2307.adoc[leveloffset=+2] -include::modules/ldap-syncing-config-activedir.adoc[leveloffset=+2] -include::modules/ldap-syncing-config-augmented-activedir.adoc[leveloffset=+2] -include::modules/ldap-syncing-running.adoc[leveloffset=+1] -include::modules/ldap-syncing-running-all-ldap.adoc[leveloffset=+2] -include::modules/ldap-syncing-running-openshift.adoc[leveloffset=+2] -include::modules/ldap-syncing-running-subset.adoc[leveloffset=+2] -include::modules/ldap-syncing-pruning.adoc[leveloffset=+1] - -// OSD and ROSA dedicated-admins cannot create the cluster roles and cluster role bindings required for this procedure.
-ifndef::openshift-dedicated,openshift-rosa,openshift-rosa-hcp[] -// Automatically syncing LDAP groups -include::modules/ldap-auto-syncing.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* xref:../authentication/identity_providers/configuring-ldap-identity-provider.adoc#configuring-ldap-identity-provider[Configuring an LDAP identity provider] -* xref:../nodes/jobs/nodes-nodes-jobs.adoc#nodes-nodes-jobs-creating-cron_nodes-nodes-jobs[Creating cron jobs] -endif::openshift-dedicated,openshift-rosa,openshift-rosa-hcp[] - -include::modules/ldap-syncing-examples.adoc[leveloffset=+1] -include::modules/ldap-syncing-rfc2307.adoc[leveloffset=+2] -include::modules/ldap-syncing-rfc2307-user-defined.adoc[leveloffset=+2] -include::modules/ldap-syncing-rfc2307-user-defined-error.adoc[leveloffset=+2] -include::modules/ldap-syncing-activedir.adoc[leveloffset=+2] -include::modules/ldap-syncing-augmented-activedir.adoc[leveloffset=+2] -include::modules/ldap-syncing-nesting.adoc[leveloffset=+2] -include::modules/ldap-syncing-spec.adoc[leveloffset=+1] diff --git a/authentication/managing-oauth-access-tokens.adoc b/authentication/managing-oauth-access-tokens.adoc deleted file mode 100644 index 6ad43641f0c3..000000000000 --- a/authentication/managing-oauth-access-tokens.adoc +++ /dev/null @@ -1,21 +0,0 @@ -:_mod-docs-content-type: ASSEMBLY -[id="managing-oauth-access-tokens"] -= Managing user-owned OAuth access tokens -include::_attributes/common-attributes.adoc[] -:context: managing-oauth-access-tokens - -toc::[] - -Users can review their own OAuth access tokens and delete any that are no longer needed. - -// Listing user-owned OAuth access tokens -include::modules/oauth-list-tokens.adoc[leveloffset=+1] - -// Viewing the details of a user-owned OAuth access token -include::modules/oauth-view-details-tokens.adoc[leveloffset=+1] - -// Deleting user-owned OAuth access tokens -include::modules/oauth-delete-tokens.adoc[leveloffset=+1] - -// Adding unauthenticated groups to ClusterRoleBindings -include::modules/unauthenticated-users-cluster-role-binding.adoc[leveloffset=+1] diff --git a/authentication/managing-security-context-constraints.adoc b/authentication/managing-security-context-constraints.adoc deleted file mode 100644 index 94ac04555d6d..000000000000 --- a/authentication/managing-security-context-constraints.adoc +++ /dev/null @@ -1,55 +0,0 @@ -:_mod-docs-content-type: ASSEMBLY -[id="managing-pod-security-policies"] -= Managing security context constraints -include::_attributes/common-attributes.adoc[] -:context: configuring-internal-oauth - -toc::[] - -In {product-title}, you can use security context constraints (SCCs) to control permissions for the pods in your cluster. - -Default SCCs are created during installation and when you install some Operators or other components. As a cluster administrator, you can also create your own SCCs by using the OpenShift CLI (`oc`). - -[IMPORTANT] -==== -Do not modify the default SCCs. Customizing the default SCCs can lead to issues when some of the platform pods deploy or -ifndef::openshift-rosa,openshift-rosa-hcp[] -{product-title} -endif::[] -ifdef::openshift-rosa,openshift-rosa-hcp[] -ROSA -endif::openshift-rosa,openshift-rosa-hcp[] -is upgraded. Additionally, the default SCC values are reset to the defaults during some cluster upgrades, which discards all customizations to those SCCs. 
-ifdef::openshift-origin,openshift-enterprise,openshift-webscale,openshift-dedicated,openshift-rosa,openshift-rosa-hcp[] - -Instead of modifying the default SCCs, create and modify your own SCCs as needed. For detailed steps, see xref:../authentication/managing-security-context-constraints.adoc#security-context-constraints-creating_configuring-internal-oauth[Creating security context constraints]. -endif::[] -==== - -ifdef::openshift-dedicated[] -[NOTE] -==== -In {product-title} deployments, you can create your own SCCs only for clusters that use the Customer Cloud Subscription (CCS) model. You cannot create SCCs for {product-title} clusters that use a Red Hat cloud account, because SCC resource creation requires `cluster-admin` privileges. -==== -endif::openshift-dedicated[] - -include::modules/security-context-constraints-about.adoc[leveloffset=+1] -include::modules/security-context-constraints-pre-allocated-values.adoc[leveloffset=+1] -include::modules/security-context-constraints-example.adoc[leveloffset=+1] -include::modules/security-context-constraints-creating.adoc[leveloffset=+1] - -// Configuring a workload to require a specific SCC -include::modules/security-context-constraints-requiring.adoc[leveloffset=+1] -include::modules/security-context-constraints-rbac.adoc[leveloffset=+1] -include::modules/security-context-constraints-command-reference.adoc[leveloffset=+1] - -[role="_additional-resources"] -[id="additional-resources_configuring-internal-oauth"] -== Additional resources - -ifndef::openshift-rosa-hcp[] -* xref:../support/getting-support.adoc#getting-support[Getting support] -endif::openshift-rosa-hcp[] -ifdef::openshift-rosa-hcp[] -* link:https://docs.openshift.com/rosa/support/getting-support.html[Getting support] -endif::openshift-rosa-hcp[] \ No newline at end of file diff --git a/authentication/managing_cloud_provider_credentials/_attributes b/authentication/managing_cloud_provider_credentials/_attributes deleted file mode 120000 index 20cc1dcb77bf..000000000000 --- a/authentication/managing_cloud_provider_credentials/_attributes +++ /dev/null @@ -1 +0,0 @@ -../../_attributes/ \ No newline at end of file diff --git a/authentication/managing_cloud_provider_credentials/about-cloud-credential-operator.adoc b/authentication/managing_cloud_provider_credentials/about-cloud-credential-operator.adoc deleted file mode 100644 index 741c5bc5c8fe..000000000000 --- a/authentication/managing_cloud_provider_credentials/about-cloud-credential-operator.adoc +++ /dev/null @@ -1,115 +0,0 @@ -:_mod-docs-content-type: ASSEMBLY -[id="about-cloud-credential-operator"] -= About the Cloud Credential Operator -include::_attributes/common-attributes.adoc[] -:context: about-cloud-credential-operator - -toc::[] - -The Cloud Credential Operator (CCO) manages cloud provider credentials as custom resource definitions (CRDs). The CCO syncs on `CredentialsRequest` custom resources (CRs) to allow {product-title} components to request cloud provider credentials with the specific permissions that are required for the cluster to run. - -By setting different values for the `credentialsMode` parameter in the `install-config.yaml` file, the CCO can be configured to operate in several different modes. If no mode is specified, or the `credentialsMode` parameter is set to an empty string (`""`), the CCO operates in its default mode. 
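For orientation, `credentialsMode` is a top-level field in the `install-config.yaml` file. The surrounding values in this fragment are hypothetical placeholders; only the `credentialsMode` line illustrates the parameter itself.

[source,yaml]
----
# install-config.yaml (fragment); all values except credentialsMode are placeholders
apiVersion: v1
baseDomain: example.com
metadata:
  name: my-cluster
credentialsMode: Mint       # or Passthrough or Manual; omit for the default mode
platform:
  aws:
    region: us-east-1
----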
- -[id="about-cloud-credential-operator-modes_{context}"] -== Modes - -By setting different values for the `credentialsMode` parameter in the `install-config.yaml` file, the CCO can be configured to operate in _mint_, _passthrough_, or _manual_ mode. These options provide transparency and flexibility in how the CCO uses cloud credentials to process `CredentialsRequest` CRs in the cluster, and allow the CCO to be configured to suit the security requirements of your organization. Not all CCO modes are supported for all cloud providers. - -* **xref:../../authentication/managing_cloud_provider_credentials/cco-mode-mint.adoc#cco-mode-mint[Mint]**: In mint mode, the CCO uses the provided admin-level cloud credential to create new credentials for components in the cluster with only the specific permissions that are required. - -* **xref:../../authentication/managing_cloud_provider_credentials/cco-mode-passthrough.adoc#cco-mode-passthrough[Passthrough]**: In passthrough mode, the CCO passes the provided cloud credential to the components that request cloud credentials. - -* **xref:../../authentication/managing_cloud_provider_credentials/cco-mode-manual.adoc#cco-mode-manual[Manual mode with long-term credentials for components]**: In manual mode, you can manage long-term cloud credentials instead of the CCO. - -* **xref:../../authentication/managing_cloud_provider_credentials/cco-short-term-creds.adoc#cco-short-term-creds[Manual mode with short-term credentials for components]**: For some providers, you can use the CCO utility (`ccoctl`) during installation to implement short-term credentials for individual components. These credentials are created and managed outside the {product-title} cluster. - -.CCO mode support matrix -[cols="<.^2,^.^1,^.^1,^.^1,^.^1"] -|==== -|Cloud provider |Mint |Passthrough |Manual with long-term credentials |Manual with short-term credentials - -|Amazon Web Services (AWS) -|X -|X -|X -|X - -|Global Microsoft Azure -| -|X -|X -|X - -|Microsoft Azure Stack Hub -| -| -|X -| - -|Google Cloud Platform (GCP) -|X -|X -|X -|X - -|{ibm-cloud-name} -| -| -|X ^[1]^ -| - -|Nutanix -| -| -|X ^[1]^ -| - -|{rh-openstack-first} -| -|X -| -| - -|VMware vSphere -| -|X -| -| - -|==== -[.small] --- -1. This platform uses the `ccoctl` utility during installation to configure long-term credentials. --- - -[id="cco-determine-mode_{context}"] -== Determining the Cloud Credential Operator mode - -For platforms that support using the CCO in multiple modes, you can determine what mode the CCO is configured to use by using the web console or the CLI. - -.Determining the CCO configuration -image::334_OpenShift_cluster_updating_and_CCO_workflows_0923_4.11_A_AliCloud_patch.png[Decision tree showing how to determine the configured CCO credentials mode for your cluster.] - -//Determining the Cloud Credential Operator mode by using the web console -include::modules/cco-determine-mode-gui.adoc[leveloffset=+2] - -//Determining the Cloud Credential Operator mode by using the CLI -include::modules/cco-determine-mode-cli.adoc[leveloffset=+2] - -[id="about-cloud-credential-operator-default_{context}"] -== Default behavior -For platforms on which multiple modes are supported (AWS, Azure, and GCP), when the CCO operates in its default mode, it checks the provided credentials dynamically to determine for which mode they are sufficient to process `CredentialsRequest` CRs. 
- -By default, the CCO determines whether the credentials are sufficient for mint mode, which is the preferred mode of operation, and uses those credentials to create appropriate credentials for components in the cluster. If the credentials are not sufficient for mint mode, it determines whether they are sufficient for passthrough mode. If the credentials are not sufficient for passthrough mode, the CCO cannot adequately process `CredentialsRequest` CRs. - -If the provided credentials are determined to be insufficient during installation, the installation fails. For AWS, the installation program fails early in the process and indicates which required permissions are missing. Other providers might not report the specific cause of the failure until an error is actually encountered. - -If the credentials are changed after a successful installation and the CCO determines that the new credentials are insufficient, the CCO puts conditions on any new `CredentialsRequest` CRs to indicate that it cannot process them because of the insufficient credentials. - -To resolve insufficient credentials issues, provide a credential with sufficient permissions. If an error occurred during installation, try installing again. For issues with new `CredentialsRequest` CRs, wait for the CCO to try to process the CR again. As an alternative, you can configure your cluster to use a different CCO mode that is supported for your cloud provider. - -[role="_additional-resources"] -[id="additional-resources_about-cloud-credential-operator_{context}"] -== Additional resources - -* xref:../../operators/operator-reference.adoc#cloud-credential-operator_cluster-operators-ref[Cluster Operators reference page for the Cloud Credential Operator] diff --git a/authentication/managing_cloud_provider_credentials/cco-mode-manual.adoc b/authentication/managing_cloud_provider_credentials/cco-mode-manual.adoc deleted file mode 100644 index 04ddfd66ff27..000000000000 --- a/authentication/managing_cloud_provider_credentials/cco-mode-manual.adoc +++ /dev/null @@ -1,35 +0,0 @@ -:_mod-docs-content-type: ASSEMBLY -[id="cco-mode-manual"] -= Manual mode with long-term credentials for components -include::_attributes/common-attributes.adoc[] -:context: cco-mode-manual - -toc::[] - -Manual mode is supported for Amazon Web Services (AWS), global Microsoft Azure, Microsoft Azure Stack Hub, Google Cloud Platform (GCP), {ibm-cloud-name}, and Nutanix. - -[id="manual-mode-classic_{context}"] -== User-managed credentials - -In manual mode, a user manages cloud credentials instead of the Cloud Credential Operator (CCO). To use this mode, you must examine the `CredentialsRequest` CRs in the release image for the version of {product-title} that you are running or installing, create corresponding credentials in the underlying cloud provider, and create Kubernetes Secrets in the correct namespaces to satisfy all `CredentialsRequest` CRs for the cluster's cloud provider. Some platforms use the CCO utility (`ccoctl`) to facilitate this process during installation and updates. - -Using manual mode with long-term credentials allows each cluster component to have only the permissions it requires, without storing an administrator-level credential in the cluster. This mode also does not require connectivity to services such as the AWS public IAM endpoint. However, you must manually reconcile permissions with new release images for every upgrade.
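To illustrate what you examine and satisfy in manual mode, a `CredentialsRequest` CR has roughly the following shape; the component name, permission, and secret reference in this sketch are hypothetical.

[source,yaml]
----
apiVersion: cloudcredential.openshift.io/v1
kind: CredentialsRequest
metadata:
  name: my-component                               # hypothetical component
  namespace: openshift-cloud-credential-operator
spec:
  providerSpec:
    apiVersion: cloudcredential.openshift.io/v1
    kind: AWSProviderSpec
    statementEntries:
    - effect: Allow
      action:
      - s3:GetObject                               # illustrative permission
      resource: "*"
  secretRef:
    name: my-component-credentials                 # secret you create to satisfy the CR
    namespace: my-component-namespace
----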
- -For information about configuring your cloud provider to use manual mode, see the manual credentials management options for your cloud provider. - -[NOTE] -==== -An AWS, global Azure, or GCP cluster that uses manual mode might be configured to use short-term credentials for different components. For more information, see xref:../../authentication/managing_cloud_provider_credentials/cco-short-term-creds.adoc#cco-short-term-creds[Manual mode with short-term credentials for components]. -==== - -[role="_additional-resources"] -[id="additional-resources_cco-mode-manual"] -== Additional resources - -* xref:../../installing/installing_aws/ipi/installing-aws-customizations.adoc#manually-create-iam_installing-aws-customizations[Manually creating long-term credentials for AWS] -* xref:../../installing/installing_azure/ipi/installing-azure-customizations.adoc#manually-create-iam_installing-azure-customizations[Manually creating long-term credentials for Azure] -* xref:../../installing/installing_gcp/installing-gcp-customizations.adoc#manually-create-iam_installing-gcp-customizations[Manually creating long-term credentials for GCP] -* xref:../../installing/installing_ibm_cloud/configuring-iam-ibm-cloud.adoc#configuring-iam-ibm-cloud[Configuring IAM for {ibm-cloud-name}] -* xref:../../installing/installing_nutanix/installing-nutanix-installer-provisioned.adoc#manually-create-iam-nutanix_installing-nutanix-installer-provisioned[Configuring IAM for Nutanix] -* xref:../../authentication/managing_cloud_provider_credentials/cco-short-term-creds.adoc#cco-short-term-creds[Manual mode with short-term credentials for components] -* xref:../../updating/preparing_for_updates/preparing-manual-creds-update.adoc#preparing-manual-creds-update[Preparing to update a cluster with manually maintained credentials] \ No newline at end of file diff --git a/authentication/managing_cloud_provider_credentials/cco-mode-mint.adoc b/authentication/managing_cloud_provider_credentials/cco-mode-mint.adoc deleted file mode 100644 index b7bb544055db..000000000000 --- a/authentication/managing_cloud_provider_credentials/cco-mode-mint.adoc +++ /dev/null @@ -1,80 +0,0 @@ -:_mod-docs-content-type: ASSEMBLY -[id="cco-mode-mint"] -= The Cloud Credential Operator in mint mode -include::_attributes/common-attributes.adoc[] -:context: cco-mode-mint - -toc::[] - -Mint mode is the default Cloud Credential Operator (CCO) credentials mode for {product-title} on platforms that support it. Mint mode supports Amazon Web Services (AWS) and Google Cloud Platform (GCP) clusters. - -[id="mint-mode-about"] -== Mint mode credentials management - -For clusters that use the CCO in mint mode, the administrator-level credential is stored in the `kube-system` namespace. -The CCO uses the `admin` credential to process the `CredentialsRequest` objects in the cluster and create users for components with limited permissions. - -With mint mode, each cluster component has only the specific permissions it requires. -Cloud credential reconciliation is automatic and continuous so that components can perform actions that require additional credentials or permissions. - -For example, a minor version cluster update (such as updating from {product-title} {ocp-nminus1} to {product-version}) might include an updated `CredentialsRequest` resource for a cluster component. -The CCO, operating in mint mode, uses the `admin` credential to process the `CredentialsRequest` resource and create users with limited permissions to satisfy the updated authentication requirements. 
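For reference, the `admin` credential that mint mode consumes lives in a root secret in the `kube-system` namespace. On AWS it looks roughly like this sketch, with placeholder key values:

[source,yaml]
----
apiVersion: v1
kind: Secret
metadata:
  namespace: kube-system
  name: aws-creds               # the root secret name the CCO looks for on AWS
stringData:
  aws_access_key_id: <access_key_id>           # placeholder
  aws_secret_access_key: <secret_access_key>   # placeholder
----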
- -[NOTE] -==== -By default, mint mode requires storing the `admin` credential in the cluster `kube-system` namespace. If this approach does not meet the security requirements of your organization, you can xref:../../post_installation_configuration/changing-cloud-credentials-configuration.adoc#manually-removing-cloud-creds_changing-cloud-credentials-configuration[remove the credential after installing the cluster]. -==== - -[id="mint-mode-permissions"] -=== Mint mode permissions requirements -When using the CCO in mint mode, ensure that the credential you provide meets the requirements of the cloud on which you are running or installing {product-title}. If the provided credentials are not sufficient for mint mode, the CCO cannot create an IAM user. - -The credential you provide for mint mode in Amazon Web Services (AWS) must have the following permissions: - -.Required AWS permissions -[%collapsible] -==== -* `iam:CreateAccessKey` -* `iam:CreateUser` -* `iam:DeleteAccessKey` -* `iam:DeleteUser` -* `iam:DeleteUserPolicy` -* `iam:GetUser` -* `iam:GetUserPolicy` -* `iam:ListAccessKeys` -* `iam:PutUserPolicy` -* `iam:TagUser` -* `iam:SimulatePrincipalPolicy` -==== - -The credential you provide for mint mode in Google Cloud Platform (GCP) must have the following permissions: - -.Required GCP permissions -[%collapsible] -==== -* `resourcemanager.projects.get` -* `serviceusage.services.list` -* `iam.serviceAccountKeys.create` -* `iam.serviceAccountKeys.delete` -* `iam.serviceAccountKeys.list` -* `iam.serviceAccounts.create` -* `iam.serviceAccounts.delete` -* `iam.serviceAccounts.get` -* `iam.roles.create` -* `iam.roles.get` -* `iam.roles.list` -* `iam.roles.undelete` -* `iam.roles.update` -* `resourcemanager.projects.getIamPolicy` -* `resourcemanager.projects.setIamPolicy` -==== - -//Admin credentials root secret format -include::modules/admin-credentials-root-secret-formats.adoc[leveloffset=+2] - -//Rotating cloud provider credentials manually -include::modules/manually-rotating-cloud-creds.adoc[leveloffset=+1] - -[role="_additional-resources"] -== Additional resources -* xref:../../post_installation_configuration/changing-cloud-credentials-configuration.adoc#manually-removing-cloud-creds_changing-cloud-credentials-configuration[Removing cloud provider credentials] \ No newline at end of file diff --git a/authentication/managing_cloud_provider_credentials/cco-mode-passthrough.adoc b/authentication/managing_cloud_provider_credentials/cco-mode-passthrough.adoc deleted file mode 100644 index 3598fdcce782..000000000000 --- a/authentication/managing_cloud_provider_credentials/cco-mode-passthrough.adoc +++ /dev/null @@ -1,106 +0,0 @@ -:_mod-docs-content-type: ASSEMBLY -[id="cco-mode-passthrough"] -= The Cloud Credential Operator in passthrough mode -include::_attributes/common-attributes.adoc[] -:context: cco-mode-passthrough - -toc::[] - -Passthrough mode is supported for Amazon Web Services (AWS), Microsoft Azure, Google Cloud Platform (GCP), {rh-openstack-first}, and VMware vSphere. - -In passthrough mode, the Cloud Credential Operator (CCO) passes the provided cloud credential to the components that request cloud credentials. The credential must have permissions to perform the installation and complete the operations that are required by components in the cluster, but does not need to be able to create new credentials. The CCO does not attempt to create additional limited-scoped credentials in passthrough mode. 
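In both mint and passthrough modes, the provided credential is stored as the root secret in the `kube-system` namespace; the per-cloud formats are covered by the root secret formats module included later in this assembly. As a sketch, the AWS root secret looks approximately like the following, with placeholder values:

[source,yaml]
----
apiVersion: v1
kind: Secret
metadata:
  name: aws-creds # documented root secret name for AWS
  namespace: kube-system
stringData:
  aws_access_key_id: <access_key_id>
  aws_secret_access_key: <secret_access_key>
----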
- -[NOTE] -==== -xref:../../authentication/managing_cloud_provider_credentials/cco-mode-manual.adoc#cco-mode-manual[Manual mode] is the only supported CCO configuration for Microsoft Azure Stack Hub. -==== - -[id="passthrough-mode-permissions"] -== Passthrough mode permissions requirements -When using the CCO in passthrough mode, ensure that the credential you provide meets the requirements of the cloud on which you are running or installing {product-title}. If the credentials that the CCO passes to a component are not sufficient for that component's `CredentialsRequest` CR, the component reports an error when it tries to call an API for which it lacks permissions. - -[id="passthrough-mode-permissions-aws"] -=== Amazon Web Services (AWS) permissions -The credential you provide for passthrough mode in AWS must have all the requested permissions for all `CredentialsRequest` CRs that are required by the version of {product-title} you are running or installing. - -To locate the `CredentialsRequest` CRs that are required, see xref:../../installing/installing_aws/ipi/installing-aws-customizations.adoc#manually-create-iam_installing-aws-customizations[Manually creating long-term credentials for AWS]. - -[id="passthrough-mode-permissions-azure"] -=== Microsoft Azure permissions -The credential you provide for passthrough mode in Azure must have all the requested permissions for all `CredentialsRequest` CRs that are required by the version of {product-title} you are running or installing. - -To locate the `CredentialsRequest` CRs that are required, see xref:../../installing/installing_azure/ipi/installing-azure-customizations.adoc#manually-create-iam_installing-azure-customizations[Manually creating long-term credentials for Azure]. - -[id="passthrough-mode-permissions-gcp"] -=== Google Cloud Platform (GCP) permissions -The credential you provide for passthrough mode in GCP must have all the requested permissions for all `CredentialsRequest` CRs that are required by the version of {product-title} you are running or installing. - -To locate the `CredentialsRequest` CRs that are required, see xref:../../installing/installing_gcp/installing-gcp-customizations.adoc#manually-create-iam_installing-gcp-customizations[Manually creating long-term credentials for GCP]. - -[id="passthrough-mode-permissions-rhosp"] -=== {rh-openstack-first} permissions -To install an {product-title} cluster on {rh-openstack}, the CCO requires a credential with the permissions of a `member` user role. - -[id="passthrough-mode-permissions-vsware"] -=== VMware vSphere permissions -To install an {product-title} cluster on VMware vSphere, the CCO requires a credential with the following vSphere privileges: - -.Required vSphere privileges -[cols="1,2"] -|==== -|Category |Privileges - -|Datastore -|_Allocate space_ - -|Folder -|_Create folder_, _Delete folder_ - -|vSphere Tagging -|All privileges - -|Network -|_Assign network_ - -|Resource -|_Assign virtual machine to resource pool_ - -|Profile-driven storage -|All privileges - -|vApp -|All privileges - -|Virtual machine -|All privileges - -|==== - -//Admin credentials root secret format -include::modules/admin-credentials-root-secret-formats.adoc[leveloffset=+1] - -[id="passthrough-mode-maintenance"] -== Passthrough mode credential maintenance -If `CredentialsRequest` CRs change over time as the cluster is upgraded, you must manually update the passthrough mode credential to meet the requirements.
To avoid credentials issues during an upgrade, check the `CredentialsRequest` CRs in the release image for the new version of {product-title} before upgrading. To locate the `CredentialsRequest` CRs that are required for your cloud provider, see _Manually creating long-term credentials_ for xref:../../installing/installing_aws/ipi/installing-aws-customizations.adoc#manually-create-iam_installing-aws-customizations[AWS], xref:../../installing/installing_azure/ipi/installing-azure-customizations.adoc#manually-create-iam_installing-azure-customizations[Azure], or xref:../../installing/installing_gcp/installing-gcp-customizations.adoc#manually-create-iam_installing-gcp-customizations[GCP]. - -//Rotating cloud provider credentials manually -include::modules/manually-rotating-cloud-creds.adoc[leveloffset=+2] - -[role="_additional-resources"] -.Additional resources -* xref:../../storage/container_storage_interface/persistent-storage-csi-vsphere.adoc[vSphere CSI Driver Operator] - -[id="passthrough-mode-reduce-permissions"] -== Reducing permissions after installation -When using passthrough mode, each component has the same permissions used by all other components. If you do not reduce the permissions after installing, all components have the broad permissions that are required to run the installer. - -After installation, you can reduce the permissions on your credential to only those that are required to run the cluster, as defined by the `CredentialsRequest` CRs in the release image for the version of {product-title} that you are using. - -To locate the `CredentialsRequest` CRs that are required for AWS, Azure, or GCP and learn how to change the permissions the CCO uses, see _Manually creating long-term credentials_ for xref:../../installing/installing_aws/ipi/installing-aws-customizations.adoc#manually-create-iam_installing-aws-customizations[AWS], xref:../../installing/installing_azure/ipi/installing-azure-customizations.adoc#manually-create-iam_installing-azure-customizations[Azure], or xref:../../installing/installing_gcp/installing-gcp-customizations.adoc#manually-create-iam_installing-gcp-customizations[GCP]. 
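When auditing or reducing permissions, it can help to confirm which mode the CCO is currently using. One way to check, assuming the `CloudCredential` cluster resource exposes the mode in its `spec.credentialsMode` field (an empty value indicates the default mode for the platform):

[source,terminal]
----
$ oc get cloudcredentials cluster -o jsonpath='{.spec.credentialsMode}'
----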
- -[role="_additional-resources"] -== Additional resources - -* xref:../../installing/installing_aws/ipi/installing-aws-customizations.adoc#manually-create-iam_installing-aws-customizations[Manually creating long-term credentials for AWS] -* xref:../../installing/installing_azure/ipi/installing-azure-customizations.adoc#manually-create-iam_installing-azure-customizations[Manually creating long-term credentials for Azure] -* xref:../../installing/installing_gcp/installing-gcp-customizations.adoc#manually-create-iam_installing-gcp-customizations[Manually creating long-term credentials for GCP] diff --git a/authentication/managing_cloud_provider_credentials/cco-short-term-creds.adoc b/authentication/managing_cloud_provider_credentials/cco-short-term-creds.adoc deleted file mode 100644 index 0576c46de0b9..000000000000 --- a/authentication/managing_cloud_provider_credentials/cco-short-term-creds.adoc +++ /dev/null @@ -1,170 +0,0 @@ -:_mod-docs-content-type: ASSEMBLY -[id="cco-short-term-creds"] -= Manual mode with short-term credentials for components -include::_attributes/common-attributes.adoc[] -:context: cco-short-term-creds - -toc::[] - -During installation, you can configure the Cloud Credential Operator (CCO) to operate in manual mode and use the CCO utility (`ccoctl`) to implement short-term security credentials for individual components that are created and managed outside the {product-title} cluster. - -[NOTE] -==== -This credentials strategy is supported for {aws-first}, {gcp-first}, and global {azure-full} only. - -For {aws-short} and {gcp-short} clusters, you must configure your cluster to use this strategy during installation of a new {product-title} cluster. -You cannot configure an existing {aws-short} or {gcp-short} cluster that uses a different credentials strategy to use this feature. - -If you did not configure your {azure-short} cluster to use {entra-first} during installation, you can xref:../../post_installation_configuration/changing-cloud-credentials-configuration.adoc#post-install-enable-token-auth_changing-cloud-credentials-configuration[enable this authentication method on an existing cluster]. -==== - -//todo: Should provide some more info about the benefits of this here as well. Note: Azure is not yet limited-priv, but still gets the benefit of not storing root creds on the cluster and some sort of time-based rotation - -Cloud providers use different terms for their implementation of this authentication method. - -.Short-term credentials provider terminology -|==== -|Cloud provider |Provider nomenclature - -|{aws-first} -|{aws-short} {sts-first} - -|{gcp-first} -|{gcp-wid-short} - -|Global Microsoft Azure -|{entra-first} - -|==== - -[id="cco-short-term-creds-aws_{context}"] -== {aws-short} {sts-full} - -In manual mode with {sts-first}, the individual {product-title} cluster components use the {aws-short} {sts-short} to assign components IAM roles that provide short-term, limited-privilege security credentials. These credentials are associated with IAM roles that are specific to each component that makes {aws-short} API calls. 
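As a sketch of what this means in practice, a component secret under {aws-short} {sts-short} typically carries an AWS credentials file that names an IAM role and a projected service account token instead of a static key pair. The role ARN and secret coordinates are placeholders:

[source,yaml]
----
apiVersion: v1
kind: Secret
metadata:
  name: <component_secret_name>
  namespace: <component_namespace>
stringData:
  credentials: |-
    [default]
    sts_regional_endpoints = regional
    role_arn = arn:aws:iam::<account_id>:role/<component_role_name>
    web_identity_token_file = /var/run/secrets/openshift/serviceaccount/token
----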
- -[role="_additional-resources"] -.Additional resources -* xref:../../installing/installing_aws/ipi/installing-aws-customizations.adoc#installing-aws-with-short-term-creds_installing-aws-customizations[Configuring an {aws-short} cluster to use short-term credentials] - -//AWS Security Token Service authentication process -include::modules/cco-short-term-creds-auth-flow-aws.adoc[leveloffset=+2] - -//AWS component secret formats -include::modules/cco-short-term-creds-format-aws.adoc[leveloffset=+2] - -//AWS component secret permissions requirements -include::modules/cco-short-term-creds-component-permissions-aws.adoc[leveloffset=+2] - -//OLM-managed Operator support for authentication with AWS STS -include::modules/cco-short-term-creds-aws-olm.adoc[leveloffset=+2] - -[role="_additional-resources"] -.Additional resources -* xref:../../operators/operator_sdk/token_auth/osdk-cco-aws-sts.adoc#osdk-cco-aws-sts[CCO-based workflow for OLM-managed Operators with {aws-short} {sts-short}] - -// Content stub for later addition: -//// -// Application support for AWS STS service account tokens -// Extra context so module can be reused within assembly (unset in module) -:context: aws -// Attributes used in module with cloud-specific values (unset in module) -:cloud-auth-first: {aws-short} {sts-first} -:cloud-auth-short: {aws-short} {sts-short} -include::modules/cco-short-term-creds-workloads.adoc[leveloffset=+2] - -[role="_additional-resources"] -.Additional resources -* xref:../../nodes/pods/nodes-pods-short-term-auth.adoc#nodes-pods-short-term-auth-configuring-aws_nodes-pods-short-term-auth[Configuring {aws-short} {sts-short} authentication for pods on {aws-short}] - -:context: cco-short-term-creds -//// - -[id="cco-short-term-creds-gcp_{context}"] -== {gcp-wid-short} - -In manual mode with {gcp-wid-short}, the individual {product-title} cluster components use the {gcp-short} workload identity provider to allow components to impersonate {gcp-short} service accounts using short-term, limited-privilege credentials.
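Analogous to the {aws-short} case, a {gcp-short} component secret holds an external account configuration that exchanges the projected service account token for short-lived {gcp-short} credentials. A sketch, with all identifiers as placeholders:

[source,yaml]
----
apiVersion: v1
kind: Secret
metadata:
  name: <component_secret_name>
  namespace: <component_namespace>
stringData:
  service_account.json: |-
    {
      "type": "external_account",
      "audience": "//iam.googleapis.com/projects/<project_number>/locations/global/workloadIdentityPools/<pool_id>/providers/<provider_id>",
      "subject_token_type": "urn:ietf:params:oauth:token-type:jwt",
      "token_url": "https://sts.googleapis.com/v1/token",
      "service_account_impersonation_url": "https://iamcredentials.googleapis.com/v1/projects/-/serviceAccounts/<service_account_email>:generateAccessToken",
      "credential_source": {
        "file": "/var/run/secrets/openshift/serviceaccount/token",
        "format": { "type": "text" }
      }
    }
----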
- -[role="_additional-resources"] -.Additional resources -* xref:../../installing/installing_gcp/installing-gcp-customizations.adoc#installing-gcp-with-short-term-creds_installing-gcp-customizations[Configuring a {gcp-short} cluster to use short-term credentials] - -//GCP Workload Identity authentication process -include::modules/cco-short-term-creds-auth-flow-gcp.adoc[leveloffset=+2] - -//GCP component secret formats -include::modules/cco-short-term-creds-format-gcp.adoc[leveloffset=+2] - -//GCP component secret permissions requirements (placeholder) -//include::modules/cco-short-term-creds-component-permissions-gcp.adoc[leveloffset=+2] - -//OLM-managed Operator support for authentication with GCP Workload Identity -include::modules/cco-short-term-creds-gcp-olm.adoc[leveloffset=+2] - -[role="_additional-resources"] -.Additional resources -* xref:../../operators/operator_sdk/token_auth/osdk-cco-gcp.adoc#osdk-cco-gcp[CCO-based workflow for OLM-managed Operators with {gcp-wid-first}] - -// Application support for GCP Workload Identity service account tokens -// Extra context so module can be reused within assembly (unset in module) -:context: gcp -// Attributes used in module with cloud-specific values (unset in module) -:cloud-auth-first: {gcp-wid-first} -:cloud-auth-short: {gcp-wid-short} -include::modules/cco-short-term-creds-workloads.adoc[leveloffset=+2] - -[role="_additional-resources"] -.Additional resources -* xref:../../nodes/pods/nodes-pods-short-term-auth.adoc#nodes-pods-short-term-auth-configuring-gcp_nodes-pods-short-term-auth[Configuring {gcp-wid-short} authentication for applications on {gcp-short}] - -:context: cco-short-term-creds - -[id="cco-short-term-creds-azure_{context}"] -== {entra-first} - -In manual mode with {entra-first}, the individual {product-title} cluster components use the {entra-short} provider to assign components short-term security credentials. 
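A comparable sketch for {entra-short}: the component secret identifies the client, tenant, and a federated token file rather than a client secret. All values are placeholders:

[source,yaml]
----
apiVersion: v1
kind: Secret
metadata:
  name: <component_secret_name>
  namespace: <component_namespace>
stringData:
  azure_client_id: <client_id>
  azure_tenant_id: <tenant_id>
  azure_region: <region>
  azure_subscription_id: <subscription_id>
  azure_federated_token_file: /var/run/secrets/openshift/serviceaccount/token
----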
- -[role="_additional-resources"] -.Additional resources -* xref:../../installing/installing_azure/ipi/installing-azure-customizations.adoc#installing-azure-with-short-term-creds_installing-azure-customizations[Configuring a global {azure-first} cluster to use short-term credentials] - -//Microsoft Entra Workload ID authentication process -include::modules/cco-short-term-creds-auth-flow-azure.adoc[leveloffset=+2] - -//Azure component secret formats -include::modules/cco-short-term-creds-format-azure.adoc[leveloffset=+2] - -//Azure component secret permissions requirements -include::modules/cco-short-term-creds-component-permissions-azure.adoc[leveloffset=+2] - -//OLM-managed Operator support for authentication with Microsoft Entra Workload ID -include::modules/cco-short-term-creds-azure-olm.adoc[leveloffset=+2] - -[role="_additional-resources"] -.Additional resources -* xref:../../operators/operator_sdk/token_auth/osdk-cco-azure.adoc#osdk-cco-azure[CCO-based workflow for OLM-managed Operators with {entra-first}] - -// Content stub for later addition: -//// -// Application support for Microsoft Entra Workload ID service account tokens -// Extra context so module can be reused within assembly (unset in module) -:context: azure -// Attributes used in module with cloud-specific values (unset in module) -:cloud-auth-first: {entra-first} -:cloud-auth-short: {entra-short} -include::modules/cco-short-term-creds-workloads.adoc[leveloffset=+2] - -[role="_additional-resources"] -.Additional resources -* xref:../../nodes/pods/nodes-pods-short-term-auth.adoc#nodes-pods-short-term-auth-configuring-azure_nodes-pods-short-term-auth[Configuring {entra-first} authentication for pods on {azure-short}] - -:context: cco-short-term-creds -//// - -[role="_additional-resources"] -[id="additional-resources_{context}"] -== Additional resources - -* xref:../../installing/installing_aws/ipi/installing-aws-customizations.adoc#installing-aws-with-short-term-creds_installing-aws-customizations[Configuring an {aws-short} cluster to use short-term credentials] -* xref:../../installing/installing_gcp/installing-gcp-customizations.adoc#installing-gcp-with-short-term-creds_installing-gcp-customizations[Configuring a {gcp-short} cluster to use short-term credentials] -* xref:../../installing/installing_azure/ipi/installing-azure-customizations.adoc#installing-azure-with-short-term-creds_installing-azure-customizations[Configuring a global {azure-first} cluster to use short-term credentials] -* xref:../../updating/preparing_for_updates/preparing-manual-creds-update.adoc#preparing-manual-creds-update[Preparing to update a cluster with manually maintained credentials] \ No newline at end of file diff --git a/authentication/managing_cloud_provider_credentials/images b/authentication/managing_cloud_provider_credentials/images deleted file mode 120000 index 847b03ed0541..000000000000 --- a/authentication/managing_cloud_provider_credentials/images +++ /dev/null @@ -1 +0,0 @@ -../../images/ \ No newline at end of file diff --git a/authentication/managing_cloud_provider_credentials/modules b/authentication/managing_cloud_provider_credentials/modules deleted file mode 120000 index 36719b9de743..000000000000 --- a/authentication/managing_cloud_provider_credentials/modules +++ /dev/null @@ -1 +0,0 @@ -../../modules/ \ No newline at end of file diff --git a/authentication/managing_cloud_provider_credentials/snippets b/authentication/managing_cloud_provider_credentials/snippets deleted file mode 120000 index 5a3f5add140e..000000000000 ---
a/authentication/managing_cloud_provider_credentials/snippets +++ /dev/null @@ -1 +0,0 @@ -../../snippets/ \ No newline at end of file diff --git a/authentication/modules b/authentication/modules deleted file mode 120000 index 464b823aca16..000000000000 --- a/authentication/modules +++ /dev/null @@ -1 +0,0 @@ -../modules \ No newline at end of file diff --git a/authentication/osd-admin-roles.adoc b/authentication/osd-admin-roles.adoc deleted file mode 100644 index f600adf8ea16..000000000000 --- a/authentication/osd-admin-roles.adoc +++ /dev/null @@ -1,13 +0,0 @@ -:_mod-docs-content-type: ASSEMBLY -[id="osd-admin-roles"] -= Managing administration roles and users -include::_attributes/attributes-openshift-dedicated.adoc[] -:context: osd-admin-roles - -toc::[] - -// TODO: needs intro - -include::modules/understanding-admin-roles.adoc[leveloffset=+1] - -include::modules/managing-dedicated-administrators.adoc[leveloffset=+1] diff --git a/authentication/osd-revoking-cluster-privileges.adoc b/authentication/osd-revoking-cluster-privileges.adoc deleted file mode 100644 index e533f4d93ad2..000000000000 --- a/authentication/osd-revoking-cluster-privileges.adoc +++ /dev/null @@ -1,13 +0,0 @@ -:_mod-docs-content-type: ASSEMBLY -[id="osd-revoking-cluster-privileges"] -= Revoking privileges and access to an {product-title} cluster -include::_attributes/attributes-openshift-dedicated.adoc[] -:context: osd-revoking-cluster-privileges - -toc::[] - -[role="_abstract"] -As cluster owner, you can revoke admin privileges and user access to a {product-title} cluster. - -include::modules/osd-revoke-admin-privileges.adoc[leveloffset=+1] -include::modules/osd-revoke-user-access.adoc[leveloffset=+1] \ No newline at end of file diff --git a/authentication/remove-kubeadmin.adoc b/authentication/remove-kubeadmin.adoc deleted file mode 100644 index 19addc10ca9d..000000000000 --- a/authentication/remove-kubeadmin.adoc +++ /dev/null @@ -1,11 +0,0 @@ -:_mod-docs-content-type: ASSEMBLY -[id="removing-kubeadmin"] -= Removing the kubeadmin user -include::_attributes/common-attributes.adoc[] -:context: removing-kubeadmin - -toc::[] - -include::modules/authentication-kubeadmin.adoc[leveloffset=+1] - -include::modules/authentication-remove-kubeadmin.adoc[leveloffset=+1] diff --git a/authentication/sd-configuring-identity-providers.adoc b/authentication/sd-configuring-identity-providers.adoc deleted file mode 100644 index 1c1f1f93d936..000000000000 --- a/authentication/sd-configuring-identity-providers.adoc +++ /dev/null @@ -1,33 +0,0 @@ -:_mod-docs-content-type: ASSEMBLY -[id="sd-configuring-identity-providers"] -= Configuring identity providers -include::_attributes/attributes-openshift-dedicated.adoc[] -:context: sd-configuring-identity-providers - -toc::[] - -After your {product-title} cluster is created, you must configure identity providers to determine how users log in to access the cluster. - -ifdef::openshift-rosa,openshift-rosa-hcp[] -The following topics describe how to configure an identity provider using {cluster-manager} console. Alternatively, you can use the ROSA CLI (`rosa`) to configure an identity provider and access the cluster. 
-endif::openshift-rosa,openshift-rosa-hcp[] - -include::modules/understanding-idp.adoc[leveloffset=+1] -include::modules/identity-provider-parameters.adoc[leveloffset=+2] -include::modules/config-github-idp.adoc[leveloffset=+1] -include::modules/config-gitlab-idp.adoc[leveloffset=+1] -include::modules/config-google-idp.adoc[leveloffset=+1] -include::modules/config-ldap-idp.adoc[leveloffset=+1] -include::modules/config-openid-idp.adoc[leveloffset=+1] -include::modules/config-htpasswd-idp.adoc[leveloffset=+1] -ifdef::openshift-dedicated[] -include::modules/access-cluster.adoc[leveloffset=+1] -endif::openshift-dedicated[] - -ifdef::openshift-rosa[] -[id="additional-resources-cluster-access-sts"] -[role="_additional-resources"] -== Additional resources -* xref:../rosa_install_access_delete_clusters/rosa-sts-accessing-cluster.adoc#rosa-sts-accessing-cluster[Accessing a cluster] -* xref:../rosa_getting_started/rosa-sts-getting-started-workflow.adoc#rosa-sts-understanding-the-deployment-workflow[Understanding the ROSA with STS deployment workflow] -endif::openshift-rosa[] diff --git a/authentication/snippets b/authentication/snippets deleted file mode 120000 index 9f5bc7e4dde0..000000000000 --- a/authentication/snippets +++ /dev/null @@ -1 +0,0 @@ -../snippets \ No newline at end of file diff --git a/authentication/tokens-scoping.adoc b/authentication/tokens-scoping.adoc deleted file mode 100644 index 71b1b0c7896c..000000000000 --- a/authentication/tokens-scoping.adoc +++ /dev/null @@ -1,11 +0,0 @@ -:_mod-docs-content-type: ASSEMBLY -[id="tokens-scoping"] -= Scoping tokens -include::_attributes/common-attributes.adoc[] -:context: configuring-internal-oauth - -toc::[] - -include::modules/tokens-scoping-about.adoc[leveloffset=+1] - -include::modules/unauthenticated-users-cluster-role-binding.adoc[leveloffset=+1] \ No newline at end of file diff --git a/authentication/understanding-and-creating-service-accounts.adoc b/authentication/understanding-and-creating-service-accounts.adoc deleted file mode 100644 index d0cec995b025..000000000000 --- a/authentication/understanding-and-creating-service-accounts.adoc +++ /dev/null @@ -1,19 +0,0 @@ -:_mod-docs-content-type: ASSEMBLY -[id="understanding-and-creating-service-accounts"] -= Understanding and creating service accounts -include::_attributes/common-attributes.adoc[] -:context: understanding-service-accounts - -toc::[] - -include::modules/service-accounts-overview.adoc[leveloffset=+1] - -include::modules/service-account-auto-secret-removed.adoc[leveloffset=+2] - -// include::modules/service-accounts-enabling-authentication.adoc[leveloffset=+1] - -include::modules/service-accounts-creating.adoc[leveloffset=+1] - -// include::modules/service-accounts-configuration-parameters.adoc[leveloffset=+1] - -include::modules/service-accounts-granting-roles.adoc[leveloffset=+1] diff --git a/authentication/understanding-and-managing-pod-security-admission.adoc b/authentication/understanding-and-managing-pod-security-admission.adoc deleted file mode 100644 index c4ecac8a8f2a..000000000000 --- a/authentication/understanding-and-managing-pod-security-admission.adoc +++ /dev/null @@ -1,50 +0,0 @@ -:_mod-docs-content-type: ASSEMBLY -[id="understanding-and-managing-pod-security-admission"] -= Understanding and managing pod security admission -include::_attributes/common-attributes.adoc[] -:context: understanding-and-managing-pod-security-admission - -toc::[] - -Pod security admission is an implementation of the 
link:https://kubernetes.io/docs/concepts/security/pod-security-standards/[Kubernetes pod security standards]. Use pod security admission to restrict the behavior of pods. - -// About pod security admission -include::modules/security-context-constraints-psa-about.adoc[leveloffset=+1] - -// Understanding pod security admission coexistence -include::modules/security-context-constraints-psa-coexistence.adoc[leveloffset=+2] - -// About pod security admission synchronization -include::modules/security-context-constraints-psa-synchronization.adoc[leveloffset=+1] - -// Pod security admission synchronization namespace exclusions -include::modules/security-context-constraints-psa-sync-exclusions.adoc[leveloffset=+2] - -// Controlling pod security admission synchronization -include::modules/security-context-constraints-psa-opting.adoc[leveloffset=+1] - -.Additional resources - -* xref:../authentication/understanding-and-managing-pod-security-admission.adoc#security-context-constraints-psa-sync-exclusions_understanding-and-managing-pod-security-admission[Pod security admission synchronization namespace exclusions] - -// Configuring pod security admission for a namespace -include::modules/security-context-constraints-psa-label.adoc[leveloffset=+1] - -// About pod security admission alerts -include::modules/security-context-constraints-psa-rectifying.adoc[leveloffset=+1] - -// OSD and ROSA dedicated-admin users cannot use the must-gather tool. -ifndef::openshift-dedicated,openshift-rosa,openshift-rosa-hcp[] -// Identifying pod security violations -include::modules/security-context-constraints-psa-alert-eval.adoc[leveloffset=+2] -endif::openshift-dedicated,openshift-rosa,openshift-rosa-hcp[] - -[role="_additional-resources"] -[id="additional-resources_managing-pod-security-admission"] -== Additional resources - -// Module not included in the HCP distro -ifndef::openshift-rosa-hcp[] -* xref:../security/audit-log-view.adoc#nodes-nodes-audit-log-basic-viewing_audit-log-view[Viewing audit logs] -endif::openshift-rosa-hcp[] -* xref:../authentication/managing-security-context-constraints.adoc#managing-pod-security-policies[Managing security context constraints] diff --git a/authentication/understanding-authentication.adoc b/authentication/understanding-authentication.adoc deleted file mode 100644 index 975a771f0169..000000000000 --- a/authentication/understanding-authentication.adoc +++ /dev/null @@ -1,32 +0,0 @@ -:_mod-docs-content-type: ASSEMBLY -[id="understanding-authentication"] -= Understanding authentication -include::_attributes/common-attributes.adoc[] -:context: understanding-authentication - -toc::[] - -For users to interact with {product-title}, they must first authenticate -to the cluster. The authentication layer identifies the user associated with requests to the -{product-title} API. The authorization layer then uses information about the -requesting user to determine if the request is allowed. - -ifdef::openshift-enterprise,openshift-webscale,openshift-origin[] -As an administrator, you can configure authentication for {product-title}. 
-endif::[] - -include::modules/rbac-users.adoc[leveloffset=+1] - -include::modules/rbac-groups.adoc[leveloffset=+1] - -include::modules/rbac-api-authentication.adoc[leveloffset=+1] - -include::modules/oauth-server-overview.adoc[leveloffset=+2] - -include::modules/oauth-token-requests.adoc[leveloffset=+2] - -ifdef::openshift-enterprise,openshift-webscale,openshift-origin[] -include::modules/authentication-api-impersonation.adoc[leveloffset=+3] - -include::modules/authentication-prometheus-system-metrics.adoc[leveloffset=+3] -endif::[] diff --git a/authentication/understanding-identity-provider.adoc b/authentication/understanding-identity-provider.adoc deleted file mode 100644 index 9c4a9a8f5f2d..000000000000 --- a/authentication/understanding-identity-provider.adoc +++ /dev/null @@ -1,87 +0,0 @@ -:_mod-docs-content-type: ASSEMBLY -[id="understanding-identity-provider"] -= Understanding identity provider configuration -include::_attributes/common-attributes.adoc[] -:context: understanding-identity-provider - -toc::[] - -The {product-title} master includes a built-in OAuth server. Developers and -administrators obtain OAuth access tokens to authenticate themselves to the API. - -As an administrator, you can configure OAuth to specify an identity provider -after you install your cluster. - -include::modules/identity-provider-overview.adoc[leveloffset=+1] - -[id="supported-identity-providers"] -== Supported identity providers - -You can configure the following types of identity providers: - -[cols="2a,8a",options="header"] -|=== - -|Identity provider -|Description - -|xref:../authentication/identity_providers/configuring-htpasswd-identity-provider.adoc#configuring-htpasswd-identity-provider[htpasswd] -|Configure the `htpasswd` identity provider to validate user names and passwords -against a flat file generated using -link:http://httpd.apache.org/docs/2.4/programs/htpasswd.html[`htpasswd`]. - -|xref:../authentication/identity_providers/configuring-keystone-identity-provider.adoc#configuring-keystone-identity-provider[Keystone] -|Configure the `keystone` identity provider to integrate -your {product-title} cluster with Keystone to enable shared authentication with -an OpenStack Keystone v3 server configured to store users in an internal -database. - -|xref:../authentication/identity_providers/configuring-ldap-identity-provider.adoc#configuring-ldap-identity-provider[LDAP] -|Configure the `ldap` identity provider to validate user names and passwords -against an LDAPv3 server, using simple bind authentication. - -|xref:../authentication/identity_providers/configuring-basic-authentication-identity-provider.adoc#configuring-basic-authentication-identity-provider[Basic authentication] -|Configure a `basic-authentication` identity provider for users to log in to -{product-title} with credentials validated against a remote identity provider. -Basic authentication is a generic backend integration mechanism. - -|xref:../authentication/identity_providers/configuring-request-header-identity-provider.adoc#configuring-request-header-identity-provider[Request header] -|Configure a `request-header` identity provider to identify users from request -header values, such as `X-Remote-User`. It is typically used in combination with -an authenticating proxy, which sets the request header value. 
- -|xref:../authentication/identity_providers/configuring-github-identity-provider.adoc#configuring-github-identity-provider[GitHub or GitHub Enterprise] -|Configure a `github` identity provider to validate user names and passwords -against GitHub or GitHub Enterprise's OAuth authentication server. - -|xref:../authentication/identity_providers/configuring-gitlab-identity-provider.adoc#configuring-gitlab-identity-provider[GitLab] -|Configure a `gitlab` identity provider to use -link:https://gitlab.com/[GitLab.com] or any other GitLab instance as an identity -provider. - -|xref:../authentication/identity_providers/configuring-google-identity-provider.adoc#configuring-google-identity-provider[Google] -|Configure a `google` identity provider using -link:https://developers.google.com/identity/protocols/OpenIDConnect[Google's OpenID Connect integration]. - -|xref:../authentication/identity_providers/configuring-oidc-identity-provider.adoc#configuring-oidc-identity-provider[OpenID Connect] -|Configure an `oidc` identity provider to integrate with an OpenID Connect -identity provider using an -link:http://openid.net/specs/openid-connect-core-1_0.html#CodeFlowAuth[Authorization Code Flow]. - -|=== - -Once an identity provider has been defined, you can -xref:../authentication/using-rbac.adoc#authorization-overview_using-rbac[use RBAC to define and apply permissions]. - -include::modules/authentication-remove-kubeadmin.adoc[leveloffset=+1] - -include::modules/identity-provider-parameters.adoc[leveloffset=+1] - -include::modules/identity-provider-default-CR.adoc[leveloffset=+1] - -include::modules/identity-provider-provisioning-user-lookup-mapping.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources -* link:https://access.redhat.com/solutions/6006921[How to create user, identity and map user and identity in LDAP authentication for `mappingMethod` as `lookup` inside the OAuth manifest] -* link:https://access.redhat.com/solutions/7072510[How to create user, identity and map user and identity in OIDC authentication for `mappingMethod` as `lookup`] diff --git a/authentication/using-rbac.adoc b/authentication/using-rbac.adoc deleted file mode 100644 index acefe675e3e9..000000000000 --- a/authentication/using-rbac.adoc +++ /dev/null @@ -1,46 +0,0 @@ -:_mod-docs-content-type: ASSEMBLY -[id="using-rbac"] -= Using RBAC to define and apply permissions -include::_attributes/common-attributes.adoc[] -:context: using-rbac - -toc::[] - -include::modules/rbac-overview.adoc[leveloffset=+1] - -include::modules/rbac-projects-namespaces.adoc[leveloffset=+1] - -include::modules/rbac-default-projects.adoc[leveloffset=+1] - -include::modules/rbac-viewing-cluster-roles.adoc[leveloffset=+1] - -include::modules/rbac-viewing-local-roles.adoc[leveloffset=+1] - -include::modules/rbac-adding-roles.adoc[leveloffset=+1] - -ifdef::openshift-enterprise,openshift-webscale,openshift-origin,openshift-dedicated,openshift-rosa,openshift-rosa-hcp[] -include::modules/rbac-creating-local-role.adoc[leveloffset=+1] - -include::modules/rbac-creating-cluster-role.adoc[leveloffset=+1] -endif::[] - -include::modules/rbac-local-role-binding-commands.adoc[leveloffset=+1] - -ifdef::openshift-enterprise,openshift-webscale,openshift-origin,openshift-dedicated,openshift-rosa,openshift-rosa-hcp[] -include::modules/rbac-cluster-role-binding-commands.adoc[leveloffset=+1] -endif::[] - -ifndef::openshift-dedicated,openshift-rosa,openshift-rosa-hcp[] -include::modules/rbac-creating-cluster-admin.adoc[leveloffset=+1] 
-endif::openshift-dedicated,openshift-rosa,openshift-rosa-hcp[] - -ifdef::openshift-rosa,openshift-rosa-hcp[] -include::modules/rosa-create-cluster-admins.adoc[leveloffset=+1] -include::modules/rosa-create-dedicated-cluster-admins.adoc[leveloffset=+1] -endif::openshift-rosa,openshift-rosa-hcp[] - -ifdef::openshift-dedicated[] -include::modules/osd-grant-admin-privileges.adoc[leveloffset=+1] -endif::openshift-dedicated[] - -include::modules/unauthenticated-users-cluster-role-binding-con.adoc[leveloffset=+1] \ No newline at end of file diff --git a/authentication/using-service-accounts-as-oauth-client.adoc b/authentication/using-service-accounts-as-oauth-client.adoc deleted file mode 100644 index c8fadd1ca416..000000000000 --- a/authentication/using-service-accounts-as-oauth-client.adoc +++ /dev/null @@ -1,9 +0,0 @@ -:_mod-docs-content-type: ASSEMBLY -[id="using-service-accounts-as-oauth-client"] -= Using a service account as an OAuth client -include::_attributes/common-attributes.adoc[] -:context: using-service-accounts-as-oauth-client - -toc::[] - -include::modules/service-accounts-as-oauth-clients.adoc[leveloffset=+1] diff --git a/authentication/using-service-accounts-in-applications.adoc b/authentication/using-service-accounts-in-applications.adoc deleted file mode 100644 index 8a31f8792af4..000000000000 --- a/authentication/using-service-accounts-in-applications.adoc +++ /dev/null @@ -1,17 +0,0 @@ -:_mod-docs-content-type: ASSEMBLY -[id="using-service-accounts"] -= Using service accounts in applications -include::_attributes/common-attributes.adoc[] -:context: using-service-accounts - -toc::[] - -include::modules/service-accounts-overview.adoc[leveloffset=+1] - -include::modules/service-accounts-default.adoc[leveloffset=+1] - -include::modules/service-account-auto-secret-removed.adoc[leveloffset=+2] - -include::modules/service-accounts-creating.adoc[leveloffset=+1] - -// include::modules/service-accounts-using-credentials-inside-a-container.adoc[leveloffset=+1] diff --git a/backup_and_restore/application_backup_and_restore/_attributes b/backup_and_restore/application_backup_and_restore/_attributes deleted file mode 120000 index 20cc1dcb77bf..000000000000 --- a/backup_and_restore/application_backup_and_restore/_attributes +++ /dev/null @@ -1 +0,0 @@ -../../_attributes/ \ No newline at end of file diff --git a/backup_and_restore/application_backup_and_restore/aws-sts/_attributes b/backup_and_restore/application_backup_and_restore/aws-sts/_attributes deleted file mode 120000 index bf7c2529fdb4..000000000000 --- a/backup_and_restore/application_backup_and_restore/aws-sts/_attributes +++ /dev/null @@ -1 +0,0 @@ -../../../_attributes/ \ No newline at end of file diff --git a/backup_and_restore/application_backup_and_restore/aws-sts/images b/backup_and_restore/application_backup_and_restore/aws-sts/images deleted file mode 120000 index 4399cbb3c0f3..000000000000 --- a/backup_and_restore/application_backup_and_restore/aws-sts/images +++ /dev/null @@ -1 +0,0 @@ -../../../images/ \ No newline at end of file diff --git a/backup_and_restore/application_backup_and_restore/aws-sts/modules b/backup_and_restore/application_backup_and_restore/aws-sts/modules deleted file mode 120000 index 5be29a99c161..000000000000 --- a/backup_and_restore/application_backup_and_restore/aws-sts/modules +++ /dev/null @@ -1 +0,0 @@ -../../../modules \ No newline at end of file diff --git a/backup_and_restore/application_backup_and_restore/aws-sts/oadp-aws-sts.adoc 
b/backup_and_restore/application_backup_and_restore/aws-sts/oadp-aws-sts.adoc deleted file mode 100644 index 3c8c5576e426..000000000000 --- a/backup_and_restore/application_backup_and_restore/aws-sts/oadp-aws-sts.adoc +++ /dev/null @@ -1,43 +0,0 @@ -:_mod-docs-content-type: ASSEMBLY -[id="oadp-aws-sts"] -= Backing up applications on AWS STS using OADP -include::_attributes/common-attributes.adoc[] -:context: oadp-aws-sts-backing-up-applications - -toc::[] - -You install the {oadp-first} with {aws-first} by installing the OADP Operator. The Operator installs link:https://{velero-domain}/docs/v{velero-version}/[Velero {velero-version}]. - -include::snippets/oadp-mtc-operator.adoc[] - -You configure {aws-short} for Velero, create a default `Secret`, and then install the Data Protection Application. For more details, see xref:../../../backup_and_restore/application_backup_and_restore/installing/oadp-installing-operator.adoc#oadp-installing-operator-doc[Installing the OADP Operator]. - -To install the OADP Operator in a restricted network environment, you must first disable the default OperatorHub sources and mirror the Operator catalog. See xref:../../../disconnected/using-olm.adoc#olm-restricted-networks[Using Operator Lifecycle Manager in disconnected environments] for details. - -You can install {oadp-short} on an AWS {sts-first} (AWS STS) cluster manually. Amazon {aws-short} provides {aws-short} STS as a web service that enables you to request temporary, limited-privilege credentials for users. You use STS to provide trusted users with temporary access to resources via API calls, your {aws-short} console, or the {aws-short} command-line interface (CLI). - -Before installing {oadp-first}, you must set up role and policy credentials for {oadp-short} so that it can use the {aws-full} API. - -This process is performed in the following two stages: - -. Prepare {aws-short} credentials. -. Install the OADP Operator and give it an IAM role. 
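The credential preparation stage hinges on an IAM role that the cluster's OIDC identity provider is trusted to assume. As a rough sketch, the trust policy for such a role looks like the following; the account ID, OIDC provider path, and service account subject are placeholders that must match your cluster:

[source,json]
----
{
  "Version": "2012-10-17",
  "Statement": [
    {
      "Effect": "Allow",
      "Principal": {
        "Federated": "arn:aws:iam::<account_id>:oidc-provider/<oidc_provider_path>"
      },
      "Action": "sts:AssumeRoleWithWebIdentity",
      "Condition": {
        "StringEquals": {
          "<oidc_provider_path>:sub": "system:serviceaccount:openshift-adp:velero"
        }
      }
    }
  ]
}
----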
- -include::modules/preparing-aws-sts-credentials-for-oadp.adoc[leveloffset=+1] - -include::modules/oadp-setting-resource-limits-and-requests.adoc[leveloffset=+2] - -include::modules/installing-oadp-aws-sts.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* xref:../../../operators/user/olm-installing-operators-in-namespace.adoc#olm-installing-from-operatorhub-using-web-console_olm-installing-operators-in-namespace[Installing from OperatorHub using the web console] -* xref:../../../backup_and_restore/application_backup_and_restore/backing_up_and_restoring/backing-up-applications.adoc#backing-up-applications[Backing up applications] - -[id="oadp-aws-sts-backing-up-and-cleaning"] -== Backing up a workload on OADP AWS STS, with an optional cleanup - -include::modules/performing-a-backup-oadp-aws-sts.adoc[leveloffset=+2] - -include::modules/cleanup-a-backup-oadp-aws-sts.adoc[leveloffset=+2] \ No newline at end of file diff --git a/backup_and_restore/application_backup_and_restore/aws-sts/snippets b/backup_and_restore/application_backup_and_restore/aws-sts/snippets deleted file mode 120000 index ce62fd7c41e2..000000000000 --- a/backup_and_restore/application_backup_and_restore/aws-sts/snippets +++ /dev/null @@ -1 +0,0 @@ -../../../snippets/ \ No newline at end of file diff --git a/backup_and_restore/application_backup_and_restore/backing_up_and_restoring/_attributes b/backup_and_restore/application_backup_and_restore/backing_up_and_restoring/_attributes deleted file mode 120000 index bf7c2529fdb4..000000000000 --- a/backup_and_restore/application_backup_and_restore/backing_up_and_restoring/_attributes +++ /dev/null @@ -1 +0,0 @@ -../../../_attributes/ \ No newline at end of file diff --git a/backup_and_restore/application_backup_and_restore/backing_up_and_restoring/backing-up-applications.adoc b/backup_and_restore/application_backup_and_restore/backing_up_and_restoring/backing-up-applications.adoc deleted file mode 100644 index 54b4437ce5f9..000000000000 --- a/backup_and_restore/application_backup_and_restore/backing_up_and_restoring/backing-up-applications.adoc +++ /dev/null @@ -1,76 +0,0 @@ -:_mod-docs-content-type: ASSEMBLY -[id="backing-up-applications"] -= Backing up applications -include::_attributes/common-attributes.adoc[] -include::_attributes/attributes-openshift-dedicated.adoc[] -:context: backing-up-applications - -toc::[] - -Frequent backups might consume storage on the backup storage location. If you are using non-local backups, for example, S3 buckets, check the backup frequency, the retention time, and the amount of data in the persistent volumes (PVs). -Because every backup that is taken remains until it expires, also check the time to live (TTL) setting of the schedule. - - -You can back up applications by creating a `Backup` custom resource (CR). For more information, see xref:../../../backup_and_restore/application_backup_and_restore/backing_up_and_restoring/oadp-creating-backup-cr.adoc#oadp-creating-backup-cr-doc[Creating a Backup CR]. - -ifdef::openshift-rosa,openshift-rosa-hcp[] -The `Backup` CR creates backup files for Kubernetes resources and internal images on S3 object storage. -endif::openshift-rosa,openshift-rosa-hcp[] - -ifndef::openshift-rosa,openshift-rosa-hcp[] -* The `Backup` CR creates backup files for Kubernetes resources and internal images on S3 object storage. -* If your cloud provider has a native snapshot API or supports CSI snapshots, the `Backup` CR backs up persistent volumes (PVs) by creating snapshots.
For more information about working with CSI snapshots, see xref:../../../backup_and_restore/application_backup_and_restore/backing_up_and_restoring/oadp-backing-up-pvs-csi-doc.adoc#oadp-backing-up-pvs-csi-doc[Backing up persistent volumes with CSI snapshots]. - -For more information about CSI volume snapshots, see xref:../../../storage/container_storage_interface/persistent-storage-csi-snapshots.adoc#persistent-storage-csi-snapshots[CSI volume snapshots]. - -:FeatureName: The `CloudStorage` API, which automates the creation of a bucket for object storage, -include::snippets/technology-preview.adoc[] - -[NOTE] -==== -The `CloudStorage` API is a Technology Preview feature when you use a `CloudStorage` object and want OADP to use the `CloudStorage` API to automatically create an S3 bucket for use as a `BackupStorageLocation`. - -The `CloudStorage` API supports manually creating a `BackupStorageLocation` object by specifying an existing S3 bucket. The `CloudStorage` API that creates an S3 bucket automatically is currently only enabled for AWS S3 storage. -==== - -* If your cloud provider does not support snapshots or if your applications are on NFS data volumes, you can create backups by using Kopia or Restic. See xref:../../../backup_and_restore/application_backup_and_restore/backing_up_and_restoring/oadp-backing-up-applications-restic-doc.adoc#oadp-backing-up-applications-restic-doc[Backing up applications with File System Backup: Kopia or Restic]. - -include::snippets/pod-volume-restore-snapshot-read-only.adoc[] - -[IMPORTANT] -==== -The {oadp-first} does not support backing up volume snapshots that were created by other software. -==== -endif::openshift-rosa,openshift-rosa-hcp[] - -include::modules/oadp-review-backup-restore.adoc[leveloffset=+1] - -You can create backup hooks to run commands before or after the backup operation. See xref:../../../backup_and_restore/application_backup_and_restore/backing_up_and_restoring/oadp-creating-backup-hooks-doc.adoc#oadp-creating-backup-hooks-doc[Creating backup hooks]. - -You can schedule backups by creating a `Schedule` CR instead of a `Backup` CR. See xref:../../../backup_and_restore/application_backup_and_restore/backing_up_and_restoring/oadp-scheduling-backups-doc.adoc#oadp-scheduling-backups-doc[Scheduling backups using Schedule CR]. - -// include::modules/oadp-creating-backup-cr.adoc[leveloffset=+1] -// include::modules/oadp-backing-up-pvs-csi.adoc[leveloffset=+1] -// include::modules/oadp-backing-up-applications-restic.adoc[leveloffset=+1] - -[id="known-issues-backing-up-applications"] -== Known issues - -{product-title} {product-version} enforces a pod security admission (PSA) policy that can hinder the readiness of pods during a Restic restore process. - -This issue has been resolved in the OADP 1.1.6 and OADP 1.2.2 releases; therefore, it is recommended that you upgrade to these releases. - -ifndef::openshift-rosa,openshift-rosa-hcp[] -For more information, see xref:../../../backup_and_restore/application_backup_and_restore/restic-issues.adoc#oadp-restic-restore-failing-psa-policy_restic-issues[Restic restore partially failing on OCP 4.15 due to changed PSA policy]. -endif::openshift-rosa,openshift-rosa-hcp[] - -// TODO: Add xrefs to ROSA HCP when Operators book is added.
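For the `Schedule` CR mentioned earlier in this assembly, a minimal sketch follows; the name, namespace to back up, and cron expression are placeholders:

[source,yaml]
----
apiVersion: velero.io/v1
kind: Schedule
metadata:
  name: <schedule_name>
  namespace: openshift-adp
spec:
  schedule: 0 7 * * * # cron expression; this example runs daily at 07:00
  template:
    includedNamespaces:
    - <namespace>
    ttl: 720h0m0s # retention period; interacts with the storage considerations noted above
----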
-ifndef::openshift-rosa-hcp[] -[role="_additional-resources"] -.Additional resources -* xref:../../../operators/admin/olm-adding-operators-to-cluster.adoc#olm-installing-operators-from-operatorhub_olm-adding-operators-to-a-cluster[Installing Operators on clusters for administrators] -// This xref is not included in the ROSA docs. -ifndef::openshift-rosa[] -* xref:../../../operators/user/olm-installing-operators-in-namespace.adoc#olm-installing-operators-in-namespace[Installing Operators in namespaces for non-administrators] -endif::openshift-rosa[] -endif::openshift-rosa-hcp[] diff --git a/backup_and_restore/application_backup_and_restore/backing_up_and_restoring/images b/backup_and_restore/application_backup_and_restore/backing_up_and_restoring/images deleted file mode 120000 index 5fa6987088da..000000000000 --- a/backup_and_restore/application_backup_and_restore/backing_up_and_restoring/images +++ /dev/null @@ -1 +0,0 @@ -../../images \ No newline at end of file diff --git a/backup_and_restore/application_backup_and_restore/backing_up_and_restoring/modules b/backup_and_restore/application_backup_and_restore/backing_up_and_restoring/modules deleted file mode 120000 index 8b0e8540076d..000000000000 --- a/backup_and_restore/application_backup_and_restore/backing_up_and_restoring/modules +++ /dev/null @@ -1 +0,0 @@ -../../modules \ No newline at end of file diff --git a/backup_and_restore/application_backup_and_restore/backing_up_and_restoring/oadp-about-kopia.adoc b/backup_and_restore/application_backup_and_restore/backing_up_and_restoring/oadp-about-kopia.adoc deleted file mode 100644 index 61b3e0947f5b..000000000000 --- a/backup_and_restore/application_backup_and_restore/backing_up_and_restoring/oadp-about-kopia.adoc +++ /dev/null @@ -1,43 +0,0 @@ -:_mod-docs-content-type: ASSEMBLY -[id="oadp-about-kopia"] -= About Kopia -include::_attributes/common-attributes.adoc[] -:context: oadp-about-kopia - -toc::[] - -Kopia is a fast and secure open-source backup and restore tool that allows you to create encrypted snapshots of your data and save the snapshots to remote or cloud storage of your choice. - -Kopia supports network and local storage locations, and many cloud or remote storage locations, including: - -* Amazon S3 and any cloud storage that is compatible with S3 -* Azure Blob Storage -* Google Cloud Storage platform - -Kopia uses content-addressable storage for snapshots: - -* Snapshots are always incremental; data that is already included in previous snapshots is not re-uploaded to the repository. A file is only uploaded to the repository again if it is modified. -* Stored data is deduplicated; if multiple copies of the same file exist, only one of them is stored. -* If files are moved or renamed, Kopia can recognize that they have the same content and does not upload them again. - - -[id="oadp-kopia-integration"] -== OADP integration with Kopia - -OADP 1.3 supports Kopia as the backup mechanism for pod volume backup in addition to Restic. You must choose one or the other at installation by setting the `uploaderType` field in the `DataProtectionApplication` custom resource (CR). The possible values are `restic` or `kopia`. If you do not specify an `uploaderType`, OADP 1.3 defaults to using Kopia as the backup mechanism. The data is written to and read from a unified repository. 
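OADP drives Kopia entirely through Velero, so you do not run Kopia by hand; still, the standalone CLI illustrates the repository-and-snapshot model described above. A sketch with placeholder values:

[source,terminal]
----
$ kopia repository connect s3 --bucket=<bucket_name> --access-key=<access_key> --secret-access-key=<secret_key>
$ kopia snapshot create /path/to/data
$ kopia snapshot list
----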
- -The following example shows a `DataProtectionApplication` CR configured for using Kopia: - -[source,yaml] ----- -apiVersion: oadp.openshift.io/v1alpha1 -kind: DataProtectionApplication -metadata: - name: dpa-sample -spec: - configuration: - nodeAgent: - enable: true - uploaderType: kopia -# ... ----- \ No newline at end of file diff --git a/backup_and_restore/application_backup_and_restore/backing_up_and_restoring/oadp-backing-up-applications-restic-doc.adoc b/backup_and_restore/application_backup_and_restore/backing_up_and_restoring/oadp-backing-up-applications-restic-doc.adoc deleted file mode 100644 index e0547acbfdc6..000000000000 --- a/backup_and_restore/application_backup_and_restore/backing_up_and_restoring/oadp-backing-up-applications-restic-doc.adoc +++ /dev/null @@ -1,65 +0,0 @@ - -:_mod-docs-content-type: ASSEMBLY -[id="oadp-backing-up-applications-restic-doc"] -= Backing up applications with File System Backup: Kopia or Restic -include::_attributes/common-attributes.adoc[] -:context: backing-up-applications - -toc::[] - -You can use OADP to back up and restore Kubernetes volumes attached to pods from the file system of the volumes. This process is called File System Backup (FSB) or Pod Volume Backup (PVB). It is accomplished by using modules from the open source backup tools Restic or Kopia. - -If your cloud provider does not support snapshots or if your applications are on NFS data volumes, you can create backups by using FSB. - -[NOTE] -==== -link:https://restic.net/[Restic] is installed by the OADP Operator by default. If you prefer, you can install link:https://kopia.io/[Kopia] instead. -==== - -FSB integration with OADP provides a solution for backing up and restoring almost any type of Kubernetes volumes. This integration is an additional capability of OADP and is not a replacement for existing functionality. - -You back up Kubernetes resources, internal images, and persistent volumes with Kopia or Restic by editing the `Backup` custom resource (CR). - -You do not need to specify a snapshot location in the `DataProtectionApplication` CR. - -[NOTE] -==== -In OADP version 1.3 and later, you can use either Kopia or Restic for backing up applications. - -For the Built-in DataMover, you must use Kopia. - -In OADP version 1.2 and earlier, you can only use Restic for backing up applications. -==== - -[IMPORTANT] -==== -FSB does not support backing up `hostPath` volumes. For more information, see link:https://velero.io/docs/v1.12/file-system-backup/#limitations[FSB limitations]. -==== - -include::snippets/pod-volume-restore-snapshot-read-only.adoc[] - -.Prerequisites - -* You must install the OpenShift API for Data Protection (OADP) Operator. -* You must not disable the default `nodeAgent` installation by setting `spec.configuration.nodeAgent.enable` to `false` in the `DataProtectionApplication` CR. -* You must select Kopia or Restic as the uploader by setting `spec.configuration.nodeAgent.uploaderType` to `kopia` or `restic` in the `DataProtectionApplication` CR. -* The `DataProtectionApplication` CR must be in a `Ready` state. - -.Procedure - -* Create the `Backup` CR, as in the following example: -+ -[source,yaml] ----- -apiVersion: velero.io/v1 -kind: Backup -metadata: - name: - labels: - velero.io/storage-location: default - namespace: openshift-adp -spec: - defaultVolumesToFsBackup: true <1> -... ----- -<1> In OADP version 1.2 and later, add the `defaultVolumesToFsBackup: true` setting within the `spec` block. In OADP version 1.1, add `defaultVolumesToRestic: true`. 
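After a file system backup runs, Velero records per-volume progress in `PodVolumeBackup` resources. A quick way to observe them, assuming the default `openshift-adp` namespace:

[source,terminal]
----
$ oc get podvolumebackups -n openshift-adp
----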
diff --git a/backup_and_restore/application_backup_and_restore/backing_up_and_restoring/oadp-backing-up-pvs-csi-doc.adoc b/backup_and_restore/application_backup_and_restore/backing_up_and_restoring/oadp-backing-up-pvs-csi-doc.adoc deleted file mode 100644 index 445e0d497aac..000000000000 --- a/backup_and_restore/application_backup_and_restore/backing_up_and_restoring/oadp-backing-up-pvs-csi-doc.adoc +++ /dev/null @@ -1,42 +0,0 @@ -:_mod-docs-content-type: ASSEMBLY -[id="oadp-backing-up-pvs-csi-doc"] -= Backing up persistent volumes with CSI snapshots -include::_attributes/common-attributes.adoc[] -:context: backing-up-applications - -toc::[] - -You back up persistent volumes with Container Storage Interface (CSI) snapshots by editing the `VolumeSnapshotClass` custom resource (CR) of the cloud storage before you create the `Backup` CR. For details about volume snapshot classes, see xref:../../../storage/container_storage_interface/persistent-storage-csi-snapshots.adoc#persistent-storage-csi-snapshots-overview_persistent-storage-csi-snapshots[CSI volume snapshots]. - -For more information, see xref:../../../backup_and_restore/application_backup_and_restore/backing_up_and_restoring/oadp-creating-backup-cr.adoc#oadp-creating-backup-cr-doc[Creating a Backup CR]. - -.Prerequisites - -* The cloud provider must support CSI snapshots. -* You must enable CSI in the `DataProtectionApplication` CR. - -.Procedure - -* Add the `metadata.labels.velero.io/csi-volumesnapshot-class: "true"` key-value pair to the `VolumeSnapshotClass` CR: -+ -.Example configuration file -[source,yaml,subs="attributes+"] ----- -apiVersion: snapshot.storage.k8s.io/v1 -kind: VolumeSnapshotClass -metadata: - name: <volume_snapshot_class_name> - labels: - velero.io/csi-volumesnapshot-class: "true" <1> - annotations: - snapshot.storage.kubernetes.io/is-default-class: true <2> -driver: <csi_driver> -deletionPolicy: <deletion_policy> <3> ----- -<1> Must be set to `true`. -<2> If you are restoring this volume in another cluster with the same driver, make sure that you set the `snapshot.storage.kubernetes.io/is-default-class` parameter to `false` instead of setting it to `true`. Otherwise, the restore will partially fail. -<3> OADP supports the `Retain` and `Delete` deletion policy types for CSI and Data Mover backup and restore. - -.Next steps - -* You can now create a `Backup` CR. diff --git a/backup_and_restore/application_backup_and_restore/backing_up_and_restoring/oadp-creating-backup-cr.adoc b/backup_and_restore/application_backup_and_restore/backing_up_and_restoring/oadp-creating-backup-cr.adoc deleted file mode 100644 index 1ac42155b18b..000000000000 --- a/backup_and_restore/application_backup_and_restore/backing_up_and_restoring/oadp-creating-backup-cr.adoc +++ /dev/null @@ -1,88 +0,0 @@ -// Module included in the following assemblies: -// -// * backup_and_restore/application_backup_and_restore/backing_up_and_restoring/backing-up-applications.adoc - -:_mod-docs-content-type: PROCEDURE -[id="oadp-creating-backup-cr-doc"] -= Creating a Backup CR -include::_attributes/common-attributes.adoc[] -include::_attributes/attributes-openshift-dedicated.adoc[] -:context: backing-up-applications - -toc::[] - -You back up Kubernetes resources, internal images, and persistent volumes (PVs) by creating a `Backup` custom resource (CR). - -.Prerequisites - -* You must install the OpenShift API for Data Protection (OADP) Operator. -* The `DataProtectionApplication` CR must be in a `Ready` state. -* Backup location prerequisites: -** You must have S3 object storage configured for Velero.
-** You must have a backup location configured in the `DataProtectionApplication` CR.
-* Snapshot location prerequisites:
-** Your cloud provider must have a native snapshot API or support Container Storage Interface (CSI) snapshots.
-** For CSI snapshots, you must create a `VolumeSnapshotClass` CR to register the CSI driver.
-** You must have a volume location configured in the `DataProtectionApplication` CR.
-
-.Procedure
-
-. Retrieve the `backupStorageLocations` CRs by entering the following command:
-+
-[source,terminal]
-----
-$ oc get backupstoragelocations.velero.io -n openshift-adp
-----
-+
-.Example output
-+
-[source,terminal]
-----
-NAMESPACE       NAME              PHASE       LAST VALIDATED   AGE   DEFAULT
-openshift-adp   velero-sample-1   Available   11s              31m
-----
-
-. Create a `Backup` CR, as in the following example:
-+
-[source,yaml]
-----
-apiVersion: velero.io/v1
-kind: Backup
-metadata:
-  name: <backup>
-  labels:
-    velero.io/storage-location: default
-  namespace: openshift-adp
-spec:
-  hooks: {}
-  includedNamespaces:
-  - <namespace> <1>
-  includedResources: [] <2>
-  excludedResources: [] <3>
-  storageLocation: <velero-sample-1> <4>
-  ttl: 720h0m0s
-  labelSelector: <5>
-    matchLabels:
-      app: <label_1>
-      app: <label_2>
-      app: <label_3>
-  orLabelSelectors: <6>
-  - matchLabels:
-      app: <label_1>
-      app: <label_2>
-      app: <label_3>
-----
-<1> Specify an array of namespaces to back up.
-<2> Optional: Specify an array of resources to include in the backup. Resources might be shortcuts (for example, 'po' for 'pods') or fully-qualified. If unspecified, all resources are included.
-<3> Optional: Specify an array of resources to exclude from the backup. Resources might be shortcuts (for example, 'po' for 'pods') or fully-qualified.
-<4> Specify the name of the `backupStorageLocations` CR.
-<5> Map of {key,value} pairs of backup resources that have *all* the specified labels.
-<6> Map of {key,value} pairs of backup resources that have *one or more* of the specified labels.
-
-. Verify that the status of the `Backup` CR is `Completed`:
-+
-[source,terminal]
-----
-$ oc get backups.velero.io <backup> -n openshift-adp -o jsonpath='{.status.phase}'
-----
diff --git a/backup_and_restore/application_backup_and_restore/backing_up_and_restoring/oadp-creating-backup-hooks-doc.adoc b/backup_and_restore/application_backup_and_restore/backing_up_and_restoring/oadp-creating-backup-hooks-doc.adoc
deleted file mode 100644
index dd98c51f3bfd..000000000000
--- a/backup_and_restore/application_backup_and_restore/backing_up_and_restoring/oadp-creating-backup-hooks-doc.adoc
+++ /dev/null
@@ -1,62 +0,0 @@
-:_mod-docs-content-type: PROCEDURE
-[id="oadp-creating-backup-hooks-doc"]
-= Creating backup hooks
-include::_attributes/common-attributes.adoc[]
-:context: backing-up-applications
-
-toc::[]
-
-When performing a backup, it is possible to specify one or more commands to execute in a container within a pod, based on the pod being backed up.
-
-The commands can be configured to run before any custom action processing (_Pre_ hooks), or after all custom actions have been completed and any additional items specified by the custom action have been backed up (_Post_ hooks).
-
-You create backup hooks to run commands in a container in a pod by editing the `Backup` custom resource (CR).
-
-.Procedure
-
-* Add a hook to the `spec.hooks` block of the `Backup` CR, as in the following example:
-+
-[source,yaml]
-----
-apiVersion: velero.io/v1
-kind: Backup
-metadata:
-  name: <backup>
-  namespace: openshift-adp
-spec:
-  hooks:
-    resources:
-      - name: <hook_name>
-        includedNamespaces:
-        - <namespace> <1>
-        excludedNamespaces: <2>
-        - <namespace>
-        includedResources:
-        - pods <3>
-        excludedResources: [] <4>
-        labelSelector: <5>
-          matchLabels:
-            app: velero
-            component: server
-        pre: <6>
-          - exec:
-              container: <container> <7>
-              command:
-              - /bin/uname <8>
-              - -a
-              onError: Fail <9>
-              timeout: 30s <10>
-        post: <11>
-...
-----
-<1> Optional: You can specify namespaces to which the hook applies. If this value is not specified, the hook applies to all namespaces.
-<2> Optional: You can specify namespaces to which the hook does not apply.
-<3> Currently, pods are the only supported resource that hooks can apply to.
-<4> Optional: You can specify resources to which the hook does not apply.
-<5> Optional: This hook only applies to objects matching the label. If this value is not specified, the hook applies to all objects.
-<6> Array of hooks to run before the backup.
-<7> Optional: If the container is not specified, the command runs in the first container in the pod.
-<8> The command that the hook runs.
-<9> Allowed values for error handling are `Fail` and `Continue`. The default is `Fail`.
-<10> Optional: How long to wait for the commands to run. The default is `30s`.
-<11> This block defines an array of hooks to run after the backup, with the same parameters as the pre-backup hooks.
diff --git a/backup_and_restore/application_backup_and_restore/backing_up_and_restoring/oadp-deleting-backups.adoc b/backup_and_restore/application_backup_and_restore/backing_up_and_restoring/oadp-deleting-backups.adoc
deleted file mode 100644
index 9615b4174040..000000000000
--- a/backup_and_restore/application_backup_and_restore/backing_up_and_restoring/oadp-deleting-backups.adoc
+++ /dev/null
@@ -1,24 +0,0 @@
-:_mod-docs-content-type: ASSEMBLY
-[id="oadp-deleting-backups"]
-= Deleting backups
-include::_attributes/common-attributes.adoc[]
-:context: deleting-backups
-
-toc::[]
-
-You can delete a backup by creating the `DeleteBackupRequest` custom resource (CR) or by running the `velero backup delete` command, as explained in the following procedures. A minimal `DeleteBackupRequest` sketch follows the list below.
-
-The volume backup artifacts are deleted at different times depending on the backup method:
-
-* Restic: The artifacts are deleted in the next full maintenance cycle, after the backup is deleted.
-* Container Storage Interface (CSI): The artifacts are deleted immediately when the backup is deleted.
-* Kopia: The artifacts are deleted after three full maintenance cycles of the Kopia repository, after the backup is deleted.
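As an illustration of the first deletion method, the following is a minimal sketch of a `DeleteBackupRequest` CR; the included procedures describe the exact fields to use:

[source,yaml]
----
apiVersion: velero.io/v1
kind: DeleteBackupRequest
metadata:
  name: deletebackuprequest
  namespace: openshift-adp
spec:
  backupName: <backup> # name of the Backup CR to delete
----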
-
-// delete using oc command
-include::modules/oadp-deleting-backups-using-oc.adoc[leveloffset=+1]
-// delete using velero
-include::modules/oadp-deleting-backups-using-velero.adoc[leveloffset=+1]
-// kopia repo maintenance
-include::modules/oadp-about-kopia-repo-maintenance.adoc[leveloffset=+1]
-// delete backup repository
-include::modules/oadp-deleting-backup-repository.adoc[leveloffset=+1]
diff --git a/backup_and_restore/application_backup_and_restore/backing_up_and_restoring/oadp-scheduling-backups-doc.adoc b/backup_and_restore/application_backup_and_restore/backing_up_and_restoring/oadp-scheduling-backups-doc.adoc
deleted file mode 100644
index 135e8f5f7a3d..000000000000
--- a/backup_and_restore/application_backup_and_restore/backing_up_and_restoring/oadp-scheduling-backups-doc.adoc
+++ /dev/null
@@ -1,86 +0,0 @@
-
-:_mod-docs-content-type: PROCEDURE
-[id="oadp-scheduling-backups-doc"]
-= Scheduling backups using Schedule CR
-include::_attributes/common-attributes.adoc[]
-:context: backing-up-applications
-
-toc::[]
-
-The schedule operation allows you to create a backup of your data at a particular time, specified by a cron expression.
-
-You schedule backups by creating a `Schedule` custom resource (CR) instead of a `Backup` CR.
-
-[WARNING]
-====
-Leave enough time in your backup schedule for a backup to finish before another backup is created.
-
-For example, if a backup of a namespace typically takes 10 minutes, do not schedule backups more frequently than every 15 minutes.
-====
-
-.Prerequisites
-
-* You must install the OpenShift API for Data Protection (OADP) Operator.
-* The `DataProtectionApplication` CR must be in a `Ready` state.
-
-.Procedure
-
-. Retrieve the `backupStorageLocations` CRs:
-+
-[source,terminal]
-----
-$ oc get backupStorageLocations -n openshift-adp
-----
-+
-.Example output
-+
-[source,terminal]
-----
-NAMESPACE       NAME              PHASE       LAST VALIDATED   AGE   DEFAULT
-openshift-adp   velero-sample-1   Available   11s              31m
-----
-
-. Create a `Schedule` CR, as in the following example:
-+
-[source,yaml]
-----
-$ cat << EOF | oc apply -f -
-apiVersion: velero.io/v1
-kind: Schedule
-metadata:
-  name: <schedule>
-  namespace: openshift-adp
-spec:
-  schedule: 0 7 * * * <1>
-  template:
-    hooks: {}
-    includedNamespaces:
-    - <namespace> <2>
-    storageLocation: <velero-sample-1> <3>
-    defaultVolumesToFsBackup: true <4>
-    ttl: 720h0m0s
-EOF
-----
-
-<1> `cron` expression to schedule the backup, for example, `0 7 * * *` to perform a backup every day at 7:00.
-+
-[NOTE]
-====
-To schedule a backup at specific intervals, enter the `<cron_expression>` in the following format:
-[source,terminal]
-----
-  schedule: "*/10 * * * *"
-----
-Enter the minutes value between quotation marks (`" "`).
-====
-
-<2> Array of namespaces to back up.
-<3> Name of the `backupStorageLocations` CR.
-<4> Optional: In OADP version 1.2 and later, add the `defaultVolumesToFsBackup: true` key-value pair to your configuration when performing backups of volumes with Restic. In OADP version 1.1, add the `defaultVolumesToRestic: true` key-value pair when you back up volumes with Restic.
-
-. 
Verify that the status of the `Schedule` CR is `Completed` after the scheduled backup runs: -+ -[source,terminal] ----- -$ oc get schedule -n openshift-adp -o jsonpath='{.status.phase}' ----- \ No newline at end of file diff --git a/backup_and_restore/application_backup_and_restore/backing_up_and_restoring/restoring-applications.adoc b/backup_and_restore/application_backup_and_restore/backing_up_and_restoring/restoring-applications.adoc deleted file mode 100644 index 88d2c61b9a85..000000000000 --- a/backup_and_restore/application_backup_and_restore/backing_up_and_restoring/restoring-applications.adoc +++ /dev/null @@ -1,27 +0,0 @@ -:_mod-docs-content-type: ASSEMBLY -[id="restoring-applications"] -= Restoring applications -include::_attributes/common-attributes.adoc[] -:context: restoring-applications - -toc::[] - -You restore application backups by creating a `Restore` custom resource (CR). See xref:../../../backup_and_restore/application_backup_and_restore/backing_up_and_restoring/restoring-applications.adoc#oadp-creating-restore-cr_restoring-applications[Creating a Restore CR]. - -You can create restore hooks to run commands in a container in a pod by editing the `Restore` CR. See xref:../../../backup_and_restore/application_backup_and_restore/backing_up_and_restoring/restoring-applications.adoc#oadp-creating-restore-hooks_restoring-applications[Creating restore hooks]. - -include::modules/oadp-review-backup-restore.adoc[leveloffset=+1] -include::modules/oadp-creating-restore-cr.adoc[leveloffset=+1] -include::modules/oadp-creating-restore-hooks.adoc[leveloffset=+1] -[IMPORTANT] -==== -include::snippets/oadp-image-stream-tag-trigger.adoc[leveloffset=+1] -==== - -//TODO: Add this xref to ROSA HCP when Images book is added. -ifndef::openshift-rosa-hcp[] -[role="_additional-resources"] -.Additional resources - -* xref:../../../openshift_images/triggering-updates-on-imagestream-changes.adoc#triggering-updates-on-imagestream-changes[Triggering updates on image stream changes] -endif::openshift-rosa-hcp[] diff --git a/backup_and_restore/application_backup_and_restore/backing_up_and_restoring/snippets b/backup_and_restore/application_backup_and_restore/backing_up_and_restoring/snippets deleted file mode 120000 index ce62fd7c41e2..000000000000 --- a/backup_and_restore/application_backup_and_restore/backing_up_and_restoring/snippets +++ /dev/null @@ -1 +0,0 @@ -../../../snippets/ \ No newline at end of file diff --git a/backup_and_restore/application_backup_and_restore/backup-and-restore-cr-issues.adoc b/backup_and_restore/application_backup_and_restore/backup-and-restore-cr-issues.adoc deleted file mode 100644 index a146acb1d8b4..000000000000 --- a/backup_and_restore/application_backup_and_restore/backup-and-restore-cr-issues.adoc +++ /dev/null @@ -1,97 +0,0 @@ -:_mod-docs-content-type: ASSEMBLY -[id="backup-and-restore-cr-issues"] -= Backup and Restore CR issues -include::_attributes/common-attributes.adoc[] -include::_attributes/attributes-openshift-dedicated.adoc[] -:context: backup-and-restore-cr-issues -:namespace: openshift-adp -:local-product: OADP - -toc::[] - -You might encounter these common issues with `Backup` and `Restore` custom resources (CRs). - -[id="backup-cannot-retrieve-volume_{context}"] -== Backup CR cannot retrieve volume - -The `Backup` CR displays the following error message: `InvalidVolume.NotFound: The volume ‘vol-xxxx’ does not exist`. - -.Cause - -The persistent volume (PV) and the snapshot locations are in different regions. - -.Solution - -. 
Edit the value of the `spec.snapshotLocations.velero.config.region` key in the `DataProtectionApplication` manifest so that the snapshot location is in the same region as the PV.
-. Create a new `Backup` CR.
-
-[id="backup-cr-remains-in-progress_{context}"]
-== Backup CR status remains in progress
-
-The status of a `Backup` CR remains in the `InProgress` phase and does not complete.
-
-.Cause
-
-If a backup is interrupted, it cannot be resumed.
-
-.Solution
-
-. Retrieve the details of the `Backup` CR by running the following command:
-+
-[source,terminal]
-----
-$ oc -n {namespace} exec deployment/velero -c velero -- ./velero \
-  backup describe <backup>
-----
-
-. Delete the `Backup` CR by running the following command:
-+
-[source,terminal]
-----
-$ oc delete backups.velero.io <backup> -n openshift-adp
-----
-+
-You do not need to clean up the backup location because an in-progress `Backup` CR has not uploaded files to object storage.
-
-. Create a new `Backup` CR.
-
-. View the Velero backup details by running the following command:
-+
-[source,terminal, subs="+quotes"]
-----
-$ velero backup describe <backup_name> --details
-----
-
-[id="backup-cr-remains-partiallyfailed_{context}"]
-== Backup CR status remains in PartiallyFailed
-
-The status of a `Backup` CR that does not use Restic remains in the `PartiallyFailed` phase and does not complete. A snapshot of the affiliated PVC is not created.
-
-.Cause
-
-If the backup created based on the CSI snapshot class is missing a label, the CSI snapshot plugin fails to create a snapshot. As a result, the `Velero` pod logs an error similar to the following message:
-
-[source,text]
-----
-time="2023-02-17T16:33:13Z" level=error msg="Error backing up item" backup=openshift-adp/user1-backup-check5 error="error executing custom action (groupResource=persistentvolumeclaims, namespace=busy1, name=pvc1-user1): rpc error: code = Unknown desc = failed to get volumesnapshotclass for storageclass ocs-storagecluster-ceph-rbd: failed to get volumesnapshotclass for provisioner openshift-storage.rbd.csi.ceph.com, ensure that the desired volumesnapshot class has the velero.io/csi-volumesnapshot-class label" logSource="/remote-source/velero/app/pkg/backup/backup.go:417" name=busybox-79799557b5-vprq
----- 

-.Solution
-
-. Delete the `Backup` CR by running the following command:
-+
-[source,terminal]
-----
-$ oc delete backups.velero.io <backup> -n openshift-adp
-----
-
-. If required, clean up the stored data on the `BackupStorageLocation` to free up space.
-
-. Apply the label `velero.io/csi-volumesnapshot-class=true` to the `VolumeSnapshotClass` object by running the following command:
-+
-[source,terminal]
-----
-$ oc label volumesnapshotclass/<snapshot_class_name> velero.io/csi-volumesnapshot-class=true
----- 

-. Create a new `Backup` CR.
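For reference, the following is a hedged sketch of the `DataProtectionApplication` fragment that the first solution in this assembly edits; the provider and key names follow the AWS examples used elsewhere in this documentation:

[source,yaml]
----
spec:
  snapshotLocations:
    - velero:
        provider: aws
        config:
          region: <region> # must match the region of the persistent volumes being backed up
          profile: "default"
----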
\ No newline at end of file diff --git a/backup_and_restore/application_backup_and_restore/images b/backup_and_restore/application_backup_and_restore/images deleted file mode 120000 index 5fa6987088da..000000000000 --- a/backup_and_restore/application_backup_and_restore/images +++ /dev/null @@ -1 +0,0 @@ -../../images \ No newline at end of file diff --git a/backup_and_restore/application_backup_and_restore/installing/_attributes b/backup_and_restore/application_backup_and_restore/installing/_attributes deleted file mode 120000 index bf7c2529fdb4..000000000000 --- a/backup_and_restore/application_backup_and_restore/installing/_attributes +++ /dev/null @@ -1 +0,0 @@ -../../../_attributes/ \ No newline at end of file diff --git a/backup_and_restore/application_backup_and_restore/installing/about-installing-oadp.adoc b/backup_and_restore/application_backup_and_restore/installing/about-installing-oadp.adoc deleted file mode 100644 index 3b936c1594d9..000000000000 --- a/backup_and_restore/application_backup_and_restore/installing/about-installing-oadp.adoc +++ /dev/null @@ -1,72 +0,0 @@ -:_mod-docs-content-type: ASSEMBLY -[id="about-installing-oadp"] -= About installing OADP -include::_attributes/common-attributes.adoc[] -include::_attributes/attributes-openshift-dedicated.adoc[] -:context: about-installing-oadp - -toc::[] - -As a cluster administrator, you install the OpenShift API for Data Protection (OADP) by installing the OADP Operator. The OADP Operator installs link:https://{velero-domain}/docs/v{velero-version}/[Velero {velero-version}]. - -include::snippets/oadp-mtc-operator.adoc[] - -To back up Kubernetes resources and internal images, you must have object storage as a backup location, such as one of the following storage types: - -* xref:../../../backup_and_restore/application_backup_and_restore/installing/installing-oadp-aws.adoc#installing-oadp-aws[Amazon Web Services] -* xref:../../../backup_and_restore/application_backup_and_restore/installing/installing-oadp-azure.adoc#installing-oadp-azure[Microsoft Azure] -* xref:../../../backup_and_restore/application_backup_and_restore/installing/installing-oadp-gcp.adoc#installing-oadp-gcp[Google Cloud Platform] -* xref:../../../backup_and_restore/application_backup_and_restore/installing/installing-oadp-mcg.adoc#installing-oadp-mcg[Multicloud Object Gateway] -* {ibm-cloud-name} Object Storage S3 -* AWS S3 compatible object storage, such as Multicloud Object Gateway or MinIO - -You can configure multiple backup storage locations within the same namespace for each individual OADP deployment. - -include::snippets/snip-noobaa-and-mcg.adoc[] - -:FeatureName: The `CloudStorage` API, which automates the creation of a bucket for object storage, -include::snippets/technology-preview.adoc[] - -[NOTE] -==== -The `CloudStorage` API is a Technology Preview feature when you use a `CloudStorage` object and want OADP to use the `CloudStorage` API to automatically create an S3 bucket for use as a `BackupStorageLocation`. - -The `CloudStorage` API supports manually creating a `BackupStorageLocation` object by specifying an existing S3 bucket. The `CloudStorage` API that creates an S3 bucket automatically is currently only enabled for AWS S3 storage. -==== - -You can back up persistent volumes (PVs) by using snapshots or a File System Backup (FSB). 
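Before the snapshot requirements that follow, here is an illustration of the Technology Preview `CloudStorage` API noted above. This is a minimal sketch; the field names are assumptions based on the OADP v1alpha1 API, and automatic bucket creation is currently enabled only for AWS S3:

[source,yaml]
----
apiVersion: oadp.openshift.io/v1alpha1
kind: CloudStorage
metadata:
  name: <cloud_storage_name>
  namespace: openshift-adp
spec:
  name: <bucket_name> # bucket that OADP creates automatically
  provider: aws
  region: <region>
  creationSecret:
    name: cloud-credentials
    key: cloud
----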
- -To back up PVs with snapshots, you must have a cloud provider that supports either a native snapshot API or Container Storage Interface (CSI) snapshots, such as one of the following cloud providers: - -* xref:../../../backup_and_restore/application_backup_and_restore/installing/installing-oadp-aws.adoc#installing-oadp-aws[Amazon Web Services] -* xref:../../../backup_and_restore/application_backup_and_restore/installing/installing-oadp-azure.adoc#installing-oadp-azure[Microsoft Azure] -* xref:../../../backup_and_restore/application_backup_and_restore/installing/installing-oadp-gcp.adoc#installing-oadp-gcp[Google Cloud Platform] -* CSI snapshot-enabled cloud provider, such as xref:../../../backup_and_restore/application_backup_and_restore/installing/installing-oadp-ocs.adoc#installing-oadp-ocs[OpenShift Data Foundation] - -include::snippets/oadp-ocp-compat.adoc[] - -If your cloud provider does not support snapshots or if your storage is NFS, you can back up applications with xref:../../../backup_and_restore/application_backup_and_restore/backing_up_and_restoring/oadp-backing-up-applications-restic-doc.adoc#backing-up-applications[Backing up applications with File System Backup: Kopia or Restic] on object storage. - -You create a default `Secret` and then you install the Data Protection Application. - -include::modules/oadp-s3-compatible-backup-storage-providers.adoc[leveloffset=+1] - -include::modules/oadp-configuring-noobaa-for-dr.adoc[leveloffset=+1] - -[discrete] -[role="_additional-resources"] -.Additional resources - -* link:https://{velero-domain}/docs/v{velero-version}/locations/[Overview of backup and snapshot locations in the Velero documentation] - -include::modules/about-oadp-update-channels.adoc[leveloffset=+1] -include::modules/about-installing-oadp-on-multiple-namespaces.adoc[leveloffset=+1] -include::modules/oadp-support-backup-data-immutability.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* xref:../../../operators/understanding/olm/olm-understanding-olm.adoc#olm-csv_olm-understanding-olm[Cluster service version] - -include::modules/oadp-velero-cpu-memory-requirements.adoc[leveloffset=+1] -include::modules/oadp-backup-restore-for-large-usage.adoc[leveloffset=+2] diff --git a/backup_and_restore/application_backup_and_restore/installing/about-oadp-data-mover.adoc b/backup_and_restore/application_backup_and_restore/installing/about-oadp-data-mover.adoc deleted file mode 100644 index d07cd4e5243a..000000000000 --- a/backup_and_restore/application_backup_and_restore/installing/about-oadp-data-mover.adoc +++ /dev/null @@ -1,83 +0,0 @@ -:_mod-docs-content-type: ASSEMBLY -[id="about-oadp-data-mover"] -= About the {oadp-short} Data Mover -include::_attributes/common-attributes.adoc[] -:context: about-oadp-data-mover - -toc::[] - -{oadp-first} includes a built-in Data Mover that you can use to move Container Storage Interface (CSI) volume snapshots to a remote object store. The built-in Data Mover allows you to restore stateful applications from the remote object store if a failure, accidental deletion, or corruption of the cluster occurs. It uses xref:../../../backup_and_restore/application_backup_and_restore/backing_up_and_restoring/oadp-about-kopia.adoc#oadp-about-kopia[Kopia] as the uploader mechanism to read the snapshot data and write to the unified repository. 
-
-{oadp-short} supports CSI snapshots on the following storage providers:
-
-* {odf-full}
-* Any other cloud storage provider with the Container Storage Interface (CSI) driver that supports the Kubernetes Volume Snapshot API
-
-[id="oadp-data-mover-support_{context}"]
-== Data Mover support
-
-The {oadp-short} built-in Data Mover, which was introduced in {oadp-short} 1.3 as a Technology Preview, is now fully supported for both containerized and virtual machine workloads.
-
-.Supported
-
-Data Mover backups taken with {oadp-short} 1.3 can be restored using {oadp-short} 1.3, 1.4, and later. This is supported.
-
-.Not supported
-
-Backups taken with {oadp-short} 1.1 or {oadp-short} 1.2 using the Data Mover feature cannot be restored using {oadp-short} 1.3 and later. Therefore, it is not supported.
-
-{oadp-short} 1.1 and {oadp-short} 1.2 are no longer supported. The Data Mover feature in {oadp-short} 1.1 or {oadp-short} 1.2 was a Technology Preview and was never supported. Data Mover backups taken with {oadp-short} 1.1 or {oadp-short} 1.2 cannot be restored on later versions of {oadp-short}.
-
-
-[id="enabling-oadp-data-mover_{context}"]
-== Enabling the built-in Data Mover
-
-To enable the built-in Data Mover, you must include the CSI plugin and enable the node agent in the `DataProtectionApplication` custom resource (CR). The node agent is a Kubernetes daemon set that hosts data movement modules. These include the Data Mover controller, uploader, and the repository.
-
-.Example `DataProtectionApplication` manifest
-[source,yaml]
-----
-apiVersion: oadp.openshift.io/v1alpha1
-kind: DataProtectionApplication
-metadata:
-  name: dpa-sample
-spec:
-  configuration:
-    nodeAgent:
-      enable: true <1>
-      uploaderType: kopia <2>
-    velero:
-      defaultPlugins:
-      - openshift
-      - aws
-      - csi <3>
-      defaultSnapshotMoveData: true
-      defaultVolumesToFSBackup: <4>
-      featureFlags:
-      - EnableCSI
-# ...
-----
-<1> The flag to enable the node agent.
-<2> The type of uploader. The possible values are `restic` or `kopia`. The built-in Data Mover uses Kopia as the default uploader mechanism regardless of the value of the `uploaderType` field.
-<3> The CSI plugin included in the list of default plugins.
-<4> In {oadp-short} 1.3.1 and later, set to `true` if you use Data Mover only for volumes that opt out of `fs-backup`. Set to `false` if you use Data Mover by default for volumes.
-
-[id="built-in-data-mover-crs"]
-== Built-in Data Mover controller and custom resource definitions (CRDs)
-
-The built-in Data Mover feature introduces three new API objects defined as CRDs for managing backup and restore:
-
-* `DataDownload`: Represents a data download of a volume snapshot. The CSI plugin creates one `DataDownload` object per volume to be restored. The `DataDownload` CR includes information about the target volume, the specified Data Mover, the progress of the current data download, the specified backup repository, and the result of the current data download after the process is complete.
-
-* `DataUpload`: Represents a data upload of a volume snapshot. The CSI plugin creates one `DataUpload` object per CSI snapshot. 
The `DataUpload` CR includes information about the specified snapshot, the specified Data Mover, the specified backup repository, the progress of the current data upload, and the result of the current data upload after the process is complete.
-
-* `BackupRepository`: Represents and manages the lifecycle of the backup repositories. {oadp-short} creates a backup repository per namespace when the first CSI snapshot backup or restore for a namespace is requested.
-
-include::modules/oadp-incremental-backup-support.adoc[leveloffset=+1]
diff --git a/backup_and_restore/application_backup_and_restore/installing/configuring-oadp-multiple-bsl.adoc b/backup_and_restore/application_backup_and_restore/installing/configuring-oadp-multiple-bsl.adoc
deleted file mode 100644
index a6038251707b..000000000000
--- a/backup_and_restore/application_backup_and_restore/installing/configuring-oadp-multiple-bsl.adoc
+++ /dev/null
@@ -1,29 +0,0 @@
-:_mod-docs-content-type: ASSEMBLY
-[id="configuring-oadp-multiple-bsl"]
-= Configuring the {oadp-first} with more than one Backup Storage Location
-include::_attributes/common-attributes.adoc[]
-:context: configuring-oadp-multiple-bsl
-:configuring-oadp-multiple-bsl:
-
-
-toc::[]
-
-
-You can configure one or more backup storage locations (BSLs) in the Data Protection Application (DPA). You can also select the location where the backup is stored when you create the backup. With this configuration, you can store your backups in the following ways:
-
-* In different regions
-* With a different storage provider
-
-{oadp-short} supports multiple credentials for configuring more than one BSL, so that you can specify the credentials to use with any BSL.
-
-// module for configuring the DPA with multiple BSLs.
-include::modules/oadp-configuring-dpa-multiple-bsl.adoc[leveloffset=+1]
-// module for multiple BSL use case.
-include::modules/oadp-multiple-bsl-use-case.adoc[leveloffset=+1]
-
-[role="_additional-resources"]
-.Additional resources
-
-* xref:../../../backup_and_restore/application_backup_and_restore/installing/installing-oadp-aws.adoc#oadp-secrets-for-different-credentials_installing-oadp-aws[Creating profiles for different credentials]
-
-:!configuring-oadp-multiple-bsl:
diff --git a/backup_and_restore/application_backup_and_restore/installing/configuring-oadp-multiple-vsl.adoc b/backup_and_restore/application_backup_and_restore/installing/configuring-oadp-multiple-vsl.adoc
deleted file mode 100644
index 03dd3427ffb1..000000000000
--- a/backup_and_restore/application_backup_and_restore/installing/configuring-oadp-multiple-vsl.adoc
+++ /dev/null
@@ -1,17 +0,0 @@
-:_mod-docs-content-type: ASSEMBLY
-[id="configuring-oadp-multiple-vsl"]
-= Configuring the {oadp-first} with more than one Volume Snapshot Location
-include::_attributes/common-attributes.adoc[]
-:context: configuring-oadp-multiple-vsl
-:configuring-oadp-multiple-vsl:
-
-toc::[]
-
-
-You can configure one or more Volume Snapshot Locations (VSLs) to store the snapshots in different cloud provider regions.
-
-// module for configuring the DPA with multiple VSLs. 
-include::modules/oadp-configuring-dpa-multiple-vsl.adoc[leveloffset=+1]
-
-
-:!configuring-oadp-multiple-vsl:
diff --git a/backup_and_restore/application_backup_and_restore/installing/data-mover-intro.adoc b/backup_and_restore/application_backup_and_restore/installing/data-mover-intro.adoc
deleted file mode 100644
index 38a88f9dc4b7..000000000000
--- a/backup_and_restore/application_backup_and_restore/installing/data-mover-intro.adoc
+++ /dev/null
@@ -1,29 +0,0 @@
-:_mod-docs-content-type: CONCEPT
-[id="oadp-data-mover-intro"]
-= OADP Data Mover Introduction
-include::_attributes/common-attributes.adoc[]
-:context: data-mover
-
-toc::[]
-
-OADP Data Mover allows you to restore stateful applications from the object store if a failure, accidental deletion, or corruption of the cluster occurs.
-
-:FeatureName: The OADP 1.2 Data Mover
-include::snippets/technology-preview.adoc[leveloffset=+1]
-
-* You can use OADP Data Mover to back up Container Storage Interface (CSI) volume snapshots to a remote object store. See xref:../../../backup_and_restore/application_backup_and_restore/installing/oadp-using-data-mover-for-csi-snapshots-doc.adoc#oadp-using-data-mover-for-csi-snapshots-doc[Using Data Mover for CSI snapshots].
-
-* You can use OADP 1.2 Data Mover to back up and restore application data for clusters that use CephFS, CephRBD, or both. See xref:../../../backup_and_restore/application_backup_and_restore/installing/oadp-12-data-mover-ceph-doc.adoc#oadp-12-data-mover-ceph-doc[Using OADP 1.2 Data Mover with Ceph storage].
-
-include::snippets/snip-post-mig-hook.adoc[]
-
-[id="oadp-data-mover-prerequisites"]
-== OADP Data Mover prerequisites
-
-* You have a stateful application running in a separate namespace.
-
-* You have installed the OADP Operator by using Operator Lifecycle Manager (OLM).
-
-* You have created an appropriate `VolumeSnapshotClass` and `StorageClass`.
-
-* You have installed the VolSync Operator by using OLM.
diff --git a/backup_and_restore/application_backup_and_restore/installing/images b/backup_and_restore/application_backup_and_restore/installing/images
deleted file mode 120000
index 4399cbb3c0f3..000000000000
--- a/backup_and_restore/application_backup_and_restore/installing/images
+++ /dev/null
@@ -1 +0,0 @@
-../../../images/
\ No newline at end of file
diff --git a/backup_and_restore/application_backup_and_restore/installing/installing-oadp-aws.adoc b/backup_and_restore/application_backup_and_restore/installing/installing-oadp-aws.adoc
deleted file mode 100644
index 18bcafe9caf1..000000000000
--- a/backup_and_restore/application_backup_and_restore/installing/installing-oadp-aws.adoc
+++ /dev/null
@@ -1,66 +0,0 @@
-:_mod-docs-content-type: ASSEMBLY
-[id="installing-oadp-aws"]
-= Configuring the OpenShift API for Data Protection with AWS S3 compatible storage
-include::_attributes/common-attributes.adoc[]
-:context: installing-oadp-aws
-:installing-oadp-aws:
-:credentials: cloud-credentials
-:provider: aws
-
-toc::[]
-
-You install the OpenShift API for Data Protection (OADP) with Amazon Web Services (AWS) S3 compatible storage by installing the OADP Operator. The Operator installs link:https://{velero-domain}/docs/v{velero-version}/[Velero {velero-version}].
-
-include::snippets/oadp-mtc-operator.adoc[]
-
-You configure AWS for Velero, create a default `Secret`, and then install the Data Protection Application.
For more details, see xref:../../../backup_and_restore/application_backup_and_restore/installing/oadp-installing-operator.adoc#oadp-installing-operator-doc[Installing the OADP Operator]. - -To install the OADP Operator in a restricted network environment, you must first disable the default OperatorHub sources and mirror the Operator catalog. See xref:../../../disconnected/using-olm.adoc#olm-restricted-networks[Using Operator Lifecycle Manager in disconnected environments] for details. - -include::modules/oadp-s3-and-gov-cloud.adoc[leveloffset=+1] - -//include::modules/oadp-installing-operator.adoc[leveloffset=+1] -include::modules/migration-configuring-aws-s3.adoc[leveloffset=+1] -include::modules/oadp-about-backup-snapshot-locations-secrets.adoc[leveloffset=+1] -include::modules/oadp-creating-default-secret.adoc[leveloffset=+2] -include::modules/oadp-secrets-for-different-credentials.adoc[leveloffset=+2] -include::modules/oadp-configuring-aws-bsl.adoc[leveloffset=+2] -include::modules/oadp-ssec-encrypted-backups.adoc[leveloffset=+2] - -[role="_additional-resources_1"] -.Additional resources - -You can also download the file with the additional encryption key backed up with Velero by running a different command. See xref:../../../backup_and_restore/application_backup_and_restore/installing/installing-oadp-aws.adoc#oadp-ssec-encrypted-backups-velero_installing-oadp-aws[Downloading a file with an SSE-C encryption key for files backed up by Velero]. - -include::modules/oadp-ssec-encrypted-backups-velero.adoc[leveloffset=+3] - -[id="configuring-dpa-aws"] -== Configuring the Data Protection Application - -You can configure the Data Protection Application by setting Velero resource allocations or enabling self-signed CA certificates. - -include::modules/oadp-setting-resource-limits-and-requests.adoc[leveloffset=+2] - -include::snippets/oadp-nodeselector-snippet.adoc[] - -For more details, see xref:../../../backup_and_restore/application_backup_and_restore/installing/installing-oadp-aws.adoc#oadp-configuring-node-agents_installing-oadp-aws[Configuring node agents and node labels]. - -include::modules/oadp-self-signed-certificate.adoc[leveloffset=+2] - -// include::modules/oadp-installing-dpa-1-2-and-earlier.adoc[leveloffset=+1] -include::modules/oadp-installing-dpa-1-3.adoc[leveloffset=+1] -include::modules/oadp-configuring-node-agents.adoc[leveloffset=+2] -include::modules/oadp-configuring-aws-md5sum.adoc[leveloffset=+1] -include::modules/oadp-configuring-client-burst-qps.adoc[leveloffset=+1] -include::modules/oadp-configuring-imagepullpolicy.adoc[leveloffset=+1] -include::modules/oadp-configuring-dpa-multiple-bsl.adoc[leveloffset=+1] -include::modules/oadp-enabling-csi-dpa.adoc[leveloffset=+2] -include::modules/oadp-about-disable-node-agent-dpa.adoc[leveloffset=+2] - -[role="_additional-resources_2"] -.Additional resources - -* xref:../../../backup_and_restore/application_backup_and_restore/installing/installing-oadp-kubevirt.adoc#oadp-installing-dpa_installing-oadp-kubevirt[Installing the Data Protection Application with the `kubevirt` and `openshift` plugins] -* xref:../../../nodes/jobs/nodes-nodes-jobs.adoc#nodes-nodes-jobs[Running tasks in pods using jobs]. 
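As a quick reference for the default `Secret` step described at the start of this assembly, the following is a minimal sketch; it assumes the AWS credentials file is named `credentials-velero`, as in the included modules:

[source,terminal]
----
$ oc create secret generic cloud-credentials -n openshift-adp --from-file cloud=credentials-velero
----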
- -:!installing-oadp-aws: diff --git a/backup_and_restore/application_backup_and_restore/installing/installing-oadp-azure.adoc b/backup_and_restore/application_backup_and_restore/installing/installing-oadp-azure.adoc deleted file mode 100644 index d6f3137bc6d2..000000000000 --- a/backup_and_restore/application_backup_and_restore/installing/installing-oadp-azure.adoc +++ /dev/null @@ -1,55 +0,0 @@ -:_mod-docs-content-type: ASSEMBLY -[id="installing-oadp-azure"] -= Configuring the OpenShift API for Data Protection with Microsoft Azure -include::_attributes/common-attributes.adoc[] -:context: installing-oadp-azure -:installing-oadp-azure: -:credentials: cloud-credentials-azure -:provider: azure - -toc::[] - -You install the OpenShift API for Data Protection (OADP) with Microsoft Azure by installing the OADP Operator. The Operator installs link:https://{velero-domain}/docs/v{velero-version}/[Velero {velero-version}]. - -include::snippets/oadp-mtc-operator.adoc[] - -You configure Azure for Velero, create a default `Secret`, and then install the Data Protection Application. For more details, see xref:../../../backup_and_restore/application_backup_and_restore/installing/oadp-installing-operator.adoc#oadp-installing-operator-doc[Installing the OADP Operator]. - -To install the OADP Operator in a restricted network environment, you must first disable the default OperatorHub sources and mirror the Operator catalog. See xref:../../../disconnected/using-olm.adoc#olm-restricted-networks[Using Operator Lifecycle Manager in disconnected environments] for details. - -// include::modules/oadp-installing-operator.adoc[leveloffset=+1] -include::modules/migration-configuring-azure.adoc[leveloffset=+1] -include::modules/oadp-about-backup-snapshot-locations-secrets.adoc[leveloffset=+1] -include::modules/oadp-creating-default-secret.adoc[leveloffset=+2] -include::modules/oadp-secrets-for-different-credentials.adoc[leveloffset=+2] - -[id="configuring-dpa-azure"] -== Configuring the Data Protection Application - -You can configure the Data Protection Application by setting Velero resource allocations or enabling self-signed CA certificates. - -include::modules/oadp-setting-resource-limits-and-requests.adoc[leveloffset=+2] - -include::snippets/oadp-nodeselector-snippet.adoc[] - -For more details, see xref:../../../backup_and_restore/application_backup_and_restore/installing/installing-oadp-azure.adoc#oadp-configuring-node-agents_installing-oadp-azure[Configuring node agents and node labels]. - -include::modules/oadp-self-signed-certificate.adoc[leveloffset=+2] - -// include::modules/oadp-installing-dpa-1-2-and-earlier.adoc[leveloffset=+1] -include::modules/oadp-installing-dpa-1-3.adoc[leveloffset=+1] -include::modules/oadp-configuring-client-burst-qps.adoc[leveloffset=+1] -include::modules/oadp-configuring-imagepullpolicy.adoc[leveloffset=+1] -include::modules/oadp-configuring-node-agents.adoc[leveloffset=+2] -include::modules/oadp-enabling-csi-dpa.adoc[leveloffset=+2] -include::modules/oadp-about-disable-node-agent-dpa.adoc[leveloffset=+2] - -[role="_additional-resources"] -.Additional resources - -* xref:../../../backup_and_restore/application_backup_and_restore/installing/installing-oadp-kubevirt.adoc#oadp-installing-dpa_installing-oadp-kubevirt[Installing the Data Protection Application with the `kubevirt` and `openshift` plugins] -* xref:../../../nodes/jobs/nodes-nodes-jobs.adoc#nodes-nodes-jobs[Running tasks in pods using jobs]. 
- -* xref:../../../backup_and_restore/application_backup_and_restore/installing/configuring-oadp-multiple-bsl.adoc#configuring-oadp-multiple-bsl[Configuring the {oadp-first} with multiple backup storage locations] - -:installing-oadp-azure!: diff --git a/backup_and_restore/application_backup_and_restore/installing/installing-oadp-gcp.adoc b/backup_and_restore/application_backup_and_restore/installing/installing-oadp-gcp.adoc deleted file mode 100644 index cabd605f5903..000000000000 --- a/backup_and_restore/application_backup_and_restore/installing/installing-oadp-gcp.adoc +++ /dev/null @@ -1,56 +0,0 @@ -:_mod-docs-content-type: ASSEMBLY -[id="installing-oadp-gcp"] -= Configuring the OpenShift API for Data Protection with Google Cloud Platform -include::_attributes/common-attributes.adoc[] -:context: installing-oadp-gcp -:installing-oadp-gcp: -:credentials: cloud-credentials-gcp -:provider: gcp - -toc::[] - -You install the OpenShift API for Data Protection (OADP) with Google Cloud Platform (GCP) by installing the OADP Operator. The Operator installs link:https://{velero-domain}/docs/v{velero-version}/[Velero {velero-version}]. - -include::snippets/oadp-mtc-operator.adoc[] - -You configure GCP for Velero, create a default `Secret`, and then install the Data Protection Application. For more details, see xref:../../../backup_and_restore/application_backup_and_restore/installing/oadp-installing-operator.adoc#oadp-installing-operator-doc[Installing the OADP Operator]. - -To install the OADP Operator in a restricted network environment, you must first disable the default OperatorHub sources and mirror the Operator catalog. See xref:../../../disconnected/using-olm.adoc#olm-restricted-networks[Using Operator Lifecycle Manager in disconnected environments] for details. - -//include::modules/oadp-installing-operator.adoc[leveloffset=+1] -include::modules/migration-configuring-gcp.adoc[leveloffset=+1] -include::modules/oadp-about-backup-snapshot-locations-secrets.adoc[leveloffset=+1] -include::modules/oadp-creating-default-secret.adoc[leveloffset=+2] -include::modules/oadp-secrets-for-different-credentials.adoc[leveloffset=+2] - -[id="configuring-dpa-gcp"] -== Configuring the Data Protection Application - -You can configure the Data Protection Application by setting Velero resource allocations or enabling self-signed CA certificates. - -include::modules/oadp-setting-resource-limits-and-requests.adoc[leveloffset=+2] - -include::snippets/oadp-nodeselector-snippet.adoc[] - -For more details, see xref:../../../backup_and_restore/application_backup_and_restore/installing/installing-oadp-gcp.adoc#oadp-configuring-node-agents_installing-oadp-gcp[Configuring node agents and node labels]. 
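The node agent configuration referenced above is set in the `DataProtectionApplication` CR. The following is a minimal sketch; the `podConfig.nodeSelector` field and the label key are assumptions based on the included node agent module:

[source,yaml]
----
spec:
  configuration:
    nodeAgent:
      enable: true
      uploaderType: kopia
      podConfig:
        nodeSelector:
          node-role.kubernetes.io/nodeAgent: "" # label applied to the nodes that run the node agent
----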
- -include::modules/oadp-self-signed-certificate.adoc[leveloffset=+2] - -// include::modules/oadp-installing-dpa-1-2-and-earlier.adoc[leveloffset=+1] -include::modules/oadp-gcp-wif-cloud-authentication.adoc[leveloffset=+1] -include::modules/oadp-installing-dpa-1-3.adoc[leveloffset=+1] -include::modules/oadp-configuring-client-burst-qps.adoc[leveloffset=+1] -include::modules/oadp-configuring-imagepullpolicy.adoc[leveloffset=+1] -include::modules/oadp-configuring-node-agents.adoc[leveloffset=+2] -include::modules/oadp-enabling-csi-dpa.adoc[leveloffset=+2] -include::modules/oadp-about-disable-node-agent-dpa.adoc[leveloffset=+2] - -[role="_additional-resources"] -.Additional resources - -* xref:../../../backup_and_restore/application_backup_and_restore/installing/installing-oadp-kubevirt.adoc#oadp-installing-dpa_installing-oadp-kubevirt[Installing the Data Protection Application with the `kubevirt` and `openshift` plugins] -* xref:../../../nodes/jobs/nodes-nodes-jobs.adoc#nodes-nodes-jobs[Running tasks in pods using jobs]. - -* xref:../../../backup_and_restore/application_backup_and_restore/installing/configuring-oadp-multiple-bsl.adoc#configuring-oadp-multiple-bsl[Configuring the {oadp-first} with multiple backup storage locations] - -:installing-oadp-gcp!: diff --git a/backup_and_restore/application_backup_and_restore/installing/installing-oadp-ibm-cloud.adoc b/backup_and_restore/application_backup_and_restore/installing/installing-oadp-ibm-cloud.adoc deleted file mode 100644 index e2e5348ec22c..000000000000 --- a/backup_and_restore/application_backup_and_restore/installing/installing-oadp-ibm-cloud.adoc +++ /dev/null @@ -1,36 +0,0 @@ -:_mod-docs-content-type: ASSEMBLY -[id="installing-oadp-ibm-cloud"] -= Configuring the {oadp-full} with {ibm-cloud-title} -include::_attributes/common-attributes.adoc[] -:context: installing-oadp-ibm-cloud -:installing-oadp-ibm-cloud: -:credentials: cloud-credentials - - -toc::[] - -You install the {oadp-first} Operator on an {ibm-cloud-title} cluster to back up and restore applications on the cluster. You configure {ibm-cloud-object-storage} to store the backups. 
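For orientation, the `Secret` that the included modules create typically wraps a credentials file in the AWS profile format. The following sketch assumes HMAC service credentials for {ibm-cloud-object-storage}:

.Example `credentials-velero` file
----
[default]
aws_access_key_id=<access_key_id>
aws_secret_access_key=<secret_access_key>
----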
- -// configuring the IBM COS instance -include::modules/configuring-ibm-cos.adoc[leveloffset=+1] -// include the module for creating default secret -include::modules/oadp-creating-default-secret.adoc[leveloffset=+1] -// include the module for creating custom secret -include::modules/oadp-secrets-for-different-credentials.adoc[leveloffset=+1] -// include the DPA module -include::modules/oadp-installing-dpa-1-3.adoc[leveloffset=+1] -// include the module for setting Velero CPU and memory resource allocations -include::modules/oadp-setting-resource-limits-and-requests.adoc[leveloffset=+1] -// include the node agent config module -include::modules/oadp-configuring-node-agents.adoc[leveloffset=+1] -// include the module for client burst and qps config -include::modules/oadp-configuring-client-burst-qps.adoc[leveloffset=+1] -// include module for image pull policy setting -include::modules/oadp-configuring-imagepullpolicy.adoc[leveloffset=+1] -// include the module for configuring multiple BSL -include::modules/oadp-configuring-dpa-multiple-bsl.adoc[leveloffset=+1] -// include the module for disabling node agent in the DPA -include::modules/oadp-about-disable-node-agent-dpa.adoc[leveloffset=+1] - -:!installing-oadp-ibm-cloud: - diff --git a/backup_and_restore/application_backup_and_restore/installing/installing-oadp-kubevirt.adoc b/backup_and_restore/application_backup_and_restore/installing/installing-oadp-kubevirt.adoc deleted file mode 100644 index 8fe2f70adff1..000000000000 --- a/backup_and_restore/application_backup_and_restore/installing/installing-oadp-kubevirt.adoc +++ /dev/null @@ -1,63 +0,0 @@ -:_mod-docs-content-type: ASSEMBLY -[id="installing-oadp-kubevirt"] - -= Configuring the {oadp-full} with {VirtProductName} -include::_attributes/common-attributes.adoc[] -:context: installing-oadp-kubevirt -:installing-oadp-kubevirt: -:credentials: cloud-credentials -:provider: gcp - -toc::[] - -You can install the {oadp-first} with {VirtProductName} by installing the OADP Operator and configuring a backup location. Then, you can install the Data Protection Application. - -Back up and restore virtual machines by using the xref:../../../backup_and_restore/index.adoc#application-backup-restore-operations-overview[{oadp-full}]. - -[NOTE] -==== -{oadp-full} with {VirtProductName} supports the following backup and restore storage options: - -* Container Storage Interface (CSI) backups - -* Container Storage Interface (CSI) backups with DataMover - -The following storage options are excluded: - -* File system backup and restore - -* Volume snapshot backups and restores - -For more information, see xref:../../../backup_and_restore/application_backup_and_restore/backing_up_and_restoring/oadp-backing-up-applications-restic-doc.adoc#oadp-backing-up-applications-restic-doc[Backing up applications with File System Backup: Kopia or Restic]. -==== -To install the OADP Operator in a restricted network environment, you must first disable the default OperatorHub sources and mirror the Operator catalog. See xref:../../../disconnected/using-olm.adoc#olm-restricted-networks[Using Operator Lifecycle Manager in disconnected environments] for details. 
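As an illustration of the plugin configuration that the included module performs, the following is a minimal sketch of the relevant `DataProtectionApplication` fragment; the plugin list is an assumption based on the `kubevirt` and `openshift` plugins referenced in this assembly:

[source,yaml]
----
spec:
  configuration:
    velero:
      defaultPlugins:
        - csi
        - kubevirt # enables virtual machine backup and restore
        - openshift
----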
-
-include::modules/install-and-configure-oadp-kubevirt.adoc[leveloffset=+1]
-
-[role="_additional-resources"]
-.Additional resources
-
-* xref:../../../backup_and_restore/application_backup_and_restore/oadp-features-plugins.adoc#oadp-plugins_oadp-features-plugins[{oadp-short} plugins]
-* xref:../../../backup_and_restore/application_backup_and_restore/backing_up_and_restoring/backing-up-applications.adoc#backing-up-applications[`Backup` custom resource (CR)]
-* xref:../../../backup_and_restore/application_backup_and_restore/backing_up_and_restoring/restoring-applications.adoc#restoring-applications[`Restore` CR]
-* xref:../../../disconnected/using-olm.adoc#olm-restricted-networks[Using Operator Lifecycle Manager in disconnected environments]
-
-include::modules/oadp-installing-dpa-1-3.adoc[leveloffset=+1]
-include::snippets/oadp-windows-vm-backup-fails.adoc[]
-include::modules/oadp-backup-single-vm.adoc[leveloffset=+1]
-include::modules/oadp-restore-single-vm.adoc[leveloffset=+1]
-include::modules/oadp-restore-single-vm-from-multiple-vm-backup.adoc[leveloffset=+1]
-include::modules/oadp-configuring-client-burst-qps.adoc[leveloffset=+1]
-include::modules/oadp-configuring-imagepullpolicy.adoc[leveloffset=+1]
-include::modules/oadp-configuring-node-agents.adoc[leveloffset=+2]
-include::modules/oadp-incremental-backup-support.adoc[leveloffset=+1]
-
-[IMPORTANT]
-====
-Red Hat supports only the combination of {oadp-short} versions 1.3.0 and later, and {VirtProductName} versions 4.14 and later.
-
-{oadp-short} versions before 1.3.0 are not supported for backup and restore of {VirtProductName}.
-====
-
-
-:!installing-oadp-kubevirt:
diff --git a/backup_and_restore/application_backup_and_restore/installing/installing-oadp-mcg.adoc b/backup_and_restore/application_backup_and_restore/installing/installing-oadp-mcg.adoc
deleted file mode 100644
index ab740c626c07..000000000000
--- a/backup_and_restore/application_backup_and_restore/installing/installing-oadp-mcg.adoc
+++ /dev/null
@@ -1,63 +0,0 @@
-:_mod-docs-content-type: ASSEMBLY
-[id="installing-oadp-mcg"]
-= Configuring the OpenShift API for Data Protection with Multicloud Object Gateway
-include::_attributes/common-attributes.adoc[]
-include::_attributes/attributes-openshift-dedicated.adoc[]
-:context: installing-oadp-mcg
-:installing-oadp-mcg:
-:credentials: cloud-credentials
-:provider: aws
-
-toc::[]
-
-Multicloud Object Gateway (MCG) is a component of {rh-storage}, and you can configure it as a backup location in the `DataProtectionApplication` custom resource (CR).
-You can install the {oadp-first} with MCG by installing the OADP Operator. The Operator installs link:https://{velero-domain}/docs/v{velero-version}/[Velero {velero-version}].
-
-include::snippets/oadp-mtc-operator.adoc[]
-
-:FeatureName: The `CloudStorage` API, which automates the creation of a bucket for object storage,
-include::snippets/technology-preview.adoc[]
-
-You can create a `Secret` CR for the backup location and install the Data Protection Application. For more details, see xref:../../../backup_and_restore/application_backup_and_restore/installing/oadp-installing-operator.adoc#oadp-installing-operator-doc[Installing the OADP Operator].
-
-To install the OADP Operator in a restricted network environment, you must first disable the default OperatorHub sources and mirror the Operator catalog. For details, see xref:../../../disconnected/using-olm.adoc#olm-restricted-networks[Using Operator Lifecycle Manager in disconnected environments].
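Because MCG is consumed as AWS S3-compatible storage, the backup location in the `DataProtectionApplication` CR points at the MCG S3 endpoint. The following is a hedged sketch; the endpoint URL and bucket values are placeholders, and the field layout follows the AWS examples used elsewhere in this documentation:

[source,yaml]
----
spec:
  backupLocations:
    - velero:
        provider: aws
        default: true
        objectStorage:
          bucket: <bucket_name>
          prefix: velero
        config:
          profile: "default"
          region: <region>
          s3ForcePathStyle: "true"
          s3Url: <mcg_s3_endpoint_url> # S3 endpoint exposed by the Multicloud Object Gateway
        credential:
          name: cloud-credentials
          key: cloud
----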
- -//include::modules/oadp-installing-operator.adoc[leveloffset=+1] -include::modules/migration-configuring-mcg.adoc[leveloffset=+1] -include::modules/oadp-about-backup-snapshot-locations-secrets.adoc[leveloffset=+1] -include::modules/oadp-creating-default-secret.adoc[leveloffset=+2] -include::modules/oadp-secrets-for-different-credentials.adoc[leveloffset=+2] - -[id="configuring-dpa-mcg"] -== Configuring the Data Protection Application - -You can configure the Data Protection Application by setting Velero resource allocations or enabling self-signed CA certificates. - -include::modules/oadp-setting-resource-limits-and-requests.adoc[leveloffset=+2] - -include::snippets/oadp-nodeselector-snippet.adoc[] - -For more details, see xref:../../../backup_and_restore/application_backup_and_restore/installing/installing-oadp-mcg.adoc#oadp-configuring-node-agents_installing-oadp-mcg[Configuring node agents and node labels]. - -include::modules/oadp-self-signed-certificate.adoc[leveloffset=+2] - -// include::modules/oadp-installing-dpa-1-2-and-earlier.adoc[leveloffset=+1] -include::modules/oadp-installing-dpa-1-3.adoc[leveloffset=+1] -include::modules/oadp-configuring-client-burst-qps.adoc[leveloffset=+1] -include::modules/oadp-configuring-imagepullpolicy.adoc[leveloffset=+1] -include::modules/oadp-configuring-node-agents.adoc[leveloffset=+2] -include::modules/oadp-enabling-csi-dpa.adoc[leveloffset=+2] -include::modules/oadp-about-disable-node-agent-dpa.adoc[leveloffset=+2] - -[discrete] -[role="_additional-resources"] -.Additional resources - -* link:https://access.redhat.com/solutions/6719951[Performance tuning guide for Multicloud Object Gateway]. - -* xref:../../../backup_and_restore/application_backup_and_restore/installing/installing-oadp-kubevirt.adoc#oadp-installing-dpa_installing-oadp-kubevirt[Installing the Data Protection Application with the `kubevirt` and `openshift` plugins] -* xref:../../../nodes/jobs/nodes-nodes-jobs.adoc#nodes-nodes-jobs[Running tasks in pods using jobs]. - -* xref:../../../backup_and_restore/application_backup_and_restore/installing/configuring-oadp-multiple-bsl.adoc#configuring-oadp-multiple-bsl[Configuring the {oadp-first} with multiple backup storage locations] - -:installing-oadp-mcg!: diff --git a/backup_and_restore/application_backup_and_restore/installing/installing-oadp-ocs.adoc b/backup_and_restore/application_backup_and_restore/installing/installing-oadp-ocs.adoc deleted file mode 100644 index 7086418b1180..000000000000 --- a/backup_and_restore/application_backup_and_restore/installing/installing-oadp-ocs.adoc +++ /dev/null @@ -1,66 +0,0 @@ -:_mod-docs-content-type: ASSEMBLY -[id="installing-oadp-ocs"] -= Configuring the OpenShift API for Data Protection with OpenShift Data Foundation -include::_attributes/common-attributes.adoc[] -:context: installing-oadp-ocs -:installing-oadp-ocs: -:credentials: cloud-credentials -:provider: gcp - -toc::[] - -You install the OpenShift API for Data Protection (OADP) with {rh-storage} by installing the OADP Operator and configuring a backup location and a snapshot location. Then, you install the Data Protection Application. - -include::snippets/oadp-mtc-operator.adoc[] - -You can configure xref:../../../backup_and_restore/application_backup_and_restore/installing/installing-oadp-mcg.adoc#installing-oadp-mcg[Multicloud Object Gateway] or any AWS S3-compatible object storage as a backup location. 
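As an orientation for the Object Bucket Claim module included later in this assembly, the following is a minimal sketch of an `ObjectBucketClaim` CR; the storage class name assumes a default {rh-storage} installation:

[source,yaml]
----
apiVersion: objectbucket.io/v1alpha1
kind: ObjectBucketClaim
metadata:
  name: <obc_name>
  namespace: openshift-adp
spec:
  generateBucketName: <bucket_prefix> # prefix for the generated bucket name
  storageClassName: openshift-storage.noobaa.io
----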
- -:FeatureName: The `CloudStorage` API, which automates the creation of a bucket for object storage, -include::snippets/technology-preview.adoc[] - -You can create a `Secret` CR for the backup location and install the Data Protection Application. For more details, see xref:../../../backup_and_restore/application_backup_and_restore/installing/oadp-installing-operator.adoc#oadp-installing-operator-doc[Installing the OADP Operator]. - -To install the OADP Operator in a restricted network environment, you must first disable the default OperatorHub sources and mirror the Operator catalog. For details, see xref:../../../disconnected/using-olm.adoc#olm-restricted-networks[Using Operator Lifecycle Manager in disconnected environments]. - -//include::modules/oadp-installing-operator.adoc[leveloffset=+1] -include::modules/oadp-about-backup-snapshot-locations-secrets.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources -* https://access.redhat.com/documentation/en-us/red_hat_openshift_data_foundation/4.13/html/managing_hybrid_and_multicloud_resources/object-bucket-claim#creating-an-object-bucket-claim-using-the-openshift-web-console_rhodf[Creating an Object Bucket Claim using the OpenShift Web Console]. - -include::modules/oadp-creating-default-secret.adoc[leveloffset=+2] -include::modules/oadp-secrets-for-different-credentials.adoc[leveloffset=+2] - -[id="configuring-dpa-ocs"] -== Configuring the Data Protection Application - -You can configure the Data Protection Application by setting Velero resource allocations or enabling self-signed CA certificates. - -include::modules/oadp-setting-resource-limits-and-requests.adoc[leveloffset=+2] - -include::snippets/oadp-nodeselector-snippet.adoc[] - -For more details, see xref:../../../backup_and_restore/application_backup_and_restore/installing/installing-oadp-ocs.adoc#oadp-configuring-node-agents_installing-oadp-ocs[Configuring node agents and node labels]. - -include::modules/oadp-odf-cpu-memory-requirements.adoc[leveloffset=+3] -include::modules/oadp-self-signed-certificate.adoc[leveloffset=+2] - -// include::modules/oadp-installing-dpa-1-2-and-earlier.adoc[leveloffset=+1] -include::modules/oadp-installing-dpa-1-3.adoc[leveloffset=+1] -include::modules/oadp-configuring-client-burst-qps.adoc[leveloffset=+1] -include::modules/oadp-configuring-imagepullpolicy.adoc[leveloffset=+1] -include::modules/oadp-configuring-node-agents.adoc[leveloffset=+2] -include::modules/oadp-creating-object-bucket-claim.adoc[leveloffset=+2] -include::modules/oadp-enabling-csi-dpa.adoc[leveloffset=+2] -include::modules/oadp-about-disable-node-agent-dpa.adoc[leveloffset=+2] - -[role="_additional-resources"] -.Additional resources - -* xref:../../../backup_and_restore/application_backup_and_restore/installing/installing-oadp-kubevirt.adoc#oadp-installing-dpa_installing-oadp-kubevirt[Installing the Data Protection Application with the `kubevirt` and `openshift` plugins] -* xref:../../../nodes/jobs/nodes-nodes-jobs.adoc#nodes-nodes-jobs[Running tasks in pods using jobs]. 
-
-* xref:../../../backup_and_restore/application_backup_and_restore/installing/configuring-oadp-multiple-bsl.adoc#configuring-oadp-multiple-bsl[Configuring the {oadp-first} with multiple backup storage locations]
-
-:installing-oadp-ocs!:
diff --git a/backup_and_restore/application_backup_and_restore/installing/modules b/backup_and_restore/application_backup_and_restore/installing/modules
deleted file mode 120000
index 36719b9de743..000000000000
--- a/backup_and_restore/application_backup_and_restore/installing/modules
+++ /dev/null
@@ -1 +0,0 @@
-../../modules/
\ No newline at end of file
diff --git a/backup_and_restore/application_backup_and_restore/installing/oadp-12-data-mover-ceph-doc.adoc b/backup_and_restore/application_backup_and_restore/installing/oadp-12-data-mover-ceph-doc.adoc
deleted file mode 100644
index 1cf62770df94..000000000000
--- a/backup_and_restore/application_backup_and_restore/installing/oadp-12-data-mover-ceph-doc.adoc
+++ /dev/null
@@ -1,65 +0,0 @@
-:_mod-docs-content-type: ASSEMBLY
-[id="oadp-12-data-mover-ceph-doc"]
-= Using OADP 1.2 Data Mover with Ceph storage
-include::_attributes/common-attributes.adoc[]
-:context: backing-up-applications
-
-toc::[]
-
-You can use OADP 1.2 Data Mover to back up and restore application data for clusters that use CephFS, CephRBD, or both.
-
-OADP 1.2 Data Mover leverages Ceph features that support large-scale environments. One of these is the shallow copy method, which is available for {product-title} 4.12 and later. This feature supports backing up and restoring volumes with a `StorageClass` and `AccessMode` other than those found on the source persistent volume claim (PVC).
-
-[IMPORTANT]
-====
-The CephFS shallow copy feature is a backup feature. It is not part of restore operations.
-====
-
-include::modules/oadp-ceph-prerequisites.adoc[leveloffset=+1]
-
-[id="defining-crs-for-12-data-mover"]
-== Defining custom resources for use with OADP 1.2 Data Mover
-
-When you install {rh-storage-first}, it automatically creates default CephFS and CephRBD `StorageClass` and `VolumeSnapshotClass` custom resources (CRs). You must define these CRs for use with OpenShift API for Data Protection (OADP) 1.2 Data Mover.
-
-After you define the CRs, you must make several other changes to your environment before you can perform your backup and restore operations.
-
-include::modules/oadp-ceph-preparing-cephfs-crs.adoc[leveloffset=+2]
-include::modules/oadp-ceph-preparing-cephrbd-crs.adoc[leveloffset=+2]
-include::modules/oadp-ceph-preparing-crs-additional.adoc[leveloffset=+2]
-
-[id="oadp-ceph-back-up-restore-cephfs"]
-== Backing up and restoring data using OADP 1.2 Data Mover and CephFS storage
-
-You can use OpenShift API for Data Protection (OADP) 1.2 Data Mover to back up and restore data using CephFS storage by enabling the shallow copy feature of CephFS.
-
-include::snippets/oadp-ceph-cr-prerequisites.adoc[]
-
-:context: !backing-up-applications
-
-:context: cephfs
-
-include::modules/oadp-ceph-cephfs-back-up-dba.adoc[leveloffset=+2]
-include::modules/oadp-ceph-cephfs-back-up.adoc[leveloffset=+2]
-include::modules/oadp-ceph-cephfs-restore.adoc[leveloffset=+2]
-
-[id="oadp-ceph-split"]
-== Backing up and restoring data using OADP 1.2 Data Mover and split volumes (CephFS and Ceph RBD)
-
-You can use OpenShift API for Data Protection (OADP) 1.2 Data Mover to back up and restore data in an environment that has _split volumes_, that is, an environment that uses both CephFS and CephRBD. 
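Both the CephFS and split-volumes workflows rely on the shallow copy method described at the top of this assembly. For orientation, the following is a hedged sketch of the kind of shallow-copy-capable `StorageClass` that the CephFS preparation modules included earlier create; the class and provisioner names assume a default {rh-storage} installation, and only the `backingSnapshot` parameter is specific to shallow copy:

[source,yaml]
----
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: ocs-storagecluster-cephfs-shallow
provisioner: openshift-storage.cephfs.csi.ceph.com
parameters:
  backingSnapshot: "true" # enables the CephFS shallow copy method
  # remaining parameters copied from the default CephFS storage class
reclaimPolicy: Delete
volumeBindingMode: Immediate
----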
- -include::snippets/oadp-ceph-cr-prerequisites.adoc[] - -:context!: - -:context: split - -include::modules/oadp-ceph-split-back-up-dba.adoc[leveloffset=+2] -include::modules/oadp-ceph-cephfs-back-up.adoc[leveloffset=+2] -include::modules/oadp-ceph-cephfs-restore.adoc[leveloffset=+2] - -:context!: - -:context: backing-up-applications - -include::modules/oadp-deletion-policy-1-2.adoc[leveloffset=+1] \ No newline at end of file diff --git a/backup_and_restore/application_backup_and_restore/installing/oadp-backup-restore-csi-snapshots.adoc b/backup_and_restore/application_backup_and_restore/installing/oadp-backup-restore-csi-snapshots.adoc deleted file mode 100644 index b0b7be6e2ebd..000000000000 --- a/backup_and_restore/application_backup_and_restore/installing/oadp-backup-restore-csi-snapshots.adoc +++ /dev/null @@ -1,15 +0,0 @@ -:_mod-docs-content-type: ASSEMBLY -[id="oadp-backup-restore-csi-snapshots"] -= Backing up and restoring CSI snapshots data movement -include::_attributes/common-attributes.adoc[] -:context: oadp-backup-restore-csi-snapshots - -toc::[] - -You can back up and restore persistent volumes by using the OADP 1.3 Data Mover. - -include::modules/oadp-1-3-backing-csi-snapshots.adoc[leveloffset=+1] - -include::modules/oadp-1-3-restoring-csi-snapshots.adoc[leveloffset=+1] - -include::modules/oadp-deletion-policy-1-3.adoc[leveloffset=+1] diff --git a/backup_and_restore/application_backup_and_restore/installing/oadp-cleaning-up-after-data-mover-1-1-backup-doc.adoc b/backup_and_restore/application_backup_and_restore/installing/oadp-cleaning-up-after-data-mover-1-1-backup-doc.adoc deleted file mode 100644 index 83535e5f3a5b..000000000000 --- a/backup_and_restore/application_backup_and_restore/installing/oadp-cleaning-up-after-data-mover-1-1-backup-doc.adoc +++ /dev/null @@ -1,26 +0,0 @@ -[id="oadp-cleaning-up-after-data-mover-1-1-backup-doc"] -= Cleaning up after a backup using OADP 1.1 Data Mover -include::_attributes/common-attributes.adoc[] -:context: datamover11 - -toc::[] - -For OADP 1.1 Data Mover, you must perform a data cleanup after you perform a backup. - -The cleanup consists of deleting the following resources: - -* Snapshots in a bucket -* Cluster resources -* Volume snapshot backups (VSBs) after a backup procedure that runs on a schedule or runs repeatedly - -include::modules/oadp-cleaning-up-after-data-mover-snapshots.adoc[leveloffset=+1] - -[id="deleting-cluster-resources-data-mover"] -== Deleting cluster resources - -OADP 1.1 Data Mover might leave cluster resources regardless of whether it successfully backs up your container storage interface (CSI) volume snapshots to a remote object store.
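Before you run the cleanup procedures in the modules that follow, it can help to list what a backup left behind. The following is a minimal sketch; it assumes the `datamover.oadp.openshift.io` API group that is used for the Data Mover examples elsewhere in this document:

[source,terminal]
----
$ oc get volumesnapshotbackups.datamover.oadp.openshift.io -A
----

[source,terminal]
----
$ oc get volumesnapshotcontents
----

The first command lists any remaining `VolumeSnapshotBackup` CRs across all namespaces, and the second command lists cluster-scoped `VolumeSnapshotContent` resources that might remain after a backup.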
- -include::modules/oadp-deleting-cluster-resources-following-success.adoc[leveloffset=+2] -include::modules/oadp-deleting-cluster-resources-following-failure.adoc[leveloffset=+2] - -include::modules/oadp-vsb-cleanup-after-scheduler.adoc[leveloffset=+1] diff --git a/backup_and_restore/application_backup_and_restore/installing/oadp-installing-operator.adoc b/backup_and_restore/application_backup_and_restore/installing/oadp-installing-operator.adoc deleted file mode 100644 index ba6d54403d8e..000000000000 --- a/backup_and_restore/application_backup_and_restore/installing/oadp-installing-operator.adoc +++ /dev/null @@ -1,25 +0,0 @@ -:_mod-docs-content-type: PROCEDURE -[id="oadp-installing-operator-doc"] -= Installing the OADP Operator -include::_attributes/common-attributes.adoc[] -:context: installing-oadp-operator - -toc::[] - -You can install the OpenShift API for Data Protection (OADP) Operator on {product-title} {product-version} by using Operator Lifecycle Manager (OLM). - -The OADP Operator installs link:https://{velero-domain}/docs/v{velero-version}/[Velero {velero-version}]. - -.Prerequisites - -* You must be logged in as a user with `cluster-admin` privileges. - -.Procedure - -. In the {product-title} web console, click *Operators* -> *OperatorHub*. -. Use the *Filter by keyword* field to find the *OADP Operator*. -. Select the *OADP Operator* and click *Install*. -. Click *Install* to install the Operator in the `openshift-adp` project. -. Click *Operators* -> *Installed Operators* to verify the installation. - -include::modules/velero-oadp-version-relationship.adoc[leveloffset=+1] diff --git a/backup_and_restore/application_backup_and_restore/installing/oadp-using-data-mover-for-csi-snapshots-doc.adoc b/backup_and_restore/application_backup_and_restore/installing/oadp-using-data-mover-for-csi-snapshots-doc.adoc deleted file mode 100644 index 86966993a943..000000000000 --- a/backup_and_restore/application_backup_and_restore/installing/oadp-using-data-mover-for-csi-snapshots-doc.adoc +++ /dev/null @@ -1,276 +0,0 @@ -// Module included in the following assemblies: -// -// * backup_and_restore/application_backup_and_restore/backing_up_and_restoring/backing-up-applications.adoc - -:_mod-docs-content-type: PROCEDURE -[id="oadp-using-data-mover-for-csi-snapshots-doc"] -= Using Data Mover for CSI snapshots -include::_attributes/common-attributes.adoc[] -:context: backing-up-applications - -toc::[] - -:FeatureName: Data Mover for CSI snapshots - -The OADP Data Mover enables customers to back up Container Storage Interface (CSI) volume snapshots to a remote object store. When Data Mover is enabled, you can restore stateful applications by using CSI volume snapshots that are pulled from the object store if a failure, accidental deletion, or corruption of the cluster occurs. - -The Data Mover solution uses the Restic option of VolSync. - -Data Mover supports backup and restore of CSI volume snapshots only. - -In OADP 1.2 Data Mover, `VolumeSnapshotBackup` (VSB) and `VolumeSnapshotRestore` (VSR) resources are queued by the VolumeSnapshotMover (VSM). You can improve VSM performance by specifying the number of VSBs and VSRs that can be `InProgress` concurrently. After all asynchronous plugin operations are complete, the backup is marked as complete.
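As a minimal sketch of the concurrency settings that the preceding paragraph describes, the limits are set under `spec.features.dataMover` in the Data Protection Application (DPA). The field names match the full DPA example later in this procedure; the values shown here are illustrative only:

[source,yaml]
----
spec:
  features:
    dataMover:
      enable: true
      maxConcurrentBackupVolumes: "3" # up to three VSBs can be InProgress at the same time
      maxConcurrentRestoreVolumes: "3" # up to three VSRs can be InProgress at the same time
----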
- - -:FeatureName: The OADP 1.2 Data Mover -include::snippets/technology-preview.adoc[leveloffset=+1] - -[NOTE] -==== -Red Hat recommends that customers who use OADP 1.2 Data Mover to back up and restore ODF CephFS volumes upgrade to or install {product-title} version 4.12 or later for improved performance. OADP Data Mover can leverage CephFS shallow volumes in {product-title} version 4.12 or later, which, based on our testing, can improve backup times. - -* https://issues.redhat.com/browse/RHSTOR-4287[CephFS ROX details] -//* https://github.com/ceph/ceph-csi/blob/devel/docs/cephfs-snapshot-backed-volumes.md[Provisioning and mounting CephFS snapshot-backed volumes] - - -//For more information about OADP 1.2 with CephS [name of topic], see ___. - -==== - -.Prerequisites - -* You have verified that the `StorageClass` and `VolumeSnapshotClass` custom resources (CRs) support CSI. - -* You have verified that only one `VolumeSnapshotClass` CR has the annotation `snapshot.storage.kubernetes.io/is-default-class: "true"`. -+ -[NOTE] -==== -In {product-title} version 4.12 or later, verify that this is the only default `VolumeSnapshotClass`. -==== - -* You have verified that `deletionPolicy` of the `VolumeSnapshotClass` CR is set to `Retain`. - -* You have verified that only one `StorageClass` CR has the annotation `storageclass.kubernetes.io/is-default-class: "true"`. - -* You have included the label `{velero-domain}/csi-volumesnapshot-class: "true"` in your `VolumeSnapshotClass` CR. - -* You have added the `volsync.backube/privileged-movers="true"` annotation to the OADP namespace, for example by running the `oc annotate --overwrite namespace/openshift-adp volsync.backube/privileged-movers="true"` command. -+ -[NOTE] -==== -In OADP 1.2, the `privileged-movers` setting is not required in most scenarios. The permissions of the restoring container should be adequate for the VolSync copy. In some user scenarios, there might be permission errors that setting `privileged-movers` to `"true"` resolves. -==== - -* You have installed the VolSync Operator by using the Operator Lifecycle Manager (OLM). -+ -[NOTE] -==== -The VolSync Operator is required for using OADP Data Mover. -==== - -* You have installed the OADP Operator by using OLM. -+ --- -include::snippets/xfs-filesystem-snippet.adoc[] --- - -.Procedure - -. Configure a Restic secret by creating a `.yaml` file as in the following example: -+ -[source,yaml] ----- -apiVersion: v1 -kind: Secret -metadata: - name: <secret_name> - namespace: openshift-adp -type: Opaque -stringData: - RESTIC_PASSWORD: <password> ----- -+ -[NOTE] -==== -By default, the Operator looks for a secret named `dm-credential`. If you are using a different name, you must specify the name through a Data Protection Application (DPA) CR by using `dpa.spec.features.dataMover.credentialName`. -==== - -. Create a DPA CR similar to the following example. The default plugins include CSI.
-+ -.Example Data Protection Application (DPA) CR -[source,yaml] ----- -apiVersion: oadp.openshift.io/v1alpha1 -kind: DataProtectionApplication -metadata: - name: velero-sample - namespace: openshift-adp -spec: - backupLocations: - - velero: - config: - profile: default - region: us-east-1 - credential: - key: cloud - name: cloud-credentials - default: true - objectStorage: - bucket: <bucket_name> - prefix: <bucket_prefix> - provider: aws - configuration: - restic: - enable: <true_or_false> - velero: - itemOperationSyncFrequency: "10s" - defaultPlugins: - - openshift - - aws - - csi - - vsm - features: - dataMover: - credentialName: restic-secret - enable: true - maxConcurrentBackupVolumes: "3" <1> - maxConcurrentRestoreVolumes: "3" <2> - pruneInterval: "14" <3> - volumeOptions: <4> - sourceVolumeOptions: - accessMode: ReadOnlyMany - cacheAccessMode: ReadWriteOnce - cacheCapacity: 2Gi - destinationVolumeOptions: - storageClass: other-storageclass-name - cacheAccessMode: ReadWriteMany - snapshotLocations: - - velero: - config: - profile: default - region: us-west-2 - provider: aws - ----- -<1> Optional: Specify the upper limit of the number of snapshots allowed to be queued for backup. The default value is `10`. -<2> Optional: Specify the upper limit of the number of snapshots allowed to be queued for restore. The default value is `10`. -<3> Optional: Specify the number of days between running Restic pruning on the repository. The prune operation repacks the data to free space, but it can also generate significant I/O traffic as a part of the process. Setting this option allows a trade-off between storage consumption from no-longer-referenced data and access costs. -<4> Optional: Specify VolSync volume options for backup and restore. - -+ -The OADP Operator installs two custom resource definitions (CRDs), `VolumeSnapshotBackup` and `VolumeSnapshotRestore`. -+ -.Example `VolumeSnapshotBackup` CR -[source,yaml] ----- -apiVersion: datamover.oadp.openshift.io/v1alpha1 -kind: VolumeSnapshotBackup -metadata: - name: <vsb_name> - namespace: <namespace> <1> -spec: - volumeSnapshotContent: - name: <snapcontent_name> - protectedNamespace: <protected_ns> <2> - resticSecretRef: - name: <restic_secret_name> ----- -<1> Specify the namespace where the volume snapshot exists. -<2> Specify the namespace where the OADP Operator is installed. The default is `openshift-adp`. -+ -.Example `VolumeSnapshotRestore` CR -[source,yaml] ----- -apiVersion: datamover.oadp.openshift.io/v1alpha1 -kind: VolumeSnapshotRestore -metadata: - name: <vsr_name> - namespace: <namespace> <1> -spec: - protectedNamespace: <protected_ns> <2> - resticSecretRef: - name: <restic_secret_name> - volumeSnapshotMoverBackupRef: - sourcePVCData: - name: <source_pvc_name> - size: <source_pvc_size> - resticrepository: <restic_repository> - volumeSnapshotClassName: <volume_snapshot_class_name> ----- -<1> Specify the namespace where the volume snapshot exists. -<2> Specify the namespace where the OADP Operator is installed. The default is `openshift-adp`. - -. You can back up a volume snapshot by performing the following steps: - -.. Create a backup CR: -+ -[source,yaml] ----- -apiVersion: velero.io/v1 -kind: Backup -metadata: - name: <backup_name> - namespace: <protected_ns> <1> -spec: - includedNamespaces: - - <app_namespace> <2> - storageLocation: velero-sample-1 ----- -<1> Specify the namespace where the Operator is installed. The default namespace is `openshift-adp`. -<2> Specify the application namespace or namespaces to be backed up. - -..
Wait up to 10 minutes and check whether the `VolumeSnapshotBackup` CR status is `Completed` by entering the following commands: -+ -[source,terminal] ----- -$ oc get vsb -n <app_namespace> ----- -+ -[source,terminal] ----- -$ oc get vsb -n <app_namespace> -o jsonpath="{.status.phase}" ----- -+ -A snapshot is created in the object store that was configured in the DPA. -+ -[NOTE] -==== -If the status of the `VolumeSnapshotBackup` CR becomes `Failed`, refer to the Velero logs for troubleshooting. -==== - -. You can restore a volume snapshot by performing the following steps: - -.. Delete the application namespace and the `VolumeSnapshotContent` that was created by the Velero CSI plugin. - -.. Create a `Restore` CR and set `restorePVs` to `true`. -+ -.Example `Restore` CR -[source,yaml] ----- -apiVersion: velero.io/v1 -kind: Restore -metadata: - name: <restore_name> - namespace: <protected_ns> -spec: - backupName: <previous_backup_name> - restorePVs: true ----- - -.. Wait up to 10 minutes and check whether the `VolumeSnapshotRestore` CR status is `Completed` by entering the following commands: -+ -[source,terminal] ----- -$ oc get vsr -n <app_namespace> ----- -+ -[source,terminal] ----- -$ oc get vsr -n <app_namespace> -o jsonpath="{.status.phase}" ----- - -.. Check whether your application data and resources have been restored. -+ -[NOTE] -==== -If the status of the `VolumeSnapshotRestore` CR becomes `Failed`, refer to the Velero logs for troubleshooting. -==== diff --git a/backup_and_restore/application_backup_and_restore/installing/overriding-kopia-algorithms.adoc b/backup_and_restore/application_backup_and_restore/installing/overriding-kopia-algorithms.adoc deleted file mode 100644 index e6bd6560057e..000000000000 --- a/backup_and_restore/application_backup_and_restore/installing/overriding-kopia-algorithms.adoc +++ /dev/null @@ -1,18 +0,0 @@ -:_mod-docs-content-type: ASSEMBLY -[id="overriding-kopia-algorithms"] -= Overriding Kopia hashing, encryption, and splitter algorithms -include::_attributes/common-attributes.adoc[] -:context: overriding-kopia-algorithms - - -toc::[] - -You can override the default values of Kopia hashing, encryption, and splitter algorithms by using specific environment variables in the Data Protection Application (DPA).
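The modules that follow describe the configuration in detail. As a minimal sketch of the approach, the overrides are environment variables on the Velero pod, set through the DPA. The `env` field under `podConfig`, the variable names, and the values shown here are assumptions to illustrate the shape of the configuration; confirm the exact names that your OADP version supports in the configuration module:

[source,yaml]
----
spec:
  configuration:
    velero:
      podConfig:
        env: # assumes your OADP version supports an env list in podConfig
          - name: KOPIA_HASHING_ALGORITHM # assumed variable name
            value: BLAKE3-256 # example Kopia hashing algorithm
          - name: KOPIA_ENCRYPTION_ALGORITHM # assumed variable name
            value: AES256-GCM-HMAC-SHA256 # example Kopia encryption algorithm
          - name: KOPIA_SPLITTER_ALGORITHM # assumed variable name
            value: DYNAMIC-4M-BUZHASH # example Kopia splitter algorithm
----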
- -// configuring the DPA with Kopia environment variables -include::modules/oadp-kopia-configuring-algorithms.adoc[leveloffset=+1] -// use case to demonstrate the usage of Kopia environment variables -include::modules/oadp-usecase-kopia-override-algorithms.adoc[leveloffset=+1] -// benchmarking Kopia algorithms -include::modules/oadp-kopia-algorithms-benchmarking.adoc[leveloffset=+1] - diff --git a/backup_and_restore/application_backup_and_restore/installing/snippets b/backup_and_restore/application_backup_and_restore/installing/snippets deleted file mode 120000 index ce62fd7c41e2..000000000000 --- a/backup_and_restore/application_backup_and_restore/installing/snippets +++ /dev/null @@ -1 +0,0 @@ -../../../snippets/ \ No newline at end of file diff --git a/backup_and_restore/application_backup_and_restore/installing/uninstalling-oadp.adoc b/backup_and_restore/application_backup_and_restore/installing/uninstalling-oadp.adoc deleted file mode 100644 index 7cf49e56d54e..000000000000 --- a/backup_and_restore/application_backup_and_restore/installing/uninstalling-oadp.adoc +++ /dev/null @@ -1,9 +0,0 @@ -:_mod-docs-content-type: ASSEMBLY -[id="uninstalling-oadp"] -= Uninstalling the OpenShift API for Data Protection -include::_attributes/common-attributes.adoc[] -:context: uninstalling-oadp - -toc::[] - -You uninstall the OpenShift API for Data Protection (OADP) by deleting the OADP Operator. See xref:../../../operators/admin/olm-deleting-operators-from-cluster.adoc#olm-deleting-operators-from-cluster[Deleting Operators from a cluster] for details. diff --git a/backup_and_restore/application_backup_and_restore/issues-with-velero-and-admission-webhooks.adoc b/backup_and_restore/application_backup_and_restore/issues-with-velero-and-admission-webhooks.adoc deleted file mode 100644 index 7632637939d0..000000000000 --- a/backup_and_restore/application_backup_and_restore/issues-with-velero-and-admission-webhooks.adoc +++ /dev/null @@ -1,33 +0,0 @@ -:_mod-docs-content-type: ASSEMBLY -[id="issues-with-velero-and-admission-webhooks"] -= Issues with Velero and admission webhooks -include::_attributes/common-attributes.adoc[] -include::_attributes/attributes-openshift-dedicated.adoc[] -:context: issues-with-velero-and-admission-webhooks -:namespace: openshift-adp -:local-product: OADP - -toc::[] - -Velero has a limited ability to resolve admission webhook issues during a restore. If you have workloads with admission webhooks, you might need to use an additional Velero plugin or make changes to how you restore the workload. - -Typically, workloads with admission webhooks require you to create a resource of a specific kind first. This is especially true if your workload has child resources because admission webhooks typically block child resources. - -For example, creating or restoring a top-level object such as `service.serving.knative.dev` typically creates child resources automatically. If you do this first, you will not need to use Velero to create and restore these resources. This avoids the problem of child resources being blocked by an admission webhook when Velero restores them directly. - -[id="velero-restore-workarounds-for-workloads-with-admission-webhooks_{context}"] -== Restore workarounds for Velero backups that use admission webhooks - -You need additional steps to restore resources for several types of Velero backups that use admission webhooks.
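As a sketch of the create-the-top-level-resource-first approach described above, you can run an initial restore that includes only the top-level kind and let its controller recreate the child resources. The resource name assumes a Knative workload, and the backup name is a placeholder:

[source,yaml]
----
apiVersion: velero.io/v1
kind: Restore
metadata:
  name: restore-top-level-only
  namespace: openshift-adp
spec:
  backupName: <backup_name>
  includedResources: # restore only the top-level kind; its controller recreates the children
    - service.serving.knative.dev
----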
- -include::modules/migration-debugging-velero-admission-webhooks-knative.adoc[leveloffset=+2] -include::modules/migration-debugging-velero-admission-webhooks-ibm-appconnect.adoc[leveloffset=+2] -include::modules/oadp-features-plugins-known-issues.adoc[leveloffset=+1] -include::modules/oadp-plugins-receiving-eof-message.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* xref:../../architecture/admission-plug-ins.adoc#admission-plug-ins[Admission plugins] -* xref:../../architecture/admission-plug-ins.adoc#admission-webhooks-about_admission-plug-ins[Webhook admission plugins] -* xref:../../architecture/admission-plug-ins.adoc#admission-webhook-types_admission-plug-ins[Types of webhook admission plugins] diff --git a/backup_and_restore/application_backup_and_restore/modules b/backup_and_restore/application_backup_and_restore/modules deleted file mode 120000 index 8b0e8540076d..000000000000 --- a/backup_and_restore/application_backup_and_restore/modules +++ /dev/null @@ -1 +0,0 @@ -../../modules \ No newline at end of file diff --git a/backup_and_restore/application_backup_and_restore/oadp-3scale/_attributes b/backup_and_restore/application_backup_and_restore/oadp-3scale/_attributes deleted file mode 120000 index bf7c2529fdb4..000000000000 --- a/backup_and_restore/application_backup_and_restore/oadp-3scale/_attributes +++ /dev/null @@ -1 +0,0 @@ -../../../_attributes/ \ No newline at end of file diff --git a/backup_and_restore/application_backup_and_restore/oadp-3scale/backing-up-and-restoring-3scale-by-using-oadp.adoc b/backup_and_restore/application_backup_and_restore/oadp-3scale/backing-up-and-restoring-3scale-by-using-oadp.adoc deleted file mode 100644 index 7381a8a42e7a..000000000000 --- a/backup_and_restore/application_backup_and_restore/oadp-3scale/backing-up-and-restoring-3scale-by-using-oadp.adoc +++ /dev/null @@ -1,43 +0,0 @@ -:_mod-docs-content-type: ASSEMBLY -[id="backing-up-and-restoring-3scale-by-using-oadp_{context}"] -= Backing up and restoring 3scale by using OADP -include::_attributes/common-attributes.adoc[] -:context: backing-up-and-restoring-3scale-by-using-oadp - -toc::[] - -With Red Hat 3scale API Management (APIM), you can manage your APIs for internal or external users. You can share, secure, distribute, control, and monetize your APIs on an infrastructure platform that is built for performance, customer control, and future growth. -You can deploy 3scale components on-premises, in the cloud, as a managed service, or in any combination based on your requirements. - -[NOTE] -==== -In this example, the non-service-affecting approach is used to back up and restore 3scale on-cluster storage by using the {oadp-first} Operator. -Additionally, ensure that you restore 3scale to the same cluster that it was backed up from. If you want to restore 3scale to a different cluster, ensure that both clusters use the same custom domain. -==== - -.Prerequisites - -* You installed and configured Red Hat 3scale. For more information, see link:https://docs.redhat.com/en/documentation/red_hat_3scale_api_management/2.15/html/installing_red_hat_3scale_api_management[Red Hat 3scale API Management].
- -include::modules/creating-the-data-protection-application.adoc[leveloffset=+1] -[role="_additional-resources"] -.Additional resources -* xref:../../../backup_and_restore/application_backup_and_restore/installing/installing-oadp-aws.adoc#oadp-installing-dpa_installing-oadp-aws[Installing the Data Protection Application] - -include::modules/backing-up-the-3scale-operator.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources -* xref:../../../backup_and_restore/application_backup_and_restore/backing_up_and_restoring/oadp-creating-backup-cr.adoc#oadp-creating-backup-cr-doc[Creating a Backup CR] - -include::modules/backing-up-the-mysql-database.adoc[leveloffset=+1] - -include::modules/backing-up-the-backend-redis-database.adoc[leveloffset=+1] - -include::modules/restoring-the-secrets-and-apimanager.adoc[leveloffset=+1] - -include::modules/restoring-the-mysql-database.adoc[leveloffset=+1] - -include::modules/restoring-the-backend-redis-database.adoc[leveloffset=+1] - -include::modules/scaling-up-the-3scale-operator-and-deployment.adoc[leveloffset=+1] \ No newline at end of file diff --git a/backup_and_restore/application_backup_and_restore/oadp-3scale/images b/backup_and_restore/application_backup_and_restore/oadp-3scale/images deleted file mode 120000 index 4399cbb3c0f3..000000000000 --- a/backup_and_restore/application_backup_and_restore/oadp-3scale/images +++ /dev/null @@ -1 +0,0 @@ -../../../images/ \ No newline at end of file diff --git a/backup_and_restore/application_backup_and_restore/oadp-3scale/modules b/backup_and_restore/application_backup_and_restore/oadp-3scale/modules deleted file mode 120000 index 7e8b50bee77a..000000000000 --- a/backup_and_restore/application_backup_and_restore/oadp-3scale/modules +++ /dev/null @@ -1 +0,0 @@ -../../../modules/ \ No newline at end of file diff --git a/backup_and_restore/application_backup_and_restore/oadp-3scale/snippets b/backup_and_restore/application_backup_and_restore/oadp-3scale/snippets deleted file mode 120000 index ce62fd7c41e2..000000000000 --- a/backup_and_restore/application_backup_and_restore/oadp-3scale/snippets +++ /dev/null @@ -1 +0,0 @@ -../../../snippets/ \ No newline at end of file diff --git a/backup_and_restore/application_backup_and_restore/oadp-advanced-topics.adoc b/backup_and_restore/application_backup_and_restore/oadp-advanced-topics.adoc deleted file mode 100644 index d77345385705..000000000000 --- a/backup_and_restore/application_backup_and_restore/oadp-advanced-topics.adoc +++ /dev/null @@ -1,46 +0,0 @@ -:_mod-docs-content-type: ASSEMBLY -[id="oadp-advanced-topics"] -= Advanced OADP features and functionalities -include::_attributes/common-attributes.adoc[] -:context: oadp-advanced-topics - -toc::[] - -This document provides information about advanced features and functionalities of OpenShift API for Data Protection (OADP). 
- -[id="oadp-different-kubernetes-api-versions"] -== Working with different Kubernetes API versions on the same cluster - -include::modules/oadp-checking-api-group-versions.adoc[leveloffset=+2] -include::modules/oadp-about-enable-api-group-versions.adoc[leveloffset=+2] -include::modules/oadp-using-enable-api-group-versions.adoc[leveloffset=+2] - -[id="backing-up-data-one-cluster-restoring-another-cluster"] -== Backing up data from one cluster and restoring it to another cluster - -include::modules/oadp-about-backing-and-restoring-from-cluster-to-cluster.adoc[leveloffset=+2] -include::modules/oadp-pod-volume-backup.adoc[leveloffset=+2] -include::modules/oadp-backing-up-opt-in.adoc[leveloffset=+3] -include::modules/oadp-backing-up-opt-out.adoc[leveloffset=+3] -include::modules/oadp-cluster-to-cluster-uid-and-gid-ranges.adoc[leveloffset=+2] -include::modules/oadp-backing-and-restoring-from-cluster-to-cluster.adoc[leveloffset=+2] - -[id="oadp-storage-class-mapping"] -== OADP storage class mapping - -include::modules/oadp-storage-class-mapping.adoc[leveloffset=+2] -include::modules/oadp-storage-class-mapping-oadp.adoc[leveloffset=+3] - -[role="_additional-resources"] -[id="additional-resources_oadp-advanced-topics"] -== Additional resources - -* xref:../../backup_and_restore/application_backup_and_restore/oadp-advanced-topics.adoc#oadp-different-kubernetes-api-versions[Working with different Kubernetes API versions on the same cluster]. - -* xref:../../backup_and_restore/application_backup_and_restore/installing/oadp-backup-restore-csi-snapshots.adoc#oadp-1-3-backing-csi-snapshots_oadp-backup-restore-csi-snapshots[Using Data Mover for CSI snapshots]. - -* xref:../../backup_and_restore/application_backup_and_restore/backing_up_and_restoring/oadp-backing-up-applications-restic-doc.adoc#backing-up-applications[Backing up applications with File System Backup: Kopia or Restic]. - -* xref:../../migration_toolkit_for_containers/advanced-migration-options-mtc.adoc#migration-converting-storage-classes_advanced-migration-options-mtc[Migration converting storage classes]. - -:!oadp-advanced-topics: diff --git a/backup_and_restore/application_backup_and_restore/oadp-api.adoc b/backup_and_restore/application_backup_and_restore/oadp-api.adoc deleted file mode 100644 index b6fdfe66c1e7..000000000000 --- a/backup_and_restore/application_backup_and_restore/oadp-api.adoc +++ /dev/null @@ -1,254 +0,0 @@ -:_mod-docs-content-type: ASSEMBLY -[id="oadp-api"] -= APIs used with OADP -include::_attributes/common-attributes.adoc[] -:context: oadp-api -:namespace: openshift-adp -:local-product: OADP -:velero-domain: velero.io - -toc::[] - -The document provides information about the following APIs that you can use with OADP: - -* Velero API -* OADP API - -[id="velero-api"] -== Velero API - -Velero API documentation is maintained by Velero, not by Red Hat. It can be found at link:https://velero.io/docs/main/api-types/[Velero API types]. - -[id="oadp-api-tables"] -== OADP API - -The following tables provide the structure of the OADP API: - -.DataProtectionApplicationSpec -[options="header"] -|=== -|Property|Type|Description - -|`backupLocations` -|[] link:https://pkg.go.dev/github.com/openshift/oadp-operator/api/v1alpha1#BackupLocation[`BackupLocation`] -|Defines the list of configurations to use for `BackupStorageLocations`. 
- -|`snapshotLocations` -|[] link:https://pkg.go.dev/github.com/openshift/oadp-operator/api/v1alpha1#SnapshotLocation[`SnapshotLocation`] -|Defines the list of configurations to use for `VolumeSnapshotLocations`. - -|`unsupportedOverrides` -|map [ link:https://pkg.go.dev/github.com/openshift/oadp-operator/api/v1alpha1#UnsupportedImageKey[UnsupportedImageKey] ] link:https://pkg.go.dev/builtin#string[string] -|Can be used to override the deployed dependent images for development. Options are `veleroImageFqin`, `awsPluginImageFqin`, `openshiftPluginImageFqin`, `azurePluginImageFqin`, `gcpPluginImageFqin`, `csiPluginImageFqin`, `dataMoverImageFqin`, `resticRestoreImageFqin`, `kubevirtPluginImageFqin`, and `operator-type`. - -|`podAnnotations` -|map [ link:https://pkg.go.dev/builtin#string[string] ] link:https://pkg.go.dev/builtin#string[string] -|Used to add annotations to pods deployed by Operators. - -|`podDnsPolicy` -|link:https://pkg.go.dev/k8s.io/api/core/v1#DNSPolicy[`DNSPolicy`] -|Defines the configuration of the DNS of a pod. - -|`podDnsConfig` -|link:https://pkg.go.dev/k8s.io/api/core/v1#PodDNSConfig[`PodDNSConfig`] -|Defines the DNS parameters of a pod in addition to those generated from `DNSPolicy`. - -|`backupImages` -|*link:https://pkg.go.dev/builtin#bool[bool] -|Used to specify whether you want to deploy a registry for enabling backup and restore of images. - -|`configuration` -|*link:https://pkg.go.dev/github.com/openshift/oadp-operator/api/v1alpha1#ApplicationConfig[`ApplicationConfig`] -|Used to define the data protection application's server configuration. - -|`features` -|*link:https://pkg.go.dev/github.com/openshift/oadp-operator/api/v1alpha1#Features[`Features`] -|Defines the configuration for the DPA to enable the Technology Preview features. -|=== - -link:https://pkg.go.dev/github.com/openshift/oadp-operator/api/v1alpha1#DataProtectionApplicationSpec[Complete schema definitions for the OADP API]. - -.BackupLocation -[options="header"] -|=== -|Property|Type|Description - -|`velero` -|*link:https://pkg.go.dev/github.com/vmware-tanzu/velero/pkg/apis/velero/v1#BackupStorageLocationSpec[velero.BackupStorageLocationSpec] -|Location to store backups, as described in link:https://pkg.go.dev/github.com/vmware-tanzu/velero/pkg/apis/velero/v1#BackupStorageLocation[Backup Storage Location]. - -|`bucket` -| *link:https://pkg.go.dev/github.com/openshift/oadp-operator/api/v1alpha1#CloudStorageLocation[CloudStorageLocation] -| [Technology Preview] Automates creation of a bucket at some cloud storage providers for use as a backup storage location. -|=== - -:FeatureName: The `bucket` parameter -include::snippets/technology-preview.adoc[leveloffset=+1] - -link:https://pkg.go.dev/github.com/openshift/oadp-operator/api/v1alpha1#BackupLocation[Complete schema definitions for the type `BackupLocation`]. - -.SnapshotLocation -[options="header"] -|=== -|Property|Type|Description - -|`velero` -|*link:https://pkg.go.dev/github.com/vmware-tanzu/velero/pkg/apis/velero/v1#VolumeSnapshotLocationSpec[VolumeSnapshotLocationSpec] -|Location to store volume snapshots, as described in link:https://pkg.go.dev/github.com/vmware-tanzu/velero/pkg/apis/velero/v1#VolumeSnapshotLocation[Volume Snapshot Location]. -|=== - -link:https://pkg.go.dev/github.com/openshift/oadp-operator/api/v1alpha1#SnapshotLocation[Complete schema definitions for the type `SnapshotLocation`].
- -.ApplicationConfig -[options="header"] -|=== -|Property|Type|Description - -|`velero` -|*link:https://pkg.go.dev/github.com/openshift/oadp-operator/api/v1alpha1#VeleroConfig[VeleroConfig] -|Defines the configuration for the Velero server. - -|`restic` -|*link:https://pkg.go.dev/github.com/openshift/oadp-operator/api/v1alpha1#ResticConfig[ResticConfig] -|Defines the configuration for the Restic server. -|=== - -link:https://pkg.go.dev/github.com/openshift/oadp-operator/api/v1alpha1#ApplicationConfig[Complete schema definitions for the type `ApplicationConfig`]. - -.VeleroConfig -[options="header"] -|=== -|Property|Type|Description - -|`featureFlags` -|[] link:https://pkg.go.dev/builtin#string[string] -|Defines the list of features to enable for the Velero instance. - -|`defaultPlugins` -|[] link:https://pkg.go.dev/builtin#string[string] -|The following types of default Velero plugins can be installed: `aws`, `azure`, `csi`, `gcp`, `kubevirt`, and `openshift`. - -|`customPlugins` -|[]link:https://pkg.go.dev/github.com/openshift/oadp-operator/api/v1alpha1#CustomPlugin[CustomPlugin] -|Used for installation of custom Velero plugins. - -Default and custom plugins are described in xref:../../backup_and_restore/application_backup_and_restore/oadp-features-plugins#oadp-features-plugins[OADP plugins]. - -|`restoreResourcesVersionPriority` -|link:https://pkg.go.dev/builtin#string[string] -|Represents a config map that is created if defined for use in conjunction with the `EnableAPIGroupVersions` feature flag. Defining this field automatically adds `EnableAPIGroupVersions` to the Velero server feature flag. - -|`noDefaultBackupLocation` -|link:https://pkg.go.dev/builtin#bool[bool] -|To install Velero without a default backup storage location, you must set the `noDefaultBackupLocation` flag in order to confirm installation. - -|`podConfig` -|*link:https://pkg.go.dev/github.com/openshift/oadp-operator/api/v1alpha1#PodConfig[`PodConfig`] -|Defines the configuration of the `Velero` pod. - -|`logLevel` -|link:https://pkg.go.dev/builtin#string[string] -|Velero server’s log level (use `debug` for the most granular logging, leave unset for Velero default). Valid options are `trace`, `debug`, `info`, `warning`, `error`, `fatal`, and `panic`. -|=== - -link:https://pkg.go.dev/github.com/openshift/oadp-operator/api/v1alpha1#VeleroConfig[Complete schema definitions for the type `VeleroConfig`]. - -.CustomPlugin -[options="header"] -|=== -|Property|Type|Description - -|`name` -|link:https://pkg.go.dev/builtin#string[string] -|Name of custom plugin. - -|`image` -|link:https://pkg.go.dev/builtin#string[string] -|Image of custom plugin. -|=== - -link:https://pkg.go.dev/github.com/openshift/oadp-operator/api/v1alpha1#CustomPlugin[Complete schema definitions for the type `CustomPlugin`]. - -.ResticConfig -[options="header"] -|=== -|Property|Type|Description - -|`enable` -|*link:https://pkg.go.dev/builtin#bool[bool] -|If set to `true`, enables backup and restore using Restic. If set to `false`, snapshots are used instead. - -|`supplementalGroups` -|[]link:https://pkg.go.dev/builtin#int64[int64] -|Defines the Linux groups to be applied to the `Restic` pod. - -|`timeout` -|link:https://pkg.go.dev/builtin#string[string] -|A user-supplied duration string that defines the Restic timeout. Default value is `1hr` (1 hour). A duration string is a possibly signed sequence of decimal numbers, each with optional fraction and a unit suffix, such as `300ms`, `-1.5h`, or `2h45m`.
Valid time units are `ns`, `us` (or `µs`), `ms`, `s`, `m`, and `h`. - -|`podConfig` -|*link:https://pkg.go.dev/github.com/openshift/oadp-operator/api/v1alpha1#PodConfig[`PodConfig`] -|Defines the configuration of the `Restic` pod. -|=== - -link:https://pkg.go.dev/github.com/openshift/oadp-operator/api/v1alpha1#ResticConfig[Complete schema definitions for the type `ResticConfig`]. - -.PodConfig -[options="header"] -|=== -|Property|Type|Description - -|`nodeSelector` -|map [ link:https://pkg.go.dev/builtin#string[string] ] link:https://pkg.go.dev/builtin#string[string] -|Defines the `nodeSelector` to be supplied to a `Velero` `podSpec` or a `Restic` `podSpec`. -For more details, see xref:../../backup_and_restore/application_backup_and_restore/oadp-api.adoc#oadp-configuring-node-agents_oadp-api[Configuring node agents and node labels]. - -|`tolerations` -|[]link:https://pkg.go.dev/k8s.io/api/core/v1#Toleration[Toleration] -|Defines the list of tolerations to be applied to a Velero deployment or a Restic `daemonset`. - -|`resourceAllocations` -|link:https://pkg.go.dev/k8s.io/api/core/v1#ResourceRequirements[ResourceRequirements] -|Set specific resource `limits` and `requests` for a `Velero` pod or a `Restic` pod as described in xref:../../backup_and_restore/application_backup_and_restore/installing/installing-oadp-aws.adoc#oadp-setting-resource-limits-and-requests_installing-oadp-aws[Setting Velero CPU and memory resource allocations]. - -|`labels` -|map [ link:https://pkg.go.dev/builtin#string[string] ] link:https://pkg.go.dev/builtin#string[string] -|Labels to add to pods. -|=== - -include::modules/oadp-configuring-node-agents.adoc[leveloffset=+2] - -link:https://pkg.go.dev/github.com/openshift/oadp-operator/api/v1alpha1#PodConfig[Complete schema definitions for the type `PodConfig`]. - -.Features -[options="header"] -|=== -|Property|Type|Description - -|`dataMover` -|*link:https://pkg.go.dev/github.com/openshift/oadp-operator/api/v1alpha1#DataMover[`DataMover`] -|Defines the configuration of the Data Mover. -|=== - -link:https://pkg.go.dev/github.com/openshift/oadp-operator/api/v1alpha1#Features[Complete schema definitions for the type `Features`]. - -.DataMover -[options="header"] -|=== -|Property|Type|Description - -|`enable` -|link:https://pkg.go.dev/builtin#bool[bool] -|If set to `true`, deploys the volume snapshot mover controller and a modified CSI Data Mover plugin. If set to `false`, these are not deployed. - -|`credentialName` -|link:https://pkg.go.dev/builtin#string[string] -|User-supplied Restic `Secret` name for Data Mover. - -|`timeout` -|link:https://pkg.go.dev/builtin#string[string] -|A user-supplied duration string for `VolumeSnapshotBackup` and `VolumeSnapshotRestore` to complete. Default is `10m` (10 minutes). A duration string is a possibly signed sequence of decimal numbers, each with optional fraction and a unit suffix, such as `300ms`, `-1.5h`, or `2h45m`. Valid time units are `ns`, `us` (or `µs`), `ms`, `s`, `m`, and `h`. -|=== - -The OADP API is more fully detailed in link:https://pkg.go.dev/github.com/openshift/oadp-operator[OADP Operator].
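To tie the preceding tables together, the following is a minimal `DataProtectionApplication` sketch that uses only fields described in this reference. The bucket values are placeholders, and the plugin list and resource values are examples to adapt to your environment:

[source,yaml]
----
apiVersion: oadp.openshift.io/v1alpha1
kind: DataProtectionApplication
metadata:
  name: dpa-sample
  namespace: openshift-adp
spec:
  backupLocations:
    - velero: # velero.BackupStorageLocationSpec, as described in the BackupLocation table
        provider: aws
        default: true
        credential:
          key: cloud
          name: cloud-credentials
        objectStorage:
          bucket: <bucket_name>
          prefix: <bucket_prefix>
  configuration:
    velero: # VeleroConfig, as described above
      defaultPlugins:
        - openshift
        - aws
      podConfig: # PodConfig, as described above
        resourceAllocations:
          requests:
            cpu: 500m
            memory: 256Mi
----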
- diff --git a/backup_and_restore/application_backup_and_restore/oadp-features-plugins.adoc b/backup_and_restore/application_backup_and_restore/oadp-features-plugins.adoc deleted file mode 100644 index bfd7b1c4cb7a..000000000000 --- a/backup_and_restore/application_backup_and_restore/oadp-features-plugins.adoc +++ /dev/null @@ -1,40 +0,0 @@ -:_mod-docs-content-type: ASSEMBLY -[id="oadp-features-plugins"] -= OADP features and plugins -include::_attributes/common-attributes.adoc[] -:context: oadp-features-plugins - -toc::[] - -OpenShift API for Data Protection (OADP) features provide options for backing up and restoring applications. - -The default plugins enable Velero to integrate with certain cloud providers and to back up and restore {product-title} resources. - -include::modules/oadp-features.adoc[leveloffset=+1] -include::modules/oadp-plugins.adoc[leveloffset=+1] -include::modules/oadp-configuring-velero-plugins.adoc[leveloffset=+1] -include::modules/oadp-plugins-receiving-eof-message.adoc[leveloffset=+2] -ifndef::openshift-rosa,openshift-rosa-hcp[] -include::modules/oadp-supported-architecture.adoc[leveloffset=+1] -endif::openshift-rosa,openshift-rosa-hcp[] - -ifndef::openshift-rosa,openshift-rosa-hcp[] -[id="oadp-support-for-ibm-power-and-ibm-z"] -== OADP support for {ibm-power-title} and {ibm-z-title} - -OpenShift API for Data Protection (OADP) is platform neutral. The information that follows relates only to {ibm-power-name} and to {ibm-z-name}. - -* {oadp-short} {oadp-version-1-3} was tested successfully against {product-title} 4.12, 4.13, 4.14, and 4.15 for both {ibm-power-name} and {ibm-z-name}. The sections that follow give testing and support information for {oadp-short} {oadp-version-1-3} in terms of backup locations for these systems. -* {oadp-short} {oadp-version-1-4} was tested successfully against {product-title} 4.14, 4.15, 4.16, and 4.17 for both {ibm-power-name} and {ibm-z-name}. The sections that follow give testing and support information for {oadp-short} {oadp-version-1-4} in terms of backup locations for these systems. - - -include::modules/oadp-ibm-power-test-support.adoc[leveloffset=+2] -include::modules/oadp-ibm-z-test-support.adoc[leveloffset=+2] -include::modules/oadp-ibm-power-and-z-known-issues.adoc[leveloffset=+3] -endif::openshift-rosa,openshift-rosa-hcp[] - -include::modules/oadp-fips.adoc[leveloffset=+1] - -include::modules/oadp-features-plugins-known-issues.adoc[leveloffset=+1] - -:!oadp-features-plugins: diff --git a/backup_and_restore/application_backup_and_restore/oadp-installation-issues.adoc b/backup_and_restore/application_backup_and_restore/oadp-installation-issues.adoc deleted file mode 100644 index 539f024c3453..000000000000 --- a/backup_and_restore/application_backup_and_restore/oadp-installation-issues.adoc +++ /dev/null @@ -1,49 +0,0 @@ -:_mod-docs-content-type: ASSEMBLY -[id="oadp-installation-issues"] -= OADP installation issues -include::_attributes/common-attributes.adoc[] -include::_attributes/attributes-openshift-dedicated.adoc[] -:context: installation-issues -:namespace: openshift-adp -:local-product: OADP - -toc::[] - -You might encounter issues caused by using invalid directories or incorrect credentials when you install the Data Protection Application. - -[id="oadp-backup-location-contains-invalid-directories_{context}"] -== Backup storage contains invalid directories - -The `Velero` pod log displays the following error message: `Backup storage contains invalid top-level directories`. 
- -.Cause - -The object storage contains top-level directories that are not Velero directories. - -.Solution - -If the object storage is not dedicated to Velero, you must specify a prefix for the bucket by setting the `spec.backupLocations.velero.objectStorage.prefix` parameter in the `DataProtectionApplication` manifest. - -[id="oadp-incorrect-aws-credentials_{context}"] -== Incorrect AWS credentials - -The `oadp-aws-registry` pod log displays the following error message: `InvalidAccessKeyId: The AWS Access Key Id you provided does not exist in our records.` - -The `Velero` pod log displays the following error message: `NoCredentialProviders: no valid providers in chain`. - -.Cause - -The `credentials-velero` file used to create the `Secret` object is incorrectly formatted. - -.Solution - -Ensure that the `credentials-velero` file is correctly formatted, as in the following example: - -.Example `credentials-velero` file ----- -[default] <1> -aws_access_key_id=AKIAIOSFODNN7EXAMPLE <2> -aws_secret_access_key=wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY ----- -<1> AWS default profile. -<2> Do not enclose the values with quotation marks (`"`, `'`). diff --git a/backup_and_restore/application_backup_and_restore/oadp-intro.adoc b/backup_and_restore/application_backup_and_restore/oadp-intro.adoc deleted file mode 100644 index 499a67e83ada..000000000000 --- a/backup_and_restore/application_backup_and_restore/oadp-intro.adoc +++ /dev/null @@ -1,45 +0,0 @@ -:_mod-docs-content-type: ASSEMBLY -[id="oadp-introduction"] -= Introduction to {oadp-full} -include::_attributes/common-attributes.adoc[] -include::_attributes/attributes-openshift-dedicated.adoc[] -:context: oadp-api -:namespace: openshift-adp -:local-product: OADP - -toc::[] - -The {oadp-first} product safeguards customer applications on {product-title}. It offers comprehensive disaster recovery protection, covering {product-title} applications, application-related cluster resources, persistent volumes, and internal images. {oadp-short} is also capable of backing up both containerized applications and virtual machines (VMs). - -ifndef::openshift-rosa,openshift-rosa-hcp[] -However, {oadp-short} does not serve as a disaster recovery solution for xref:../../backup_and_restore/control_plane_backup_and_restore/backing-up-etcd.adoc#backup-etcd[etcd] or {OCP-short} Operators. -endif::openshift-rosa,openshift-rosa-hcp[] - -{oadp-short} support is provided for customer workload namespaces and for cluster-scoped resources. - -Full cluster xref:../../backup_and_restore/application_backup_and_restore/backing_up_and_restoring/backing-up-applications.adoc#backing-up-applications[backup] and xref:../../backup_and_restore/application_backup_and_restore/backing_up_and_restoring/restoring-applications.adoc#restoring-applications[restore] are not supported. - -[id="oadp-apis_{context}"] -== {oadp-full} APIs - -{oadp-first} provides APIs that enable multiple approaches to customizing backups and preventing the inclusion of unnecessary or inappropriate resources.
- -OADP provides the following APIs: - -* xref:../../backup_and_restore/application_backup_and_restore/backing_up_and_restoring/backing-up-applications.adoc#backing-up-applications[Backup] -* xref:../../backup_and_restore/application_backup_and_restore/backing_up_and_restoring/restoring-applications.adoc#restoring-applications[Restore] -* xref:../../backup_and_restore/application_backup_and_restore/backing_up_and_restoring/oadp-scheduling-backups-doc.adoc#oadp-scheduling-backups-doc[Schedule] -ifndef::openshift-rosa,openshift-rosa-hcp[] -* xref:../../backup_and_restore/application_backup_and_restore/installing/installing-oadp-aws.adoc#oadp-about-backup-snapshot-locations_installing-oadp-aws[BackupStorageLocation] -* xref:../../backup_and_restore/application_backup_and_restore/backing_up_and_restoring/oadp-backing-up-pvs-csi-doc.adoc#oadp-backing-up-pvs-csi-doc[VolumeSnapshotLocation] -endif::openshift-rosa,openshift-rosa-hcp[] - -include::modules/oadp-operator-supported.adoc[leveloffset=+2] - -ifndef::openshift-rosa,openshift-rosa-hcp[] -[role="_additional-resources"] -.Additional resources - -* xref:../../backup_and_restore/control_plane_backup_and_restore/backing-up-etcd.adoc#backup-etcd[Backing up etcd] -// once finished re-work come back and add doc links to the APIs -endif::openshift-rosa,openshift-rosa-hcp[] diff --git a/backup_and_restore/application_backup_and_restore/oadp-monitoring.adoc b/backup_and_restore/application_backup_and_restore/oadp-monitoring.adoc deleted file mode 100644 index 40b8dbf05f8f..000000000000 --- a/backup_and_restore/application_backup_and_restore/oadp-monitoring.adoc +++ /dev/null @@ -1,32 +0,0 @@ -:_mod-docs-content-type: ASSEMBLY -[id="oadp-monitoring"] -= OADP monitoring -include::_attributes/common-attributes.adoc[] -include::_attributes/attributes-openshift-dedicated.adoc[] -:context: oadp-monitoring -:namespace: openshift-adp -:local-product: OADP - -toc::[] - -By using the {product-title} monitoring stack, users and administrators can effectively perform the following tasks: - -* Monitor and manage clusters -* Analyze the workload performance of user applications -* Monitor services running on the clusters -* Receive alerts if an event occurs - -[role="_additional-resources"] -.Additional resources -* xref:../../observability/monitoring/about-ocp-monitoring/about-ocp-monitoring.adoc#about-ocp-monitoring[About {product-title} monitoring] - -include::modules/oadp-monitoring-setup.adoc[leveloffset=+1] -include::modules/oadp-creating-service-monitor.adoc[leveloffset=+1] -include::modules/oadp-creating-alerting-rule.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources -* xref:../../observability/monitoring/managing-alerts/managing-alerts-as-an-administrator.adoc#managing-alerts-as-an-administrator[Managing alerts as an Administrator] - -include::modules/oadp-list-of-metrics.adoc[leveloffset=+1] -include::modules/oadp-viewing-metrics-ui.adoc[leveloffset=+1] \ No newline at end of file diff --git a/backup_and_restore/application_backup_and_restore/oadp-operator-issues.adoc b/backup_and_restore/application_backup_and_restore/oadp-operator-issues.adoc deleted file mode 100644 index 4e9c377b59ef..000000000000 --- a/backup_and_restore/application_backup_and_restore/oadp-operator-issues.adoc +++ /dev/null @@ -1,93 +0,0 @@ -:_mod-docs-content-type: ASSEMBLY -[id="oadp-operator-issues"] -= OADP Operator issues -include::_attributes/common-attributes.adoc[] -include::_attributes/attributes-openshift-dedicated.adoc[] -:context: 
oadp-operator-issues -:namespace: openshift-adp -:local-product: OADP - -toc::[] - -The {oadp-first} Operator might encounter issues that it cannot resolve on its own. - -[id="oadp-operator-fails-silently_{context}"] -== OADP Operator fails silently - -The S3 buckets of an OADP Operator might be empty, but when you run the command `oc get po -n <oadp_namespace>`, you see that the Operator has a status of `Running`. In such a case, the Operator is said to have _failed silently_ because it incorrectly reports that it is running. - -.Cause - -The problem occurs when cloud credentials provide insufficient permissions. - -.Solution - -Retrieve a list of backup storage locations (BSLs) and check the manifest of each BSL for credential issues. - -.Procedure - -. Retrieve a list of BSLs by using either the OpenShift or Velero command-line interface (CLI): -.. Retrieve a list of BSLs by using the OpenShift CLI (`oc`): -+ -[source,terminal] ----- -$ oc get backupstoragelocations.velero.io -A ----- -.. Retrieve a list of BSLs by using the `velero` CLI: -+ -[source,terminal] ----- -$ velero backup-location get -n <oadp_namespace> ----- - -. Use the list of BSLs from the previous step and run the following command to examine the manifest of each BSL for an error: -+ -[source,terminal] ----- -$ oc get backupstoragelocations.velero.io -n <oadp_namespace> -o yaml ----- -+ -.Example result -[source, yaml] ----- -apiVersion: v1 -items: -- apiVersion: velero.io/v1 - kind: BackupStorageLocation - metadata: - creationTimestamp: "2023-11-03T19:49:04Z" - generation: 9703 - name: example-dpa-1 - namespace: openshift-adp-operator - ownerReferences: - - apiVersion: oadp.openshift.io/v1alpha1 - blockOwnerDeletion: true - controller: true - kind: DataProtectionApplication - name: example-dpa - uid: 0beeeaff-0287-4f32-bcb1-2e3c921b6e82 - resourceVersion: "24273698" - uid: ba37cd15-cf17-4f7d-bf03-8af8655cea83 - spec: - config: - enableSharedConfig: "true" - region: us-west-2 - credential: - key: credentials - name: cloud-credentials - default: true - objectStorage: - bucket: example-oadp-operator - prefix: example - provider: aws - status: - lastValidationTime: "2023-11-10T22:06:46Z" - message: "BackupStorageLocation \"example-dpa-1\" is unavailable: rpc - error: code = Unknown desc = WebIdentityErr: failed to retrieve credentials\ncaused - by: AccessDenied: Not authorized to perform sts:AssumeRoleWithWebIdentity\n\tstatus - code: 403, request id: d3f2e099-70a0-467b-997e-ff62345e3b54" - phase: Unavailable -kind: List -metadata: - resourceVersion: "" ----- \ No newline at end of file diff --git a/backup_and_restore/application_backup_and_restore/oadp-performance/_attributes b/backup_and_restore/application_backup_and_restore/oadp-performance/_attributes deleted file mode 120000 index bf7c2529fdb4..000000000000 --- a/backup_and_restore/application_backup_and_restore/oadp-performance/_attributes +++ /dev/null @@ -1 +0,0 @@ -../../../_attributes/ \ No newline at end of file diff --git a/backup_and_restore/application_backup_and_restore/oadp-performance/images b/backup_and_restore/application_backup_and_restore/oadp-performance/images deleted file mode 120000 index 4399cbb3c0f3..000000000000 --- a/backup_and_restore/application_backup_and_restore/oadp-performance/images +++ /dev/null @@ -1 +0,0 @@ -../../../images/ \ No newline at end of file diff --git a/backup_and_restore/application_backup_and_restore/oadp-performance/modules b/backup_and_restore/application_backup_and_restore/oadp-performance/modules deleted file mode 120000 index
36719b9de743..000000000000 --- a/backup_and_restore/application_backup_and_restore/oadp-performance/modules +++ /dev/null @@ -1 +0,0 @@ -../../modules/ \ No newline at end of file diff --git a/backup_and_restore/application_backup_and_restore/oadp-performance/oadp-recommended-network-settings.adoc b/backup_and_restore/application_backup_and_restore/oadp-performance/oadp-recommended-network-settings.adoc deleted file mode 100644 index 63c891c78044..000000000000 --- a/backup_and_restore/application_backup_and_restore/oadp-performance/oadp-recommended-network-settings.adoc +++ /dev/null @@ -1,25 +0,0 @@ -:_mod-docs-content-type: ASSEMBLY -[id="oadp-recommended-network-settings"] -= {oadp-short} recommended network settings -include::_attributes/common-attributes.adoc[] -include::_attributes/attributes-openshift-dedicated.adoc[] - -:context: oadp-recommended-network-settings - -toc::[] - -For a supported experience with {oadp-first}, you should have a stable and resilient network between {OCP-short} nodes and S3 storage, in supported cloud environments that meet {OCP-short} network requirement recommendations. - -To ensure successful backup and restore operations for deployments in which remote S3 buckets are located off-cluster and the data path is suboptimal, your network settings should meet the following minimum requirements: - -* Bandwidth (network upload speed to object storage): Greater than 2 Mbps for small backups, and 10-100 Mbps for larger backups, depending on the data volume. -* Packet loss: 1% or less -* Packet corruption: 1% or less -* Latency: 100 ms or less - -Ensure that your {product-title} network performs optimally and meets {product-title} network requirements. - -[IMPORTANT] -==== -Although Red Hat provides support for standard backup and restore failures, it does not provide support for failures caused by network settings that do not meet the recommended thresholds.
-==== diff --git a/backup_and_restore/application_backup_and_restore/oadp-performance/snippets b/backup_and_restore/application_backup_and_restore/oadp-performance/snippets deleted file mode 120000 index ce62fd7c41e2..000000000000 --- a/backup_and_restore/application_backup_and_restore/oadp-performance/snippets +++ /dev/null @@ -1 +0,0 @@ -../../../snippets/ \ No newline at end of file diff --git a/backup_and_restore/application_backup_and_restore/oadp-rosa/_attributes b/backup_and_restore/application_backup_and_restore/oadp-rosa/_attributes deleted file mode 120000 index bf7c2529fdb4..000000000000 --- a/backup_and_restore/application_backup_and_restore/oadp-rosa/_attributes +++ /dev/null @@ -1 +0,0 @@ -../../../_attributes/ \ No newline at end of file diff --git a/backup_and_restore/application_backup_and_restore/oadp-rosa/images b/backup_and_restore/application_backup_and_restore/oadp-rosa/images deleted file mode 120000 index 4399cbb3c0f3..000000000000 --- a/backup_and_restore/application_backup_and_restore/oadp-rosa/images +++ /dev/null @@ -1 +0,0 @@ -../../../images/ \ No newline at end of file diff --git a/backup_and_restore/application_backup_and_restore/oadp-rosa/modules b/backup_and_restore/application_backup_and_restore/oadp-rosa/modules deleted file mode 120000 index 5be29a99c161..000000000000 --- a/backup_and_restore/application_backup_and_restore/oadp-rosa/modules +++ /dev/null @@ -1 +0,0 @@ -../../../modules \ No newline at end of file diff --git a/backup_and_restore/application_backup_and_restore/oadp-rosa/oadp-rosa-backing-up-applications.adoc b/backup_and_restore/application_backup_and_restore/oadp-rosa/oadp-rosa-backing-up-applications.adoc deleted file mode 100644 index 16d46bc603f1..000000000000 --- a/backup_and_restore/application_backup_and_restore/oadp-rosa/oadp-rosa-backing-up-applications.adoc +++ /dev/null @@ -1,57 +0,0 @@ -:_mod-docs-content-type: ASSEMBLY -[id="oadp-rosa-backing-up-applications"] -ifndef::openshift-rosa,openshift-rosa-hcp[] -= Backing up applications on ROSA clusters using OADP -endif::openshift-rosa,openshift-rosa-hcp[] -ifdef::openshift-rosa,openshift-rosa-hcp[] -= Installing OADP -endif::openshift-rosa,openshift-rosa-hcp[] -include::_attributes/common-attributes.adoc[] -:context: oadp-rosa-backing-up-applications - -toc::[] - -You can use {oadp-first} with {product-rosa} (ROSA) clusters to back up and restore application data. - -ifndef::openshift-rosa,openshift-rosa-hcp[] -ROSA is a fully-managed, turnkey application platform that allows you to deliver value to your customers by building and deploying applications. - -ROSA provides seamless integration with a wide range of {aws-first} compute, database, analytics, machine learning, networking, mobile, and other services to speed up the building and delivery of differentiating experiences to your customers. - -You can subscribe to the service directly from your {aws-short} account. - -After you create your clusters, you can operate your clusters with the {product-title} web console or through link:https://docs.openshift.com/dedicated/ocm/ocm-overview.html[{cluster-manager-first}]. You can also use ROSA with OpenShift APIs and command-line interface (CLI) tools. - -For additional information about ROSA installation, see link:https://www.redhat.com/en/products/interactive-walkthrough/install-rosa[Installing Red Hat OpenShift Service on AWS (ROSA) interactive walkthrough]. 
-endif::openshift-rosa,openshift-rosa-hcp[] - -Before installing {oadp-first}, you must set up role and policy credentials for OADP so that it can use the {aws-full} API. - -This process is performed in the following two stages: - -. Prepare {aws-short} credentials -. Install the OADP Operator and give it an IAM role - -include::modules/preparing-aws-credentials-for-oadp.adoc[leveloffset=+1] - -include::modules/installing-oadp-rosa-sts.adoc[leveloffset=+1] - -include::modules/updating-role-arn-oadp-rosa-sts.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources -// This xref points to a topic that is not published in the ROSA docs. -ifndef::openshift-rosa,openshift-rosa-hcp[] -* xref:../../../operators/user/olm-installing-operators-in-namespace.adoc#olm-installing-from-operatorhub-using-web-console_olm-installing-operators-in-namespace[Installing from OperatorHub using the web console]. -endif::openshift-rosa,openshift-rosa-hcp[] -* xref:../../../backup_and_restore/application_backup_and_restore/backing_up_and_restoring/backing-up-applications.adoc#backing-up-applications[Backing up applications] - -// For ROSA and ROSA HCP, this section is in oadp-use-cases/oadp-rosa-backup-restore.adoc -ifndef::openshift-rosa,openshift-rosa-hcp[] -[id="oadp-rosa-backing-up-and-cleaning"] -== Example: Backing up a workload on OADP ROSA STS, with an optional cleanup - -include::modules/performing-a-backup-oadp-rosa-sts.adoc[leveloffset=+2] - -include::modules/cleanup-a-backup-oadp-rosa-sts.adoc[leveloffset=+2] -endif::openshift-rosa,openshift-rosa-hcp[] diff --git a/backup_and_restore/application_backup_and_restore/oadp-rosa/snippets b/backup_and_restore/application_backup_and_restore/oadp-rosa/snippets deleted file mode 120000 index ce62fd7c41e2..000000000000 --- a/backup_and_restore/application_backup_and_restore/oadp-rosa/snippets +++ /dev/null @@ -1 +0,0 @@ -../../../snippets/ \ No newline at end of file diff --git a/backup_and_restore/application_backup_and_restore/oadp-timeouts.adoc b/backup_and_restore/application_backup_and_restore/oadp-timeouts.adoc deleted file mode 100644 index 97cfd3f899ab..000000000000 --- a/backup_and_restore/application_backup_and_restore/oadp-timeouts.adoc +++ /dev/null @@ -1,36 +0,0 @@ -:_mod-docs-content-type: ASSEMBLY -[id="oadp-timeouts"] -= OADP timeouts -include::_attributes/common-attributes.adoc[] -include::_attributes/attributes-openshift-dedicated.adoc[] -:context: oadp-timeouts -:namespace: openshift-adp -:local-product: OADP - -toc::[] - -Extending a timeout allows complex or resource-intensive processes to complete successfully without premature termination. This configuration can reduce errors, retries, or failures. - -Ensure that you balance timeout extensions in a logical manner so that you do not configure excessively long timeouts that might hide underlying issues in the process. Carefully choose and monitor timeout values that meet the needs of the process and the overall system performance.
- -The following sections describe how and when to implement these timeout parameters: - -* xref:../../backup_and_restore/application_backup_and_restore/oadp-timeouts.adoc#restic-timeout_oadp-timeouts[Restic timeout] - -* xref:../../backup_and_restore/application_backup_and_restore/oadp-timeouts.adoc#velero-timeout_oadp-timeouts[Velero resource timeout] - -* xref:../../backup_and_restore/application_backup_and_restore/oadp-timeouts.adoc#datamover-timeout_oadp-timeouts[Data Mover timeout] - -* xref:../../backup_and_restore/application_backup_and_restore/oadp-timeouts.adoc#csisnapshot-timeout_oadp-timeouts[CSI snapshot timeout] - -* xref:../../backup_and_restore/application_backup_and_restore/oadp-timeouts.adoc#item-operation-timeout-backup_oadp-timeouts[Item operation timeout - backup] - -* xref:../../backup_and_restore/application_backup_and_restore/oadp-timeouts.adoc#item-operation-timeout-restore_oadp-timeouts[Item operation timeout - restore] - -include::modules/oadp-restic-timeouts.adoc[leveloffset=+1] -include::modules/oadp-velero-timeouts.adoc[leveloffset=+1] -include::modules/oadp-velero-default-timeouts.adoc[leveloffset=+2] -include::modules/oadp-datamover-timeouts.adoc[leveloffset=+1] -include::modules/oadp-csi-snapshot-timeouts.adoc[leveloffset=+1] -include::modules/oadp-item-restore-timeouts.adoc[leveloffset=+1] -include::modules/oadp-item-backup-timeouts.adoc[leveloffset=+1] \ No newline at end of file diff --git a/backup_and_restore/application_backup_and_restore/oadp-use-cases/_attributes b/backup_and_restore/application_backup_and_restore/oadp-use-cases/_attributes deleted file mode 120000 index bf7c2529fdb4..000000000000 --- a/backup_and_restore/application_backup_and_restore/oadp-use-cases/_attributes +++ /dev/null @@ -1 +0,0 @@ -../../../_attributes/ \ No newline at end of file diff --git a/backup_and_restore/application_backup_and_restore/oadp-use-cases/modules b/backup_and_restore/application_backup_and_restore/oadp-use-cases/modules deleted file mode 120000 index 5be29a99c161..000000000000 --- a/backup_and_restore/application_backup_and_restore/oadp-use-cases/modules +++ /dev/null @@ -1 +0,0 @@ -../../../modules \ No newline at end of file diff --git a/backup_and_restore/application_backup_and_restore/oadp-use-cases/oadp-rosa-backup-restore.adoc b/backup_and_restore/application_backup_and_restore/oadp-use-cases/oadp-rosa-backup-restore.adoc deleted file mode 100644 index ac995b07fa7c..000000000000 --- a/backup_and_restore/application_backup_and_restore/oadp-use-cases/oadp-rosa-backup-restore.adoc +++ /dev/null @@ -1,11 +0,0 @@ -:_mod-docs-content-type: ASSEMBLY -[id="oadp-rosa-backing-up-and-cleaning-example"] -= Backing up workloads on OADP with ROSA STS -include::_attributes/common-attributes.adoc[] -:context: oadp-rosa-backing-up-and-cleaning-example - -toc::[] - -include::modules/performing-a-backup-oadp-rosa-sts.adoc[leveloffset=+1] - -include::modules/cleanup-a-backup-oadp-rosa-sts.adoc[leveloffset=+1] diff --git a/backup_and_restore/application_backup_and_restore/oadp-use-cases/oadp-usecase-backup-using-odf.adoc b/backup_and_restore/application_backup_and_restore/oadp-use-cases/oadp-usecase-backup-using-odf.adoc deleted file mode 100644 index d3b2823f470b..000000000000 --- a/backup_and_restore/application_backup_and_restore/oadp-use-cases/oadp-usecase-backup-using-odf.adoc +++ /dev/null @@ -1,12 +0,0 @@ -:_mod-docs-content-type: ASSEMBLY -[id="oadp-use-cases-backup"] -= Backup using {oadp-full} and {odf-first}
-include::_attributes/common-attributes.adoc[] -:context: oadp-use-cases-backup - -toc::[] - -The following is a use case for using {oadp-short} and {odf-short} to back up an application. - -//backup -include::modules/oadp-usecase-backup-odf.adoc[leveloffset=+1] diff --git a/backup_and_restore/application_backup_and_restore/oadp-use-cases/oadp-usecase-enable-ca-cert.adoc b/backup_and_restore/application_backup_and_restore/oadp-use-cases/oadp-usecase-enable-ca-cert.adoc deleted file mode 100644 index 4c9a4a55403c..000000000000 --- a/backup_and_restore/application_backup_and_restore/oadp-use-cases/oadp-usecase-enable-ca-cert.adoc +++ /dev/null @@ -1,12 +0,0 @@ -:_mod-docs-content-type: ASSEMBLY -[id="oadp-use-case-enable-ca-cert"] -= Including a self-signed CA certificate during backup -include::_attributes/common-attributes.adoc[] -:context: oadp-use-case-enable-ca-cert - -toc::[] - -You can include a self-signed Certificate Authority (CA) certificate in the Data Protection Application (DPA) and then back up an application. You store the backup in a NooBaa bucket provided by {odf-first}. - -// include a CA cert and run a backup -include::modules/oadp-usecase-include-ca-cert-backup.adoc[leveloffset=+1] diff --git a/backup_and_restore/application_backup_and_restore/oadp-use-cases/oadp-usecase-legacy-aws-plugin.adoc b/backup_and_restore/application_backup_and_restore/oadp-use-cases/oadp-usecase-legacy-aws-plugin.adoc deleted file mode 100644 index 1a89916742d6..000000000000 --- a/backup_and_restore/application_backup_and_restore/oadp-use-cases/oadp-usecase-legacy-aws-plugin.adoc +++ /dev/null @@ -1,12 +0,0 @@ -:_mod-docs-content-type: ASSEMBLY -[id="oadp-using-legacy-aws-plugin"] -= Using the legacy-aws Velero plugin -include::_attributes/common-attributes.adoc[] -:context: oadp-using-legacy-aws-plugin - -toc::[] - -If you are using an {aws-short} S3-compatible backup storage location, you might get a `SignatureDoesNotMatch` error while backing up your application. This error occurs because some backup storage locations still use the older versions of the S3 APIs, which are incompatible with the newer AWS SDK for Go V2. To resolve this issue, you can use the `legacy-aws` Velero plugin in the `DataProtectionApplication` custom resource (CR). The `legacy-aws` Velero plugin uses the older AWS SDK for Go V1, which is compatible with the legacy S3 APIs, ensuring successful backups. - -// using legacy-aws velero plugin -include::modules/oadp-using-legacy-aws-plugin.adoc[leveloffset=+1] diff --git a/backup_and_restore/application_backup_and_restore/oadp-use-cases/oadp-usecase-restore-different-namespace.adoc b/backup_and_restore/application_backup_and_restore/oadp-use-cases/oadp-usecase-restore-different-namespace.adoc deleted file mode 100644 index 8c6c682c10f9..000000000000 --- a/backup_and_restore/application_backup_and_restore/oadp-use-cases/oadp-usecase-restore-different-namespace.adoc +++ /dev/null @@ -1,12 +0,0 @@ -:_mod-docs-content-type: ASSEMBLY -[id="oadp-use-cases-restore"] -= {oadp-first} restore use case -include::_attributes/common-attributes.adoc[] -:context: oadp-use-cases-restore - -toc::[] - -The following is a use case for using {oadp-short} to restore a backup to a different namespace.
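As a minimal sketch of this scenario, the following Velero `Restore` custom resource restores a backup into a different namespace by using the standard Velero `namespaceMapping` field. The backup name and namespaces are hypothetical placeholders.

[source,yaml]
----
apiVersion: velero.io/v1
kind: Restore
metadata:
  name: restore-to-new-namespace # hypothetical Restore CR name
  namespace: openshift-adp
spec:
  backupName: example-backup # hypothetical backup created earlier
  namespaceMapping:
    source-ns: target-ns # restore objects from source-ns into the target-ns namespace
----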
- -//restore -include::modules/oadp-usecase-restore.adoc[leveloffset=+1] \ No newline at end of file diff --git a/backup_and_restore/application_backup_and_restore/pods-crash-or-restart-due-to-lack-of-memory-or-cpu.adoc b/backup_and_restore/application_backup_and_restore/pods-crash-or-restart-due-to-lack-of-memory-or-cpu.adoc deleted file mode 100644 index 51150df2a4d1..000000000000 --- a/backup_and_restore/application_backup_and_restore/pods-crash-or-restart-due-to-lack-of-memory-or-cpu.adoc +++ /dev/null @@ -1,31 +0,0 @@ -:_mod-docs-content-type: ASSEMBLY -[id="pods-crash-or-restart-due-to-lack-of-memory-or-cpu"] -= Pods crash or restart due to lack of memory or CPU -include::_attributes/common-attributes.adoc[] -include::_attributes/attributes-openshift-dedicated.adoc[] -:context: pods-crash-or-restart-due-to-lack-of-memory-or-cpu -:namespace: openshift-adp -:local-product: OADP -:must-gather-v1-3: registry.redhat.io/oadp/oadp-mustgather-rhel9:v1.3 -:must-gather-v1-4: registry.redhat.io/oadp/oadp-mustgather-rhel9:v1.4 - -toc::[] - -If a Velero or Restic pod crashes due to a lack of memory or CPU, you can set specific resource requests for either of those resources. - -The values for the resource request fields must follow the same format as Kubernetes resource requirements. -If you do not specify `configuration.velero.podConfig.resourceAllocations` or `configuration.restic.podConfig.resourceAllocations`, a Velero or Restic pod uses the following default `resources` specification: - -[source,yaml] ----- -requests: - cpu: 500m - memory: 128Mi ----- - -[role="_additional-resources"] -.Additional resources -* xref:../../backup_and_restore/application_backup_and_restore/installing/about-installing-oadp.adoc#oadp-velero-cpu-memory-requirements_about-installing-oadp[Velero CPU and memory requirements based on collected data] - -include::modules/oadp-pod-crash-set-resource-request-velero.adoc[leveloffset=+1] -include::modules/oadp-pod-crash-set-resource-request-restic.adoc[leveloffset=+1] \ No newline at end of file diff --git a/backup_and_restore/application_backup_and_restore/release-notes/_attributes b/backup_and_restore/application_backup_and_restore/release-notes/_attributes deleted file mode 120000 index bf7c2529fdb4..000000000000 --- a/backup_and_restore/application_backup_and_restore/release-notes/_attributes +++ /dev/null @@ -1 +0,0 @@ -../../../_attributes/ \ No newline at end of file diff --git a/backup_and_restore/application_backup_and_restore/release-notes/images b/backup_and_restore/application_backup_and_restore/release-notes/images deleted file mode 120000 index 4399cbb3c0f3..000000000000 --- a/backup_and_restore/application_backup_and_restore/release-notes/images +++ /dev/null @@ -1 +0,0 @@ -../../../images/ \ No newline at end of file diff --git a/backup_and_restore/application_backup_and_restore/release-notes/modules b/backup_and_restore/application_backup_and_restore/release-notes/modules deleted file mode 120000 index 5be29a99c161..000000000000 --- a/backup_and_restore/application_backup_and_restore/release-notes/modules +++ /dev/null @@ -1 +0,0 @@ -../../../modules \ No newline at end of file diff --git a/backup_and_restore/application_backup_and_restore/release-notes/oadp-1-4-release-notes.adoc b/backup_and_restore/application_backup_and_restore/release-notes/oadp-1-4-release-notes.adoc deleted file mode 100644 index 9cc9f2c75379..000000000000 --- a/backup_and_restore/application_backup_and_restore/release-notes/oadp-1-4-release-notes.adoc +++ /dev/null
@@ -1,42 +0,0 @@ -:_mod-docs-content-type: ASSEMBLY -[id="oadp-1-4-release-notes"] -= OADP 1.4 release notes -include::_attributes/common-attributes.adoc[] -include::_attributes/attributes-openshift-dedicated.adoc[] -:context: oadp-1-4-release-notes - -toc::[] - -The release notes for {oadp-first} describe new features and enhancements, deprecated features, product recommendations, known issues, and resolved issues. - -[NOTE] -==== -For additional information about {oadp-short}, see link:https://access.redhat.com/articles/5456281[{oadp-first} FAQs]. -==== - -include::modules/oadp-1-4-4-release-notes.adoc[leveloffset=+1] -include::modules/oadp-1-4-3-release-notes.adoc[leveloffset=+1] -include::modules/oadp-1-4-2-release-notes.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources -* xref:../../../backup_and_restore/application_backup_and_restore/backing_up_and_restoring/oadp-deleting-backups.adoc#oadp-about-kopia-repo-maintenance_deleting-backups[About Kopia repository maintenance] - -include::modules/oadp-1-4-1-release-notes.adoc[leveloffset=+1] -include::modules/oadp-1-4-0-release-notes.adoc[leveloffset=+1] -include::modules/oadp-backing-up-dpa-configuration-1-4-0.adoc[leveloffset=+3] -include::modules/oadp-upgrading-oadp-operator-1-4-0.adoc[leveloffset=+3] - -// TODO: Include this xref when the Operators book is added to the ROSA HCP docs. -ifndef::openshift-rosa-hcp[] -[role="_additional-resources"] -.Additional resources -* xref:../../../operators/admin/olm-upgrading-operators.adoc#olm-changing-update-channel_olm-upgrading-operators[Updating installed Operators] -endif::openshift-rosa-hcp[] - -[id="oadp-converting-dpa-to-new-version-1-4-0_{context}"] -=== Converting DPA to the new version - -To upgrade from OADP 1.3 to 1.4, no Data Protection Application (DPA) changes are required. - -include::modules/oadp-verifying-upgrade-1-4-0.adoc[leveloffset=+2] diff --git a/backup_and_restore/application_backup_and_restore/release-notes/snippets b/backup_and_restore/application_backup_and_restore/release-notes/snippets deleted file mode 120000 index ce62fd7c41e2..000000000000 --- a/backup_and_restore/application_backup_and_restore/release-notes/snippets +++ /dev/null @@ -1 +0,0 @@ -../../../snippets/ \ No newline at end of file diff --git a/backup_and_restore/application_backup_and_restore/restic-issues.adoc b/backup_and_restore/application_backup_and_restore/restic-issues.adoc deleted file mode 100644 index d0f17e9be6d3..000000000000 --- a/backup_and_restore/application_backup_and_restore/restic-issues.adoc +++ /dev/null @@ -1,90 +0,0 @@ -:_mod-docs-content-type: ASSEMBLY -[id="restic-issues"] -= Restic issues -include::_attributes/common-attributes.adoc[] -include::_attributes/attributes-openshift-dedicated.adoc[] -:context: restic-issues -:namespace: openshift-adp -:local-product: OADP - -toc::[] - -You might encounter these issues when you back up applications with Restic. - -[id="restic-permission-error-nfs-root-squash-enabled_{context}"] -== Restic permission error for NFS data volumes with root_squash enabled - -The `Restic` pod log displays the following error message: `controller=pod-volume-backup error="fork/exec /usr/bin/restic: permission denied"`. - -.Cause - -If your NFS data volumes have `root_squash` enabled, `Restic` maps to `nfsnobody` and does not have permission to create backups. - -.Solution - -You can resolve this issue by creating a supplemental group for `Restic` and adding the group ID to the `DataProtectionApplication` manifest: - -. 
Create a supplemental group for `Restic` on the NFS data volume. -. Set the `setgid` bit on the NFS directories so that group ownership is inherited. -. Add the `spec.configuration.nodeAgent.supplementalGroups` parameter and the group ID to the `DataProtectionApplication` manifest, as shown in the following example: -+ -[source,yaml] ----- -apiVersion: oadp.openshift.io/v1alpha1 -kind: DataProtectionApplication -# ... -spec: - configuration: - nodeAgent: - enable: true - uploaderType: restic - supplementalGroups: - - <1> -# ... ----- -<1> Specify the supplemental group ID. - -. Wait for the `Restic` pods to restart so that the changes are applied. - -[id="restic-backup-cannot-be-recreated-after-s3-bucket-emptied_{context}"] -== Restic Backup CR cannot be recreated after bucket is emptied - -If you create a Restic `Backup` CR for a namespace, empty the object storage bucket, and then recreate the `Backup` CR for the same namespace, the recreated `Backup` CR fails. - -The `velero` pod log displays the following error message: `stderr=Fatal: unable to open config file: Stat: The specified key does not exist.\nIs there a repository at the following location?`. - -.Cause - -Velero does not recreate or update the Restic repository from the `ResticRepository` manifest if the Restic directories are deleted from object storage. See link:https://github.com/vmware-tanzu/velero/issues/4421[Velero issue 4421] for more information. - -.Solution - -* Remove the related Restic repository from the namespace by running the following command: -+ -[source,terminal] ----- -$ oc delete resticrepository openshift-adp <name_of_the_restic_repository> ----- -+ - In the following error log, `mysql-persistent` is the problematic Restic repository. The name of the repository appears in italics for clarity. -+ -[source,text,options="nowrap",subs="+quotes,verbatim"] ----- - time="2021-12-29T18:29:14Z" level=info msg="1 errors - encountered backup up item" backup=velero/backup65 - logSource="pkg/backup/backup.go:431" name=mysql-7d99fc949-qbkds - time="2021-12-29T18:29:14Z" level=error msg="Error backing up item" - backup=velero/backup65 error="pod volume backup failed: error running - restic backup, stderr=Fatal: unable to open config file: Stat: The - specified key does not exist.\nIs there a repository at the following - location?\ns3:http://minio-minio.apps.mayap-oadp- - veleo-1234.qe.devcluster.openshift.com/mayapvelerooadp2/velero1/ - restic/_mysql-persistent_\n: exit status 1" error.file="/remote-source/ - src/github.com/vmware-tanzu/velero/pkg/restic/backupper.go:184" - error.function="github.com/vmware-tanzu/velero/ - pkg/restic.(*backupper).BackupPodVolumes" - logSource="pkg/backup/backup.go:435" name=mysql-7d99fc949-qbkds ----- - -include::modules/oadp-restic-restore-failing-psa-policy.adoc[leveloffset=+1] \ No newline at end of file diff --git a/backup_and_restore/application_backup_and_restore/snippets b/backup_and_restore/application_backup_and_restore/snippets deleted file mode 120000 index 5a3f5add140e..000000000000 --- a/backup_and_restore/application_backup_and_restore/snippets +++ /dev/null @@ -1 +0,0 @@ -../../snippets/ \ No newline at end of file diff --git a/backup_and_restore/application_backup_and_restore/troubleshooting.adoc b/backup_and_restore/application_backup_and_restore/troubleshooting.adoc deleted file mode 100644 index 1e42bfee3efb..000000000000 --- a/backup_and_restore/application_backup_and_restore/troubleshooting.adoc +++ /dev/null @@ -1,32 +0,0 @@ -:_mod-docs-content-type: ASSEMBLY -[id="troubleshooting"] -= 
Troubleshooting -include::_attributes/common-attributes.adoc[] -include::_attributes/attributes-openshift-dedicated.adoc[] -:context: oadp-troubleshooting -:oadp-troubleshooting: -:namespace: openshift-adp -:local-product: OADP -:must-gather-v1-3: registry.redhat.io/oadp/oadp-mustgather-rhel9:v1.3 -:must-gather-v1-4: registry.redhat.io/oadp/oadp-mustgather-rhel9:v1.4 - -toc::[] - -You can troubleshoot OADP issues by using the following methods: - -* Debug Velero custom resources (CRs) by using the xref:../../backup_and_restore/application_backup_and_restore/velero-cli-tool.adoc#oadp-debugging-oc-cli_velero-cli-tool[OpenShift CLI tool] or the xref:../../backup_and_restore/application_backup_and_restore/velero-cli-tool.adoc#migration-debugging-velero-resources_velero-cli-tool[Velero CLI tool]. The Velero CLI tool provides more detailed logs and information. - -* Debug Velero or Restic pod crashes that are caused by a lack of memory or CPU by using xref:../../backup_and_restore/application_backup_and_restore/pods-crash-or-restart-due-to-lack-of-memory-or-cpu.adoc#pods-crash-or-restart-due-to-lack-of-memory-or-cpu[Pods crash or restart due to lack of memory or CPU]. - -* Debug issues with Velero and admission webhooks by using xref:../../backup_and_restore/application_backup_and_restore/issues-with-velero-and-admission-webhooks.adoc#issues-with-velero-and-admission-webhooks[Issues with Velero and admission webhooks]. - -* Check xref:../../backup_and_restore/application_backup_and_restore/oadp-installation-issues.adoc#oadp-installation-issues[OADP installation issues], xref:../../backup_and_restore/application_backup_and_restore/oadp-operator-issues.adoc#oadp-operator-issues[OADP Operator issues], xref:../../backup_and_restore/application_backup_and_restore/backup-and-restore-cr-issues.adoc#backup-and-restore-cr-issues[backup and restore CR issues], and xref:../../backup_and_restore/application_backup_and_restore/restic-issues.adoc#restic-issues[Restic issues]. - -* Use the available xref:../../backup_and_restore/application_backup_and_restore/oadp-timeouts.adoc#oadp-timeouts[OADP timeouts] to reduce errors, retries, or failures. - -* Collect logs and CR information by using the xref:../../backup_and_restore/application_backup_and_restore/using-the-must-gather-tool.adoc#using-the-must-gather-tool[`must-gather` tool]. - -* Monitor and analyze workload performance by using xref:../../backup_and_restore/application_backup_and_restore/oadp-monitoring.adoc#oadp-monitoring[OADP monitoring]. - - -:oadp-troubleshooting!: diff --git a/backup_and_restore/application_backup_and_restore/using-the-must-gather-tool.adoc b/backup_and_restore/application_backup_and_restore/using-the-must-gather-tool.adoc deleted file mode 100644 index 461ea577994f..000000000000 --- a/backup_and_restore/application_backup_and_restore/using-the-must-gather-tool.adoc +++ /dev/null @@ -1,76 +0,0 @@ -:_mod-docs-content-type: ASSEMBLY -[id="using-the-must-gather-tool"] -= Using the must-gather tool -include::_attributes/common-attributes.adoc[] -include::_attributes/attributes-openshift-dedicated.adoc[] -:context: using-the-must-gather-tool -:namespace: openshift-adp -:local-product: OADP -:must-gather-v1-3: registry.redhat.io/oadp/oadp-mustgather-rhel9:v1.3 -:must-gather-v1-4: registry.redhat.io/oadp/oadp-mustgather-rhel9:v1.4 - -toc::[] - -You can collect logs, metrics, and information about {local-product} custom resources by using the `must-gather` tool.
The `must-gather` data must be attached to all customer cases. - -You can run the `must-gather` tool with the following data collection options: - -* Full `must-gather` data collection collects Prometheus metrics, pod logs, and Velero CR information for all namespaces where the OADP Operator is installed. -* Essential `must-gather` data collection collects pod logs and Velero CR information for a specific duration of time, for example, one hour or 24 hours. Prometheus metrics and duplicate logs are not included. -* `must-gather` data collection with timeout. Data collection can take a long time if there are many failed `Backup` CRs. You can improve performance by setting a timeout value. -* Prometheus metrics data dump downloads an archive file containing the metrics data collected by Prometheus. - - -.Prerequisites - -* You have logged in to the {product-title} cluster as a user with the `cluster-admin` role. -* You have installed the OpenShift CLI (`oc`). -* You must use {op-system-base-full} {op-system-version-9} with {oadp-short} 1.4. - -.Procedure - -. Navigate to the directory where you want to store the `must-gather` data. -. Run the `oc adm must-gather` command for one of the following data collection options: - -* For full `must-gather` data collection, including Prometheus metrics, run the following command: -+ -[source,terminal,subs="attributes+"] ----- -$ oc adm must-gather --image={must-gather-v1-4} ----- -+ -The data is saved as `must-gather/must-gather.tar.gz`. You can upload this file to a support case on the link:https://access.redhat.com/[Red{nbsp}Hat Customer Portal]. - -* For essential `must-gather` data collection, without Prometheus metrics, for a specific time duration, run the following command: -+ -[source,terminal,subs="attributes+"] ----- -$ oc adm must-gather --image={must-gather-v1-4} \ - -- /usr/bin/gather_