From 0d77b5ed453f3f04db6d6621e794514e9b837a39 Mon Sep 17 00:00:00 2001 From: Andrea Hoffer Date: Wed, 1 Oct 2025 10:33:43 -0400 Subject: [PATCH] OSDOCS#16255: Removing unused modules from main+back --- ...s-image-change-trigger-identification.adoc | 80 -------- .../builds-source-input-subman-config.adoc | 44 ---- ...ds-strategy-docker-force-pull-example.adoc | 15 -- .../builds-strategy-force-pull-procedure.adoc | 12 -- ...uilds-strategy-s2i-force-pull-example.adoc | 19 -- ...ring-hpa-based-on-application-metrics.adoc | 48 ----- ...amic-provisioning-ceph-rbd-definition.adoc | 42 ---- ...namic-provisioning-gluster-definition.adoc | 96 --------- modules/enabling-multi-cluster-console.adoc | 56 ----- modules/enabling-plug-in-browser.adoc | 24 --- .../hcp-requirements-platform-version.adoc | 38 ---- modules/images-create-metadata-define.adoc | 82 -------- modules/ldap-failover-configure-apache.adoc | 192 ------------------ .../ldap-failover-configure-openshift.adoc | 55 ----- modules/ldap-failover-configure-sssd.adoc | 126 ------------ modules/ldap-failover-generate-certs.adoc | 66 ------ modules/ldap-failover-overview.adoc | 26 --- modules/ldap-failover-prereqs.adoc | 29 --- modules/lvms-adding-a-storage-class.adoc | 55 ----- modules/multi-cluster-about.adoc | 9 - ...nodes-cluster-overcommit-buffer-chunk.adoc | 69 ------- ...odes-cluster-resource-override-deploy.adoc | 8 - ...des-cluster-timeout-graceful-shutdown.adoc | 25 --- ...s-nodes-configuring-graceful-shutdown.adoc | 150 -------------- modules/nodes-pods-secrets-creating-tls.adoc | 51 ----- ...odes-scheduler-node-names-configuring.adoc | 60 ------ .../nodes-scheduler-node-projects-about.adoc | 33 --- ...s-scheduler-node-projects-configuring.adoc | 44 ---- ...-scheduler-node-selectors-configuring.adoc | 74 ------- modules/olmv1-creating-fbc.adoc | 66 ------ modules/olmv1-publishing-fbc.adoc | 25 --- ...orage-csi-ebs-operator-install-driver.adoc | 38 ---- ...tent-storage-csi-ebs-operator-install.adoc | 40 ---- ...ent-storage-csi-manila-install-driver.adoc | 108 ---------- ...t-storage-csi-manila-install-operator.adoc | 33 --- ...storage-csi-manila-uninstall-operator.adoc | 29 --- ...nt-storage-csi-migration-automatic-ga.adoc | 23 --- ...rsistent-storage-csi-migration-enable.adoc | 74 ------- ...-csi-migration-overview-support-level.adoc | 36 ---- ...sistent-storage-csi-migration-vsphere.adoc | 51 ----- ...tent-storage-csi-olm-driver-uninstall.adoc | 21 -- modules/persistent-storage-csi-tp-enable.adoc | 134 ------------ .../persistent-storage-manila-install.adoc | 151 -------------- modules/persistent-storage-manila-usage.adoc | 35 ---- .../persistent-storage-vsphere-backup.adoc | 25 --- modules/prometheus-operator.adoc | 21 -- ...ice-accounts-configuration-parameters.adoc | 52 ----- ...vice-accounts-enabling-authentication.adoc | 36 ---- ...-using-credentials-inside-a-container.adoc | 41 ---- modules/service-ca-certificates.adoc | 81 -------- ...-persistent-storage-efs-authorization.adoc | 114 ----------- ...rage-persistent-storage-efs-configmap.adoc | 42 ---- ...ge-persistent-storage-efs-provisioner.adoc | 85 -------- .../storage-persistent-storage-efs-pvc.adoc | 77 ------- ...-persistent-storage-efs-storage-class.adoc | 45 ---- modules/upgrade-49-acknowledgement.adoc | 15 -- 56 files changed, 3126 deletions(-) delete mode 100644 modules/builds-image-change-trigger-identification.adoc delete mode 100644 modules/builds-source-input-subman-config.adoc delete mode 100644 modules/builds-strategy-docker-force-pull-example.adoc delete mode 
100644 modules/builds-strategy-force-pull-procedure.adoc delete mode 100644 modules/builds-strategy-s2i-force-pull-example.adoc delete mode 100644 modules/configuring-hpa-based-on-application-metrics.adoc delete mode 100644 modules/dynamic-provisioning-ceph-rbd-definition.adoc delete mode 100644 modules/dynamic-provisioning-gluster-definition.adoc delete mode 100644 modules/enabling-multi-cluster-console.adoc delete mode 100644 modules/enabling-plug-in-browser.adoc delete mode 100644 modules/hcp-requirements-platform-version.adoc delete mode 100644 modules/images-create-metadata-define.adoc delete mode 100644 modules/ldap-failover-configure-apache.adoc delete mode 100644 modules/ldap-failover-configure-openshift.adoc delete mode 100644 modules/ldap-failover-configure-sssd.adoc delete mode 100644 modules/ldap-failover-generate-certs.adoc delete mode 100644 modules/ldap-failover-overview.adoc delete mode 100644 modules/ldap-failover-prereqs.adoc delete mode 100644 modules/lvms-adding-a-storage-class.adoc delete mode 100644 modules/multi-cluster-about.adoc delete mode 100644 modules/nodes-cluster-overcommit-buffer-chunk.adoc delete mode 100644 modules/nodes-cluster-resource-override-deploy.adoc delete mode 100644 modules/nodes-nodes-cluster-timeout-graceful-shutdown.adoc delete mode 100644 modules/nodes-nodes-configuring-graceful-shutdown.adoc delete mode 100644 modules/nodes-pods-secrets-creating-tls.adoc delete mode 100644 modules/nodes-scheduler-node-names-configuring.adoc delete mode 100644 modules/nodes-scheduler-node-projects-about.adoc delete mode 100644 modules/nodes-scheduler-node-projects-configuring.adoc delete mode 100644 modules/nodes-scheduler-node-selectors-configuring.adoc delete mode 100644 modules/olmv1-creating-fbc.adoc delete mode 100644 modules/olmv1-publishing-fbc.adoc delete mode 100644 modules/persistent-storage-csi-ebs-operator-install-driver.adoc delete mode 100644 modules/persistent-storage-csi-ebs-operator-install.adoc delete mode 100644 modules/persistent-storage-csi-manila-install-driver.adoc delete mode 100644 modules/persistent-storage-csi-manila-install-operator.adoc delete mode 100644 modules/persistent-storage-csi-manila-uninstall-operator.adoc delete mode 100644 modules/persistent-storage-csi-migration-automatic-ga.adoc delete mode 100644 modules/persistent-storage-csi-migration-enable.adoc delete mode 100644 modules/persistent-storage-csi-migration-overview-support-level.adoc delete mode 100644 modules/persistent-storage-csi-migration-vsphere.adoc delete mode 100644 modules/persistent-storage-csi-olm-driver-uninstall.adoc delete mode 100644 modules/persistent-storage-csi-tp-enable.adoc delete mode 100644 modules/persistent-storage-manila-install.adoc delete mode 100644 modules/persistent-storage-manila-usage.adoc delete mode 100644 modules/persistent-storage-vsphere-backup.adoc delete mode 100644 modules/prometheus-operator.adoc delete mode 100644 modules/service-accounts-configuration-parameters.adoc delete mode 100644 modules/service-accounts-enabling-authentication.adoc delete mode 100644 modules/service-accounts-using-credentials-inside-a-container.adoc delete mode 100644 modules/service-ca-certificates.adoc delete mode 100644 modules/storage-persistent-storage-efs-authorization.adoc delete mode 100644 modules/storage-persistent-storage-efs-configmap.adoc delete mode 100644 modules/storage-persistent-storage-efs-provisioner.adoc delete mode 100644 modules/storage-persistent-storage-efs-pvc.adoc delete mode 100644 
modules/storage-persistent-storage-efs-storage-class.adoc delete mode 100644 modules/upgrade-49-acknowledgement.adoc diff --git a/modules/builds-image-change-trigger-identification.adoc b/modules/builds-image-change-trigger-identification.adoc deleted file mode 100644 index b0fd3f449be2..000000000000 --- a/modules/builds-image-change-trigger-identification.adoc +++ /dev/null @@ -1,80 +0,0 @@ -// Module included in the following assemblies: -// -// * builds/triggering-builds-build-hooks.adoc - -[id="builds-image-change-trigger-identification_{context}"] -= Image change trigger identification - -As a developer, if you have configured image change triggers, you can identify which image change initiated the last build. - -To accomplish this, you must identify elements in your build configuration's specification and status that are related to image change triggers. - -This way, you can use the timestamp in `buildConfig.status.imageChangeTriggers` to identify the most recent build. Then you can use the name and namespace of the image stream that triggered this build to find the corresponding image change trigger in the `buildConfig.spec.triggers`. - - -== Image change trigger elements in the specification - -In your build configuration specification, `buildConfig.spec.triggers` is an array of build trigger policies, `BuildTriggerPolicy`. - -Each `BuildTriggerPolicy` has a `type` field and set of pointers fields, where each pointer field corresponds to one of the allowed values for the `type` field. As such, only one pointer field can be set for a given `BuildTriggerPolicy`. - -So for image change triggers, the value of `type` is `ImageChange`. - -Then, the `imageChange` field is the pointer to an `ImageChangeTrigger` object. So this will be set. It has the following fields: - -* `lastTriggeredImageID`: This field is deprecated in {product-title} 4.8, but is still being set. It will be ignored in a future release. It contains the resolved image reference for the `ImageStreamTag` when the last build was triggered from this `BuildConfig`. -* `paused`: This field is used to temporarily disable this particular image change trigger. -* `from`: This field is used to reference the `ImageStreamTag` that drives this image change trigger. Its type is the core Kubernetes type, `OwnerReference`. The `from` field has the following fields of note: - * `kind`: In this case, the only supported value is `ImageStreamTag`. - * `namespace`: The namespace where the `ImageStreamTag` lives. - * `name`: The name of the `ImageStreamTag`. - -The following example shows the relative location of the elements mentioned in the preceding list and omits unrelated elements, such as `name`, `source`, and `strategy`. - -.Example `BuildConfig.spec` -[source,yaml] ----- -kind: BuildConfig -apiVersion: build.openshift.io/v1 -spec: - triggers: - - imageChange: - from: - kind: ImageStreamTag - name: <1> - namespace: <2> - type: ImageChange ----- -<1> The name of an image stream, such as `input:latest`. -<2> A namespace, such as `my-namespace`. - -== Image change trigger elements in the status - -In your build configuration status, `buildConfig.status.imageChangeTriggers` is an array of `ImageChangeTriggerStatus` elements. Each `ImageChangeTriggerStatus` element includes the `from`, `lastTriggeredImageID`, and `lastTriggerTime` elements shown in the following example. This example omits elements that are not related to image change triggers. 
- -.Example `BuildConfig.status` -[source,yaml] ----- -kind: BuildConfig -apiVersion: build.openshift.io/v1 -status: - imageChangeTriggers: - - from: - name: <1> - namespace: <2> - lastTriggeredImageID: <3> - lastTriggerTime: <4> ----- -<1> The name of an image stream, such as `input:latest`. -<2> A namespace, such as `my-namespace`. -<3> The SHA or ID of the `ImageStreamTag` when a build started. Its value is updated each time a build is started, even if this `ImageStreamTag` is not the reason the build started. -<4> The last time this particular `ImageStreamTag` triggered a build to start. Its value is only updated when this trigger specifically started a Build. - -== Identification of image change triggers - -The `ImageChangeTriggerStatus` that has the most recent `lastTriggerTime` triggered the most recent build. You can use its `name` and `namespace` to correlate it with the `ImageStreamTag` of one of the image change triggers you defined in the `buildConfig.spec.triggers`. - -[role="_additional-resources"] -.Additional resources - -* link:http://docs.docker.com/v1.7/reference/api/hub_registry_spec/#docker-registry-1-0[v1 container registries] diff --git a/modules/builds-source-input-subman-config.adoc b/modules/builds-source-input-subman-config.adoc deleted file mode 100644 index f43a5478d649..000000000000 --- a/modules/builds-source-input-subman-config.adoc +++ /dev/null @@ -1,44 +0,0 @@ -// Module included in the following assemblies: -// -//* builds/running-entitled-builds.adoc - -:_mod-docs-content-type: PROCEDURE -[id="builds-source-input-subman-config_{context}"] -= Adding Subscription Manager configurations to builds - -Builds that use the Subscription Manager to install content must provide appropriate configuration files and certificate authorities for subscribed repositories. - -.Prerequisites - -You must have access to the Subscription Manager's configuration and certificate authority files. - -.Procedure - -. Create a `ConfigMap` for the Subscription Manager configuration: -+ -[source,terminal] ----- -$ oc create configmap rhsm-conf --from-file /path/to/rhsm/rhsm.conf ----- - -. Create a `ConfigMap` for the certificate authority: -+ -[source,terminal] ----- -$ oc create configmap rhsm-ca --from-file /path/to/rhsm/ca/redhat-uep.pem ----- - -. Add the Subscription Manager configuration and certificate authority to the -`BuildConfig`: -+ -[source,yaml] ----- -source: - configMaps: - - configMap: - name: rhsm-conf - destinationDir: rhsm-conf - - configMap: - name: rhsm-ca - destinationDir: rhsm-ca ----- diff --git a/modules/builds-strategy-docker-force-pull-example.adoc b/modules/builds-strategy-docker-force-pull-example.adoc deleted file mode 100644 index a6d63f81a576..000000000000 --- a/modules/builds-strategy-docker-force-pull-example.adoc +++ /dev/null @@ -1,15 +0,0 @@ -// Module included in the following assemblies: -// * builds/build-strategies.adoc - -[id="builds-strategy-docker-force-pull-example_{context}"] -= Docker force pull flag example - -Set the following to use the `forcePull` flag with Docker: - -[source,yaml] ----- -strategy: - dockerStrategy: - forcePull: true <1> ----- -<1> This flag causes the local builder image to be ignored, and a fresh version to be pulled from the registry to which the imagestream points. Setting `forcePull` to `false` results in the default behavior of honoring the image stored locally. 
diff --git a/modules/builds-strategy-force-pull-procedure.adoc b/modules/builds-strategy-force-pull-procedure.adoc deleted file mode 100644 index 62fa74c2efc6..000000000000 --- a/modules/builds-strategy-force-pull-procedure.adoc +++ /dev/null @@ -1,12 +0,0 @@ -// Module included in the following assemblies: -//* builds/build-strategies.adoc - -:_mod-docs-content-type: PROCEDURE -[id="builds-strategy-force-pull-procedure_{context}"] -= Using the force pull flag - -By default, if the builder image specified in the build configuration is available locally on the node, that image will be used. However, you can use the `forcepull` flag to override the local image and refresh it from the registry. - -.Procedure - -To override the local image and refresh it from the registry to which the image stream points, create a `BuildConfig` with the `forcePull` flag set to `true`. diff --git a/modules/builds-strategy-s2i-force-pull-example.adoc b/modules/builds-strategy-s2i-force-pull-example.adoc deleted file mode 100644 index 85f659bdd1a2..000000000000 --- a/modules/builds-strategy-s2i-force-pull-example.adoc +++ /dev/null @@ -1,19 +0,0 @@ -// Module included in the following assemblies: -// * builds/build-strategies.adoc - -[id="builds-strategy-s2i-force-pull-example_{context}"] -= Source-to-Image (S2I) force pull flag example - -Set the following to use the `forcePull` flag with S2I: - -[source,yaml] ----- -strategy: - sourceStrategy: - from: - kind: "ImageStreamTag" - name: "builder-image:latest" <1> - forcePull: true <2> ----- -<1> The builder image being used, where the local version on the node may not be up to date with the version in the registry to which the imagestream points. -<2> This flag causes the local builder image to be ignored and a fresh version to be pulled from the registry to which the imagestream points. Setting `forcePull` to `false` results in the default behavior of honoring the image stored locally. diff --git a/modules/configuring-hpa-based-on-application-metrics.adoc b/modules/configuring-hpa-based-on-application-metrics.adoc deleted file mode 100644 index 29035b46d3ce..000000000000 --- a/modules/configuring-hpa-based-on-application-metrics.adoc +++ /dev/null @@ -1,48 +0,0 @@ -// Module included in the following assemblies: -// -// * machine_management/configuring-hpa-for-an-application.adoc - -:_mod-docs-content-type: PROCEDURE -[id="configuring-hpa-based-on-application-metrics_{context}"] -= Configuring HPA based on application metrics - -If you configure an application to export metrics, you can set up Horizontal Pod Autoscaling (HPA) based on these metrics. - -.Procedure - -. Create a YAML file for your configuration. In this example, it is called `deploy.yaml`. - -. Add configuration for deploying the horizontal pod autoscaler for the application. This example configures and deploys HPA based on the application `http_requests_per_second` metric for the sample application configured in the "Application monitoring" section: -+ -[source,yaml] ----- -apiVersion: autoscaling/v2 -kind: HorizontalPodAutoscaler -metadata: - name: example-app-scaler - namespace: default -spec: - scaleTargetRef: - apiVersion: apps/v1 - kind: Deployment - name: example-app <1> - minReplicas: 3 <2> - maxReplicas: 10 <3> - metrics: - - type: Pods - pods: - metricName: http_requests_per_second <4> - targetAverageValue: 10 <5> ----- -<1> `name` specifies the application. -<2> `minReplicas` specifies the minimum number of replicas for the HPA to maintain for the application. 
-<3> `maxReplicas` specifies the maximum number of replicas for the HPA to maintain for the application. -<4> `metricName` specifies the metric upon which HPA is based. Here, specify the metric you previously exposed for your application. -<5> `targetAverageValue` specifies the value of the metric for the HPA to try to maintain by increasing or decreasing the number of replicas. - -. Apply the configuration file to the cluster: -+ -[source,terminal] ----- -$ oc apply -f deploy.yaml ----- diff --git a/modules/dynamic-provisioning-ceph-rbd-definition.adoc b/modules/dynamic-provisioning-ceph-rbd-definition.adoc deleted file mode 100644 index 6dbaed5ef5af..000000000000 --- a/modules/dynamic-provisioning-ceph-rbd-definition.adoc +++ /dev/null @@ -1,42 +0,0 @@ -// Module included in the following assemblies: -// -// * storage/dynamic-provisioning.adoc - -[id="ceph-rbd-definition_{context}"] -= Ceph RBD object definition - -.ceph-storageclass.yaml -[source,yaml] ----- -apiVersion: storage.k8s.io/v1 -kind: StorageClass -metadata: - name: cephfs -provisioner: kubernetes.io/rbd -parameters: - monitors: 10.16.153.105:6789 <1> - adminId: admin <2> - adminSecretName: ceph-secret <3> - adminSecretNamespace: kube-system <4> - pool: kube <5> - userId: kube <6> - userSecretName: ceph-secret-user <7> - fsType: ext4 <8> - imageFormat: "2" <9> ----- -<1> (required) A comma-delimited list of Ceph monitors. -<2> Optional: Ceph client ID that is capable of creating images in the -pool. Default is `admin`. -<3> (required) Secret Name for `adminId`. The provided secret must have -type `kubernetes.io/rbd`. -<4> Optional: The namespace for `adminSecret`. Default is `default`. -<5> Optional: Ceph RBD pool. Default is `rbd`. -<6> Optional: Ceph client ID that is used to map the Ceph RBD image. -Default is the same as `adminId`. -<7> (required) The name of Ceph Secret for `userId` to map Ceph RBD image. -It must exist in the same namespace as PVCs. -<8> Optional: File system that is created on dynamically provisioned -volumes. This value is copied to the `fsType` field of dynamically -provisioned persistent volumes and the file system is created when the -volume is mounted for the first time. The default value is `ext4`. -<9> Optional: Ceph RBD image format. The default value is `2`. diff --git a/modules/dynamic-provisioning-gluster-definition.adoc b/modules/dynamic-provisioning-gluster-definition.adoc deleted file mode 100644 index 51934a58e2d3..000000000000 --- a/modules/dynamic-provisioning-gluster-definition.adoc +++ /dev/null @@ -1,96 +0,0 @@ -// Module included in the following assemblies: -// -// * storage/dynamic-provisioning.adoc - -[id="gluster-definition_{context}"] -= GlusterFS object definition - -.glusterfs-storageclass.yaml -[source,yaml] ----- -kind: StorageClass -apiVersion: storage.k8s.io/v1 -metadata: - name: slow -provisioner: kubernetes.io/glusterfs -parameters: <1> - resturl: http://127.0.0.1:8081 <2> - restuser: admin <3> - secretName: heketi-secret <4> - secretNamespace: default <5> - gidMin: "40000" <6> - gidMax: "50000" <7> - volumeoptions: group metadata-cache, nl-cache on <8> - volumetype: replicate:3 <9> ----- -<1> Listed are mandatory and a few optional parameters. Refer to -link:https://access.redhat.com/documentation/en-us/red_hat_openshift_container_storage/3.10/html-single/operations_guide/#sect_file_reg_storageclass[Registering a Storage Class] for additional parameters. 
-<2> link:https://github.com/heketi/heketi[heketi] (volume management REST -service for Gluster) URL that provisions GlusterFS volumes on demand. The -general format should be `{http/https}://{IPaddress}:{Port}`. This is a -mandatory parameter for the GlusterFS dynamic provisioner. If the heketi -service is exposed as a routable service in the {product-title}, it will -have a resolvable fully qualified domain name (FQDN) and heketi service URL. -<3> heketi user who has access to create volumes. This is typically `admin`. -<4> Identification of a Secret that contains a user password to use when -talking to heketi. An empty password will be used -when both `secretNamespace` and `secretName` are omitted. -The provided secret must be of type `"kubernetes.io/glusterfs"`. -<5> The namespace of mentioned `secretName`. An empty password will be used -when both `secretNamespace` and `secretName` are omitted. The provided -Secret must be of type `"kubernetes.io/glusterfs"`. -<6> Optional. The minimum value of the GID range for volumes of this -StorageClass. -<7> Optional. The maximum value of the GID range for volumes of this -StorageClass. -<8> Optional. Options for newly created volumes. It allows for -performance tuning. See -link:https://docs.gluster.org/en/v3/Administrator%20Guide/Managing%20Volumes/#tuning-volume-options[Tuning Volume Options] -for more GlusterFS volume options. -<9> Optional. The -link:https://docs.gluster.org/en/v3/Quick-Start-Guide/Architecture/[type of volume] -to use. - -[NOTE] -==== -When the `gidMin` and `gidMax` values are not specified, their defaults are -2000 and 2147483647 respectively. Each dynamically provisioned volume -will be given a GID in this range (`gidMin-gidMax`). This GID is released -from the pool when the respective volume is deleted. The GID pool is -per StorageClass. -If two or more storage classes have GID ranges that overlap there may be -duplicate GIDs dispatched by the provisioner. -==== - -When heketi authentication is used, a Secret containing the admin key must -also exist. - -[source,terminal] ----- -$ oc create secret generic heketi-secret --from-literal=key= -n --type=kubernetes.io/glusterfs ----- - -This results in the following configuration: - -.heketi-secret.yaml -[source,yaml] ----- -apiVersion: v1 -kind: Secret -metadata: - name: heketi-secret - namespace: namespace - ... -data: - key: cGFzc3dvcmQ= <1> -type: kubernetes.io/glusterfs ----- -<1> base64 encoded password - -[NOTE] -==== -When the PVs are dynamically provisioned, the GlusterFS plugin -automatically creates an Endpoints and a headless Service named -`gluster-dynamic-`. When the PVC is deleted, these dynamic -resources are deleted automatically. -==== diff --git a/modules/enabling-multi-cluster-console.adoc b/modules/enabling-multi-cluster-console.adoc deleted file mode 100644 index 3e10fdea486b..000000000000 --- a/modules/enabling-multi-cluster-console.adoc +++ /dev/null @@ -1,56 +0,0 @@ -// Module included in the following assemblies: -// -// * assemblies/web-console.adoc - -:_mod-docs-content-type: PROCEDURE -[id="enable-multi-cluster-console_{context}"] -= Enabling multicluster in the web console - -:FeatureName: Multicluster console -include::snippets/technology-preview.adoc[leveloffset=+1] -// - -.Prerequisites -* Your cluster must be using the latest version of {product-title}. 
-* You must have link:https://access.redhat.com/documentation/en-us/red_hat_advanced_cluster_management_for_kubernetes/2.5/html/install/index[Red Hat Advanced Cluster Management (ACM) for Kubernetes 2.5] or the link:https://access.redhat.com/documentation/en-us/red_hat_advanced_cluster_management_for_kubernetes/2.5/html/multicluster_engine/index[multicluster engine (MCE) Operator] installed. -* You must have administrator privileges. - -[WARNING] -==== -Do not set this feature gate on production clusters. You will not be able to upgrade your cluster after applying the feature gate, and it cannot be undone. -==== - -.Procedure - -. Log in to the {product-title} web console using your credentials. - -. Enable ACM in the administrator perspective by navigating from *Administration* -> *Cluster Settings* -> *Configuration* -> *Console* `console.operator.openshift.io` -> *Console Plugins* and click *Enable* for `acm`. - -. A pop-up window appears, notifying you that updating the enablement of this console plugin prompts the console to be refreshed after the update. Select `Enable` and click *Save*. - -. Repeat the previous two steps for the `mce` console plugin immediately after enabling `acm`. - -. A pop-up window that states that a web console update is available appears a few moments after you enable the plugins. Click *Refresh the web console* in the pop-up window to update. -+ -[NOTE] -==== -You might see the pop-up window to refresh the web console twice if the second redeployment has not occurred by the time you click *Refresh the web console*. -==== - -** *local-cluster* and *All Clusters* are now visible above the perspectives in the navigation section. - -. Enable the feature gate by navigating from *Administration* -> *Cluster Settings* -> *Configuration* -> *FeatureGate*, and edit the YAML template as follows: -+ -[source,yaml] - ----- -spec: - featureSet: TechPreviewNoUpgrade ----- - -. Click *Save* to enable the multicluster console for all clusters. -+ -[IMPORTANT] -==== -After you save, this feature is enabled and cannot be undone. -==== diff --git a/modules/enabling-plug-in-browser.adoc b/modules/enabling-plug-in-browser.adoc deleted file mode 100644 index 4775147d5099..000000000000 --- a/modules/enabling-plug-in-browser.adoc +++ /dev/null @@ -1,24 +0,0 @@ -// Module included in the following assemblies: -// -// * web_console/dynamic-plug-ins.adoc - -:_mod-docs-content-type: PROCEDURE -[id="enable-plug-in-browser_{context}"] -= Enable dynamic plugins in the web console -Cluster administrators can enable plugins in the web console browser. Dynamic plugins are disabled by default. To enable them, a cluster administrator must update the `console-operator` configuration. - -.Procedure - -. In the *Administration* -> *Cluster Settings* page of the web console, click the *Configuration* tab. - -. Click the `Console` `operator.openshift.io` configuration resource. - -. From there, click the *Console plugins* tab to view the dynamic plugins running. - -. In the `Status` column, click `Enable console plugin` in the pop-over menu, which will launch the `Console plugin enablement` modal. - -. Click `Enable` and `Save`. - -.Verification - -* Refresh the browser to view the enabled plugin.
diff --git a/modules/hcp-requirements-platform-version.adoc b/modules/hcp-requirements-platform-version.adoc deleted file mode 100644 index d550c0454184..000000000000 --- a/modules/hcp-requirements-platform-version.adoc +++ /dev/null @@ -1,38 +0,0 @@ -// Module included in the following assemblies: -// * hosted-control-planes/hcp-prepare/hcp-requirements.adoc - -:_mod-docs-content-type: CONCEPT -[id="hcp-requirements-platform-version_{context}"] -= Platform and version requirements for {hcp-capital} - -The following table indicates which {product-title} versions are supported for each platform. In the table, Hosting {product-title} version refers to the {product-title} version where the {mce-short} is enabled: - -.Required {product-title} versions for platforms -[cols="3",options="header"] -|=== -|Platform |Hosting {product-title} version |Hosted {product-title} version - -|{aws-full} -|4.11 - 4.16 -|4.14 - 4.16 (only) - -|{ibm-power-title} -|4.16 -|4.16 (only) - -|{ibm-z-title} -|4.16 -|4.16 (only) - -|{VirtProductName} -|4.14 - 4.16 -|4.14 - 4.16 (only) - -|Bare metal -|4.14 - 4.16 -|4.14 - 4.16 (only) - -|Non-bare-metal agent machines -|4.16 -|4.16 (only) -|=== diff --git a/modules/images-create-metadata-define.adoc b/modules/images-create-metadata-define.adoc deleted file mode 100644 index 27b38648b81b..000000000000 --- a/modules/images-create-metadata-define.adoc +++ /dev/null @@ -1,82 +0,0 @@ -// Module included in the following assemblies: -// * openshift_images/create-images.adoc - -[id="images-create-metadata-define_{context}"] -= Defining image metadata - -You can use the `LABEL` instruction in a `Dockerfile` to define image -metadata. Labels are similar to environment variables in that they are key value -pairs attached to an image or a container. Labels are different from environment -variable in that they are not visible to the running application and they can -also be used for fast look-up of images and containers. - -link:https://docs.docker.com/engine/reference/builder/#label[Docker -documentation] for more information on the `LABEL` instruction. - -The label names should typically be namespaced. The namespace should be set -accordingly to reflect the project that is going to pick up the labels and use -them. For {product-title} the namespace should be set to `io.openshift` and -for Kubernetes the namespace is `io.k8s`. - -See the https://docs.docker.com/engine/userguide/labels-custom-metadata[Docker custom -metadata] documentation for details about the format. - -.Supported Metadata -[cols="3a,8a",options="header"] -|=== - -|Variable |Description - -|`io.openshift.tags` -|This label contains a list of tags represented as list of comma-separated -string values. The tags are the way to categorize the container images into broad -areas of functionality. Tags help UI and generation tools to suggest relevant -container images during the application creation process. - ----- -LABEL io.openshift.tags mongodb,mongodb24,nosql ----- - -|`io.openshift.wants` -|Specifies a list of tags that the generation tools and the UI might use to -provide relevant suggestions if you do not have the container images with given tags -already. For example, if the container image wants `mysql` and `redis` and you -do not have the container image with `redis` tag, then UI might suggest you to add -this image into your deployment. 
- ----- -LABEL io.openshift.wants mongodb,redis ----- - -|`io.k8s.description` -|This label can be used to give the container image consumers more detailed -information about the service or functionality this image provides. The UI can -then use this description together with the container image name to provide more -human friendly information to end users. - ----- -LABEL io.k8s.description The MySQL 5.5 Server with master-slave replication support ----- - -|`io.openshift.non-scalable` -|An image might use this variable to suggest that it does not support scaling. -The UI will then communicate this to consumers of that image. Being not-scalable -basically means that the value of `replicas` should initially not be set higher -than 1. - ----- -LABEL io.openshift.non-scalable true ----- - -|`io.openshift.min-memory` and `io.openshift.min-cpu` -|This label suggests how much resources the container image might need to -work properly. The UI might warn the user that deploying this container image may -exceed their user quota. The values must be compatible with -Kubernetes quantity. - ----- -LABEL io.openshift.min-memory 16Gi -LABEL io.openshift.min-cpu 4 ----- - -|=== diff --git a/modules/ldap-failover-configure-apache.adoc b/modules/ldap-failover-configure-apache.adoc deleted file mode 100644 index 34ed10426b86..000000000000 --- a/modules/ldap-failover-configure-apache.adoc +++ /dev/null @@ -1,192 +0,0 @@ -// Module included in the following assemblies: -// -// * authentication/configuring-ldap-failover.adoc - -:_mod-docs-content-type: PROCEDURE -[id="sssd-configuring-apache_{context}"] -= Configuring Apache to use SSSD - -.Procedure - -. Create a `/etc/pam.d/openshift` file that contains the -following contents: -+ ----- -auth required pam_sss.so -account required pam_sss.so ----- -+ -This configuration enables PAM, the pluggable authentication module, to use -`pam_sss.so` to determine authentication and access control when an -authentication request is issued for the `openshift` stack. - -. Edit the `/etc/httpd/conf.modules.d/55-authnz_pam.conf` file and uncomment - the following line: -+ ----- -LoadModule authnz_pam_module modules/mod_authnz_pam.so ----- - -. To configure the Apache `httpd.conf` file for remote basic authentication, -create the `openshift-remote-basic-auth.conf` file in the -`/etc/httpd/conf.d` directory. Use the following template to provide your -required settings and values: -+ -[IMPORTANT] -==== -Carefully review the template and customize its contents to fit your -environment. -==== -+ ----- -LoadModule request_module modules/mod_request.so -LoadModule php7_module modules/libphp7.so - -# Nothing needs to be served over HTTP. This virtual host simply redirects to -# HTTPS. - - DocumentRoot /var/www/html - RewriteEngine On - RewriteRule ^(.*)$ https://%{HTTP_HOST}$1 [R,L] - - - - # This needs to match the certificates you generated. See the CN and X509v3 - # Subject Alternative Name in the output of: - # openssl x509 -text -in /etc/pki/tls/certs/remote-basic.example.com.crt - ServerName remote-basic.example.com - - DocumentRoot /var/www/html - - # Secure all connections with TLS - SSLEngine on - SSLCertificateFile /etc/pki/tls/certs/remote-basic.example.com.crt - SSLCertificateKeyFile /etc/pki/tls/private/remote-basic.example.com.key - SSLCACertificateFile /etc/pki/CA/certs/ca.crt - - # Require that TLS clients provide a valid certificate - SSLVerifyClient require - SSLVerifyDepth 10 - - # Other SSL options that may be useful - # SSLCertificateChainFile ... 
- # SSLCARevocationFile ... - - # Send logs to a specific location to make them easier to find - ErrorLog logs/remote_basic_error_log - TransferLog logs/remote_basic_access_log - LogLevel warn - - # PHP script that turns the Apache REMOTE_USER env var - # into a JSON formatted response that OpenShift understands - - # all requests not using SSL are denied - SSLRequireSSL - # denies access when SSLRequireSSL is applied - SSLOptions +StrictRequire - # Require both a valid basic auth user (so REMOTE_USER is always set) - # and that the CN of the TLS client matches that of the OpenShift master - - Require valid-user - Require expr %{SSL_CLIENT_S_DN_CN} == 'system:openshift-master' - - # Use basic auth since OpenShift will call this endpoint with a basic challenge - AuthType Basic - AuthName openshift - AuthBasicProvider PAM - AuthPAMService openshift - - # Store attributes in environment variables. Specify the email attribute that - # you confirmed. - LookupOutput Env - LookupUserAttr mail REMOTE_USER_MAIL - LookupUserGECOS REMOTE_USER_DISPLAY_NAME - - # Other options that might be useful - - # While REMOTE_USER is used as the sub field and serves as the immutable ID, - # REMOTE_USER_PREFERRED_USERNAME could be used to have a different username - # LookupUserAttr REMOTE_USER_PREFERRED_USERNAME - - # Group support may be added in a future release - # LookupUserGroupsIter REMOTE_USER_GROUP - - - # Deny everything else - - Deny from all - - ----- - -. Create the `check_user.php` script in the `/var/www/html` directory. -Include the following code: -+ ----- - 'remote PAM authentication failed' -); - -// Build a success response if we have a user -if (!empty($user)) { - $data = array( - 'sub' => $user - ); - // Map of optional environment variables to optional JSON fields - $env_map = array( - 'REMOTE_USER_MAIL' => 'email', - 'REMOTE_USER_DISPLAY_NAME' => 'name', - 'REMOTE_USER_PREFERRED_USERNAME' => 'preferred_username' - ); - - // Add all non-empty environment variables to JSON data - foreach ($env_map as $env_name => $json_name) { - $env_data = apache_getenv($env_name); - if (!empty($env_data)) { - $data[$json_name] = $env_data; - } - } -} - -// We always output JSON from this script -header('Content-Type: application/json', true); - -// Write the response as JSON -echo json_encode($data); -?> ----- - -. Enable Apache to load the module. Modify the -`/etc/httpd/conf.modules.d/55-lookup_identity.conf` file and uncomment the -following line: -+ ----- -LoadModule lookup_identity_module modules/mod_lookup_identity.so ----- - -. Set an SELinux boolean so that SElinux allows Apache to connect to SSSD over -D-BUS: -+ ----- -# setsebool -P httpd_dbus_sssd on ----- - -. Set a boolean to tell SELinux that it is acceptable for Apache to contact the -PAM subsystem: -+ ----- -# setsebool -P allow_httpd_mod_auth_pam on ----- - -. 
Start Apache: -+ ----- -# systemctl start httpd.service ----- diff --git a/modules/ldap-failover-configure-openshift.adoc b/modules/ldap-failover-configure-openshift.adoc deleted file mode 100644 index b77c9818e564..000000000000 --- a/modules/ldap-failover-configure-openshift.adoc +++ /dev/null @@ -1,55 +0,0 @@ -// Module included in the following assemblies: -// -// * authentication/configuring-ldap-failover.adoc - -:_mod-docs-content-type: PROCEDURE -[id="sssd-for-ldap-configure-openshift_{context}"] -= Configuring {product-title} to use SSSD as the basic remote authentication server - -Modify the default configuration of your cluster to use the new identity -provider that you created. Complete the following steps on the first control plane host -listed in the Ansible host inventory file. - -.Procedure - -. Open the `/etc/origin/master/master-config.yaml` file. - -. Locate the `identityProviders` section and replace it with the following code: -+ ----- - identityProviders: - - name: sssd - challenge: true - login: true - mappingMethod: claim - provider: - apiVersion: v1 - kind: BasicAuthPasswordIdentityProvider - url: https://remote-basic.example.com/check_user.php - ca: /etc/origin/master/ca.crt - certFile: /etc/origin/master/openshift-master.crt - keyFile: /etc/origin/master/openshift-master.key ----- - -. Start {product-title} with the updated configuration: -+ ----- -# openshift start \ - --public-master=https://openshift.example.com:8443 \ - --master-config=/etc/origin/master/master-config.yaml \ - --node-config=/etc/origin/node-node1.example.com/node-config.yaml ----- - -. Test a login by using the `oc` CLI: -+ ----- -$ oc login https://openshift.example.com:8443 -u user1 ----- -+ -You can log in only with valid LDAP credentials. -. List the identities and confirm that an email address is displayed for each -user name. Run the following command: -+ ----- -$ oc get identity -o yaml ----- diff --git a/modules/ldap-failover-configure-sssd.adoc b/modules/ldap-failover-configure-sssd.adoc deleted file mode 100644 index 1415af2275a3..000000000000 --- a/modules/ldap-failover-configure-sssd.adoc +++ /dev/null @@ -1,126 +0,0 @@ -// Module included in the following assemblies: -// -// * authentication/configuring-ldap-failover.adoc - -:_mod-docs-content-type: PROCEDURE -[id="sssd-configuring-sssd_{context}"] -= Configuring SSSD for LDAP failover - -Complete these steps on the remote basic authentication server. - -You can configure the SSSD to retrieve attributes, such as email addresses and -display names, and pass them to {product-title} to display in the web interface. -In the following steps, you configure the SSSD to provide email addresses to -{product-title}. - -.Procedure - -. Install the required SSSD and the web server components: -+ ----- -# yum install -y sssd \ - sssd-dbus \ - realmd \ - httpd \ - mod_session \ - mod_ssl \ - mod_lookup_identity \ - mod_authnz_pam \ - php \ - mod_php ----- - -. Set up SSSD to authenticate this VM against the LDAP server. If the LDAP server -is a FreeIPA or Active Directory environment, then use `realmd` to join -this machine to the domain. -+ ----- -# realm join ldap.example.com ----- -+ -For more advanced cases, see the -https://access.redhat.com/documentation/en-US/Red_Hat_Enterprise_Linux/7/html/System-Level_Authentication_Guide/authconfig-ldap.html[System-Level Authentication Guide] - -. To use SSSD to manage failover situations for LDAP, add more entries to the - `/etc/sssd/sssd.conf` file on the `ldap_uri` line. 
Systems that are -enrolled with FreeIPA can automatically handle failover by using DNS SRV records. - -. Modify the `[domain/DOMAINNAME]` section of the `/etc/sssd/sssd.conf` file -and add this attribute: -+ ----- -[domain/example.com] -... -ldap_user_extra_attrs = mail <1> ----- -<1> Specify the correct attribute to retrieve email addresses for your LDAP -solution. For IPA, specify `mail`. Other LDAP solutions might use another -attribute, such as `email`. - -. Confirm that the `domain` parameter in the `/etc/sssd/sssd.conf` file -contains only the domain name listed in the `[domain/DOMAINNAME]` section. -+ ----- -domains = example.com ----- - -. Grant Apache permission to retrieve the email attribute. Add the following -lines to the `[ifp]` section of the `/etc/sssd/sssd.conf` file: -+ ----- -[ifp] -user_attributes = +mail -allowed_uids = apache, root ----- - -. To ensure that all of the changes are applied properly, restart SSSD: -+ ----- -$ systemctl restart sssd.service ----- - -. Test that the user information can be retrieved properly: -+ ----- -$ getent passwd -username:*:12345:12345:Example User:/home/username:/usr/bin/bash ----- - -. Confirm that the mail attribute you specified returns an email address from -your domain: -+ ----- -# dbus-send --print-reply --system --dest=org.freedesktop.sssd.infopipe \ - /org/freedesktop/sssd/infopipe org.freedesktop.sssd.infopipe.GetUserAttr \ - string:username \ <1> - array:string:mail <2> - -method return time=1528091855.672691 sender=:1.2787 -> destination=:1.2795 serial=13 reply_serial=2 - array [ - dict entry( - string "mail" - variant array [ - string "username@example.com" - ] - ) - ] ----- -<1> Provide a user name in your LDAP solution. -<2> Specify the attribute that you configured. - -. Attempt to log in to the VM as an LDAP user and confirm that you can log in -using LDAP credentials. You can use either the local console or a remote service -like SSH to log in. - -[IMPORTANT] -==== -By default, all users can log in to the remote basic authentication server by using -their LDAP credentials. You can change this behavior: - -* If you use IPA joined systems, -link:https://www.freeipa.org/page/Howto/HBAC_and_allow_all[configure host-based access control]. -* If you use Active Directory joined systems, use a -link:https://docs.pagure.org/SSSD.sssd/design_pages/active_directory_gpo_integration.html[group policy object]. -* For other cases, see the -link:https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/7/html/system-level_authentication_guide/sssd[SSSD configuration] documentation. -==== diff --git a/modules/ldap-failover-generate-certs.adoc b/modules/ldap-failover-generate-certs.adoc deleted file mode 100644 index 1d37057cf9e6..000000000000 --- a/modules/ldap-failover-generate-certs.adoc +++ /dev/null @@ -1,66 +0,0 @@ -// Module included in the following assemblies: -// -// * authentication/configuring-ldap-failover.adoc - -:_mod-docs-content-type: PROCEDURE -[id="sssd-generating-certificates_{context}"] -= Generating and sharing certificates with the remote basic authentication server - -Complete the following steps on the first control plane host listed in the Ansible host inventory file, -by default `/etc/ansible/hosts`. - -.Procedure - -. To ensure that communication between the remote basic authentication server and -{product-title} is trustworthy, create a set of Transport Layer Security (TLS) -certificates to use during the other phases of this set up. 
Run the following command: -+ ----- -# openshift start \ - --public-master=https://openshift.example.com:8443 \ - --write-config=/etc/origin/ ----- -+ -The output inclues the `/etc/origin/master/ca.crt` and -`/etc/origin/master/ca.key` signing certificates. -. Use the signing certificate to generate keys to use on the remote basic -authentication server: -+ ----- -# mkdir -p /etc/origin/remote-basic/ -# oc adm ca create-server-cert \ - --cert='/etc/origin/remote-basic/remote-basic.example.com.crt' \ - --key='/etc/origin/remote-basic/remote-basic.example.com.key' \ - --hostnames=remote-basic.example.com \ <1> - --signer-cert='/etc/origin/master/ca.crt' \ - --signer-key='/etc/origin/master/ca.key' \ - --signer-serial='/etc/origin/master/ca.serial.txt' ----- -+ -<1> A comma-separated list of all the hostnames and interface IP addresses that must access the -remote basic authentication server. -+ -[NOTE] -==== -The certificate files that you generate are valid for two years. You can alter -this period by changing the `--expire-days` and `--signer-expire-days` values, -but for security reasons, do not make them greater than 730. -==== -+ -[IMPORTANT] -==== -If you do not list all hostnames and interface IP addresses that must access the -remote basic authentication server, the HTTPS connection will fail. -==== -. Copy the necessary certificates and key to the remote basic authentication server: -+ ----- -# scp /etc/origin/master/ca.crt \ - root@remote-basic.example.com:/etc/pki/CA/certs/ - -# scp /etc/origin/remote-basic/remote-basic.example.com.crt \ - root@remote-basic.example.com:/etc/pki/tls/certs/ - -# scp /etc/origin/remote-basic/remote-basic.example.com.key \ - root@remote-basic.example.com:/etc/pki/tls/private/ ----- diff --git a/modules/ldap-failover-overview.adoc b/modules/ldap-failover-overview.adoc deleted file mode 100644 index afe0e3577db1..000000000000 --- a/modules/ldap-failover-overview.adoc +++ /dev/null @@ -1,26 +0,0 @@ -// Module included in the following assemblies: -// -// * authentication/configuring-ldap-failover.adoc - -[id="sssd-for-ldap-overview_{context}"] - -{product-title} provides an authentication -provider for use with Lightweight Directory Access Protocol (LDAP) setups, but -it can connect to only a single LDAP server. During {product-title} installation, -you can configure the System Security -Services Daemon (SSSD) for LDAP failover to ensure access to your cluster if one -LDAP server fails. - -The setup for this configuration is advanced and requires a separate -authentication server, also called an *remote basic authentication server*, for -{product-title} to communicate with. You configure this server -to pass extra attributes, such as email addresses, to {product-title} so it can -display them in the web console. - -This topic describes how to complete this set up on a dedicated physical or -virtual machine (VM), but you can also configure SSSD in containers. - -[IMPORTANT] -==== -You must complete all sections of this topic. 
-==== diff --git a/modules/ldap-failover-prereqs.adoc b/modules/ldap-failover-prereqs.adoc deleted file mode 100644 index 4bf4a40eb217..000000000000 --- a/modules/ldap-failover-prereqs.adoc +++ /dev/null @@ -1,29 +0,0 @@ -// Module included in the following assemblies: -// -// * authentication/configuring-ldap-failover.adoc - -[id="sssd-for-ldap-prereqs_{context}"] -= Prerequisites for configuring basic remote authentication - -* Before starting setup, you must know the following information about your -LDAP server: -** Whether the directory server is powered by -http://www.freeipa.org/page/Main_Page[FreeIPA], Active Directory, or another -LDAP solution. -** The Uniform Resource Identifier (URI) for the LDAP server, for example, -`ldap.example.com`. -** The location of the CA certificate for the LDAP server. -** Whether the LDAP server corresponds to RFC 2307 or RFC 2307bis for user groups. -* Prepare the servers: -** `remote-basic.example.com`: A VM to use as the remote basic authentication server. -*** Select an operating system that includes SSSD version 1.12.0 for this server, -such as Red Hat Enterprise Linux 7.0 or later. -ifeval::["{context}" == "sssd-ldap-failover-extend"] -*** Install mod_lookup_identity version 0.9.4 or later. You can obtain this -package link:https://github.com/adelton/mod_lookup_identity/releases[from -upstream]. -endif::[] -** `openshift.example.com`: A new installation of {product-title}. -*** You must not -have an authentication method configured for this cluster. -*** Do not start {product-title} on this cluster. diff --git a/modules/lvms-adding-a-storage-class.adoc b/modules/lvms-adding-a-storage-class.adoc deleted file mode 100644 index faa2e9101cd1..000000000000 --- a/modules/lvms-adding-a-storage-class.adoc +++ /dev/null @@ -1,55 +0,0 @@ -// This module is included in the following assemblies: -// -// storage/persistent_storage/persistent_storage_local/persistent-storage-using-lvms.adoc - -:_mod-docs-content-type: PROCEDURE -[id="adding-a-storage-class_{context}"] -= Adding a storage class - -You can add a storage class to an {product-title} cluster. A storage class describes a class of storage in the cluster and how the cluster dynamically provisions the persistent volumes (PVs) when the user specifies the storage class. A storage class also describes the type of device classes, the quality-of-service level, the filesystem type, and other details. - -.Procedure - -. Create a YAML file: -+ -[source,yaml] ----- -apiVersion: storage.k8s.io/v1 -kind: StorageClass -metadata: - name: lvm-storageclass -parameters: - csi.storage.k8s.io/fstype: ext4 - topolvm.io/device-class: vg1 -provisioner: topolvm.io -reclaimPolicy: Delete -allowVolumeExpansion: true -volumeBindingMode: WaitForFirstConsumer ----- -+ -Save the file by using a name similar to the storage class name. For example, `lvm-storageclass.yaml`. - -. Apply the YAML file by using the `oc` command: -+ -[source,terminal] ----- -$ oc apply -f <file_name> <1> ----- -<1> Replace `<file_name>` with the name of the YAML file. For example, `lvm-storageclass.yaml`. -+ -The cluster will create the storage class. - -. Verify that the cluster created the storage class by using the following command: -+ -[source,terminal] ----- -$ oc get storageclass <storage_class_name> <1> ----- -<1> Replace `<storage_class_name>` with the name of the storage class. For example, `lvm-storageclass`.
-+ -.Example output -[source,terminal,options="nowrap",role="white-space-pre"] ----- -NAME PROVISIONER RECLAIMPOLICY VOLUMEBINDINGMODE ALLOWVOLUMEEXPANSION AGE -lvm-storageclass topolvm.io Delete WaitForFirstConsumer true 1s ----- diff --git a/modules/multi-cluster-about.adoc b/modules/multi-cluster-about.adoc deleted file mode 100644 index dc3a0a99e94c..000000000000 --- a/modules/multi-cluster-about.adoc +++ /dev/null @@ -1,9 +0,0 @@ -// Module included in the following assemblies: -// -// * assemblies/web-console.adoc - -:_mod-docs-content-type: CONCEPT -[id="multi-cluster-about_{context}"] -= Multicluster console - -The multicluster console provides a single interface with consistent design for the hybrid cloud console. If you enable the feature, you can switch between Advanced Cluster Management (ACM) and the cluster console in the same browser tab. It provides a simplified and consistent design that allows for shared components. diff --git a/modules/nodes-cluster-overcommit-buffer-chunk.adoc b/modules/nodes-cluster-overcommit-buffer-chunk.adoc deleted file mode 100644 index ed6a6cc0204a..000000000000 --- a/modules/nodes-cluster-overcommit-buffer-chunk.adoc +++ /dev/null @@ -1,69 +0,0 @@ -// Module included in the following assemblies: -// -// * nodes/nodes-cluster-overcommit.adoc -// * cluster-logging-collector.adoc - - -[id="understandin-fluentd-buffering_{context}"] -= Understanding Buffer Chunk Limiting for Fluentd - -If the Fluentd logger is unable to keep up with a high number of logs, it will need -to switch to file buffering to reduce memory usage and prevent data loss. - -Fluentd file buffering stores records in _chunks_. Chunks are stored in _buffers_. - -[NOTE] -==== -To modify the `FILE_BUFFER_LIMIT` or `BUFFER_SIZE_LIMIT` parameters -in the Fluentd daemonset as described below, you must set OpenShift Logging to the unmanaged state. -Operators in an unmanaged state are unsupported and the cluster administrator assumes full control of the individual component configurations and upgrades. -==== - -The Fluentd `buffer_chunk_limit` is determined by the environment variable -`BUFFER_SIZE_LIMIT`, which has the default value `8m`. The file buffer size per -output is determined by the environment variable `FILE_BUFFER_LIMIT`, which has -the default value `256Mi`. The permanent volume size must be larger than -`FILE_BUFFER_LIMIT` multiplied by the output. - -On the Fluentd pods, permanent volume */var/lib/fluentd* should be -prepared by the PVC or hostmount, for example. That area is then used for the -file buffers. - -The `buffer_type` and `buffer_path` are configured in the Fluentd configuration files as -follows: - -[source,terminal] ----- -$ egrep "buffer_type|buffer_path" *.conf ----- - -.Example output -[source,text] ----- -output-es-config.conf: - buffer_type file - buffer_path `/var/lib/fluentd/buffer-output-es-config` -output-es-ops-config.conf: - buffer_type file - buffer_path `/var/lib/fluentd/buffer-output-es-ops-config` ----- - -The Fluentd `buffer_queue_limit` is the value of the variable `BUFFER_QUEUE_LIMIT`. This value is `32` by default. - -The environment variable `BUFFER_QUEUE_LIMIT` is calculated as `(FILE_BUFFER_LIMIT / (number_of_outputs * BUFFER_SIZE_LIMIT))`. - -If the `BUFFER_QUEUE_LIMIT` variable has the default set of values: - -* `FILE_BUFFER_LIMIT = 256Mi` -* `number_of_outputs = 1` -* `BUFFER_SIZE_LIMIT = 8Mi` - -The value of `buffer_queue_limit` will be `32`. To change the `buffer_queue_limit`, you must change the value of `FILE_BUFFER_LIMIT`. 
-In this formula, `number_of_outputs` is `1` if all the logs are sent to a single resource, and it is incremented by `1` for each additional resource. For example, the value of `number_of_outputs` is: - - * `1` - if all logs are sent to a single Elasticsearch pod - * `2` - if application logs are sent to an Elasticsearch pod and ops logs are sent to -another Elasticsearch pod - * `4` - if application logs are sent to an Elasticsearch pod, ops logs are sent to -another Elasticsearch pod, and both of them are forwarded to other Fluentd instances diff --git a/modules/nodes-cluster-resource-override-deploy.adoc b/modules/nodes-cluster-resource-override-deploy.adoc deleted file mode 100644 index 31fc3f10c459..000000000000 --- a/modules/nodes-cluster-resource-override-deploy.adoc +++ /dev/null @@ -1,8 +0,0 @@ -// Module included in the following assemblies: -// -// * nodes/clusters/nodes-cluster-overcommit.adoc - -[id="nodes-cluster-resource-override-deploy_{context}"] -= Installing the Cluster Resource Override Operator - -You can use the {product-title} console or CLI to install the Cluster Resource Override Operator. diff --git a/modules/nodes-nodes-cluster-timeout-graceful-shutdown.adoc b/modules/nodes-nodes-cluster-timeout-graceful-shutdown.adoc deleted file mode 100644 index ebec6c914703..000000000000 --- a/modules/nodes-nodes-cluster-timeout-graceful-shutdown.adoc +++ /dev/null @@ -1,25 +0,0 @@ -// Module included in the following assembly: -// * nodes/nodes-nodes-graceful-shutdown - -:_mod-docs-content-type: CONCEPT -[id="nodes-nodes-cluster-timeout-graceful-shutdown_{context}"] -= About graceful node shutdown - -During a graceful node shutdown, the kubelet sends a termination signal to pods running on the node and postpones the node shutdown until all the pods are evicted. If a node unexpectedly shuts down, the graceful node shutdown feature minimizes interruption to workloads running on these pods. - -During a graceful node shutdown, the kubelet stops pods in two phases: - -* Regular pod termination -* Critical pod termination - -You can define shutdown grace periods for regular and critical pods by configuring the following specifications in the `KubeletConfig` custom resource: - -* `shutdownGracePeriod`: Specifies the total duration for pod termination for regular and critical pods. -* `shutdownGracePeriodCriticalPods`: Specifies the duration for critical pod termination. This value must be less than the `shutdownGracePeriod` value. - -For example, if the `shutdownGracePeriod` value is `30s`, and the `shutdownGracePeriodCriticalPods` value is `10s`, the kubelet delays the node shutdown by 30 seconds. During the shutdown, the first 20 (30-10) seconds are reserved for gracefully shutting down regular pods, and the last 10 seconds are reserved for gracefully shutting down critical pods. - -To define a critical pod, assign a pod priority value greater than or equal to `2000000000`. To define a regular pod, assign a pod priority value of less than `2000000000`. - -For more information about how to define a priority value for pods, see the _Additional resources_ section.
-
diff --git a/modules/nodes-nodes-configuring-graceful-shutdown.adoc b/modules/nodes-nodes-configuring-graceful-shutdown.adoc
deleted file mode 100644
index 1a600003e398..000000000000
--- a/modules/nodes-nodes-configuring-graceful-shutdown.adoc
+++ /dev/null
@@ -1,150 +0,0 @@
-// Module included in the following assembly:
-// * nodes/nodes-nodes-graceful-shutdown
-
-:_mod-docs-content-type: PROCEDURE
-[id="nodes-nodes-configuring-graceful-shutdown_{context}"]
-= Configuring graceful node shutdown
-
-To configure graceful node shutdown, create a `KubeletConfig` custom resource (CR) to specify a shutdown grace period for pods on a set of nodes. The graceful node shutdown feature minimizes interruption to workloads that run on these nodes.
-
-[NOTE]
-====
-If you do not configure graceful node shutdown, the default grace period is `0` and the pod is forcefully evicted from the node.
-====
-
-.Prerequisites
-
-ifndef::openshift-rosa,openshift-dedicated[]
-* You are logged in to {product-title} as a user with the `cluster-admin` role.
-endif::openshift-rosa,openshift-dedicated[]
-ifdef::openshift-rosa,openshift-dedicated[]
-* You are logged in to {product-title} as a user with the `dedicated-admin` role.
-endif::openshift-rosa,openshift-dedicated[]
-* You have defined priority classes for pods that require critical or regular classification.
-
-.Procedure
-
-. Define shutdown grace periods in the `KubeletConfig` CR by saving the following YAML in the `kubelet-gns.yaml` file:
-+
-[source,yaml]
-----
-apiVersion: machineconfiguration.openshift.io/v1
-kind: KubeletConfig
-metadata:
-  name: graceful-shutdown
-  namespace: openshift-machine-config-operator
-spec:
-  machineConfigPoolSelector:
-    matchLabels:
-      pools.operator.machineconfiguration.openshift.io/worker: "" <1>
-  kubeletConfig:
-    shutdownGracePeriod: "3m" <2>
-    shutdownGracePeriodCriticalPods: "2m" <3>
-#...
-----
-<1> This example applies shutdown grace periods to nodes with the `worker` role.
-<2> Define the total time period for both regular and critical pods to shut down.
-<3> Define the portion of that time that is reserved for critical pods to shut down. This value must be less than the `shutdownGracePeriod` value.
-
-. Create the `KubeletConfig` CR by running the following command:
-+
-[source,terminal]
-----
-$ oc create -f kubelet-gns.yaml
-----
-+
-.Example output
-[source,terminal]
-----
-kubeletconfig.machineconfiguration.openshift.io/graceful-shutdown created
-----
-
-.Verification
-
-. View the kubelet logs for a node to verify the grace period configuration by using the command line or by viewing the `kubelet.conf` file.
-+
-[NOTE]
-====
-Ensure that the log messages for `shutdownGracePeriodRequested` and `shutdownGracePeriodCriticalPods` match the values set in the `KubeletConfig` CR.
-====
-
-.. To view the logs by using the command line, run the following command, replacing `<node_name>` with the name of the node:
-+
-[source,bash]
-----
-$ oc adm node-logs <node_name> -u kubelet
-----
-+
-.Example output
-[source,terminal]
-----
-Sep 12 22:13:46
-ci-ln-qv5pvzk-72292-xvkd9-worker-a-dmbr4
-hyperkube[22317]: I0912 22:13:46.687472
-22317 nodeshutdown_manager_linux.go:134]
-"Creating node shutdown manager"
-shutdownGracePeriodRequested="3m0s" <1>
-shutdownGracePeriodCriticalPods="2m0s"
-shutdownGracePeriodByPodPriority=[
-{Priority:0
-ShutdownGracePeriodSeconds:60}
-{Priority:2000000000
-ShutdownGracePeriodSeconds:120}]
-...
-----
-+
-<1> Ensure that the log messages for `shutdownGracePeriodRequested` and `shutdownGracePeriodCriticalPods` match the values set in the `KubeletConfig` CR.
-+
-.. To view the logs in the `kubelet.conf` file on a node, run the following commands to enter a debug session on the node:
-+
-[source,terminal]
-----
-$ oc debug node/<node_name>
-----
-+
-[source,terminal]
-----
-$ chroot /host
-----
-+
-[source,terminal]
-----
-$ cat /etc/kubernetes/kubelet.conf
-----
-+
-.Example output
-[source,terminal]
-----
-#...
-"memorySwap": {},
-  "containerLogMaxSize": "50Mi",
-  "logging": {
-    "flushFrequency": 0,
-    "verbosity": 0,
-    "options": {
-      "json": {
-        "infoBufferSize": "0"
-      }
-    }
-  },
-  "shutdownGracePeriod": "3m0s", <1>
-  "shutdownGracePeriodCriticalPods": "2m0s"
-}
-#...
-----
-+
-<1> Ensure that the `shutdownGracePeriod` and `shutdownGracePeriodCriticalPods` values match the values set in the `KubeletConfig` CR.
-
-. During a graceful node shutdown, you can verify that a pod was gracefully shut down by running the following command, replacing `<pod_name>` with the name of the pod:
-+
-[source,terminal]
-----
-$ oc describe pod <pod_name>
-----
-+
-.Example output
-[source,terminal]
-----
-Reason:        Terminated
-Message:       Pod was terminated in response to imminent node shutdown.
-----
diff --git a/modules/nodes-pods-secrets-creating-tls.adoc b/modules/nodes-pods-secrets-creating-tls.adoc
deleted file mode 100644
index dcfa0e92bea1..000000000000
--- a/modules/nodes-pods-secrets-creating-tls.adoc
+++ /dev/null
@@ -1,51 +0,0 @@
-// Module included in the following assemblies:
-//
-// * nodes/nodes-pods-secrets.adoc
-
-:_mod-docs-content-type: PROCEDURE
-[id="nodes-pods-secrets-creating-tls_{context}"]
-= Creating a TLS secret
-
-As an administrator, you can create a Transport Layer Security (TLS) secret, which allows you to store a certificate and its associated key that are typically used for TLS. When you use this type of secret, the `data` parameter of the `Secret` object must contain the `tls.key` and the `tls.crt` keys to use. The API server does not validate the values for each key.
-
-One common use for TLS secrets is to configure encryption in transit for ingress. You can also use a TLS secret with other resources or directly in your workload.
-
-[NOTE]
-====
-You can use the `stringData` parameter to use clear text content.
-====
-
-.Procedure
-
-. Create a `Secret` object in a YAML file on a control plane node:
-+
-.Example `Secret` object
-[source,yaml]
-----
-apiVersion: v1
-kind: Secret
-metadata:
-  name: secret-tls
-type: kubernetes.io/tls <1>
-data:
-  tls.crt: | <2>
-    MIIC2DCCAcCgAwIBAgIBATANBgkqh ...
-  tls.key: |
-    MIIEpgIBAAKCAQEA7yn3bRHQ5FHMQ ...
-
-----
-<1> Specifies a TLS secret.
-<2> Specifies the `tls.key` and the `tls.crt` keys to use.
-
-. Create the `Secret` object by running the following command, replacing `<filename>` with the name of your YAML file:
-+
-[source,terminal]
-----
-$ oc create -f <filename>.yaml
-----
-
-. To use the secret in a pod:
-
-.. Update the pod's service account to reference the secret, as shown in the "Understanding how to create secrets" section.
-
-.. Create the pod, which consumes the secret as an environment variable or as a file (using a `secret` volume), as shown in the "Understanding how to create secrets" section and in the sketch that follows.
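-
-For example, the following minimal sketch mounts the `secret-tls` secret from this procedure into a pod as files. The pod name, image, command, and mount path are illustrative placeholders:
-
-[source,yaml]
-----
-apiVersion: v1
-kind: Pod
-metadata:
-  name: tls-consumer
-spec:
-  containers:
-  - name: app
-    image: registry.access.redhat.com/ubi9/ubi-minimal
-    command: ["sleep", "infinity"]
-    volumeMounts:
-    - name: tls-certs
-      mountPath: /etc/app/tls <1>
-      readOnly: true
-  volumes:
-  - name: tls-certs
-    secret:
-      secretName: secret-tls <2>
-----
-<1> The `tls.crt` and `tls.key` keys appear as files in this directory.
-<2> References the TLS secret created in this procedure.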
diff --git a/modules/nodes-scheduler-node-names-configuring.adoc b/modules/nodes-scheduler-node-names-configuring.adoc
deleted file mode 100644
index 9508050ca848..000000000000
--- a/modules/nodes-scheduler-node-names-configuring.adoc
+++ /dev/null
@@ -1,60 +0,0 @@
-// Module included in the following assemblies:
-//
-// * nodes/nodes-scheduler-node-names.adoc
-
-:_mod-docs-content-type: PROCEDURE
-[id="nodes-scheduler-node-name-configuring_{context}"]
-= Configuring the Pod Node Constraints admission controller to use names
-
-You can configure the Pod Node Constraints admission controller to ensure that pods are only placed onto nodes with a specific name.
-
-.Prerequisites
-
-Ensure that you have the desired labels and node selector set up in your environment.
-
-For example, make sure that your pod configuration features the `nodeName`
-value indicating the desired node name:
-
-[source,yaml]
-----
-apiVersion: v1
-kind: Pod
-spec:
-  nodeName: <node_name>
-# ...
-----
-
-.Procedure
-
-To configure the Pod Node Constraints admission controller:
-
-. Create a file containing the admission controller information:
-+
-[source,yaml]
-----
-podNodeSelectorPluginConfig:
-  clusterDefaultNodeSelector: name-of-node-selector
-  namespace1: name-of-node-selector
-  namespace2: name-of-node-selector
-----
-+
-For example:
-+
-[source,yaml]
-----
-podNodeConstraintsPluginConfig:
-  clusterDefaultNodeSelector: ns1
-  ns1: region=west,env=test,infra=fedora,os=fedora
-----
-
-. Create an *AdmissionConfiguration* object that references the file:
-+
-[source,yaml]
-----
-kind: AdmissionConfiguration
-apiVersion: apiserver.k8s.io/v1alpha1
-plugins:
-- name: PodNodeConstraints
-  path: podnodeconstraints.yaml
-----
diff --git a/modules/nodes-scheduler-node-projects-about.adoc b/modules/nodes-scheduler-node-projects-about.adoc
deleted file mode 100644
index fe0fbb08d4df..000000000000
--- a/modules/nodes-scheduler-node-projects-about.adoc
+++ /dev/null
@@ -1,33 +0,0 @@
-// Module included in the following assemblies:
-//
-// * nodes/nodes-scheduler-node-projects.adoc
-
-[id="nodes-scheduler-node-projects-about_{context}"]
-= Understanding how to constrain pods by project name
-
-The Pod Node Selector admission controller determines where a pod can be placed by using labels on projects and node selectors specified in pods. A new pod is placed on a node associated with a project only if the node selectors in the pod match the labels in the project.
-
-After the pod is created, the node selectors are merged into the pod so that the `Pod` spec includes the labels originally included in the specification and any new labels from the node selectors. For example, if a pod specification includes one node selector and the project annotation adds another, the resulting `Pod` spec contains both.
-
-The Pod Node Selector admission controller also allows you to create a list of labels that are permitted in a specific project. This list acts as a whitelist that lets developers know what labels are acceptable to use in a project and gives administrators greater control over labeling in a cluster.
-
-The Pod Node Selector admission controller uses the annotation key `scheduler.alpha.kubernetes.io/node-selector` to assign node selectors to namespaces.
-
-[source,yaml]
-----
-apiVersion: v1
-kind: Namespace
-metadata:
-  annotations:
-    scheduler.alpha.kubernetes.io/node-selector: name-of-node-selector
-  name: namespace3
-----
-
-This admission controller has the following behavior:
-
-. If the `Namespace` object has an annotation with the `scheduler.alpha.kubernetes.io/node-selector` key, use its value as the node selector.
-. If the namespace lacks such an annotation, use the `clusterDefaultNodeSelector` defined in the `PodNodeSelector` plugin configuration file as the node selector.
-. Evaluate the pod's node selector against the namespace node selector for conflicts. Conflicts result in rejection.
-. Evaluate the pod's node selector against the namespace-specific whitelist defined in the plugin configuration file. Conflicts result in rejection.
-
-
diff --git a/modules/nodes-scheduler-node-projects-configuring.adoc b/modules/nodes-scheduler-node-projects-configuring.adoc
deleted file mode 100644
index ad9b5197f91c..000000000000
--- a/modules/nodes-scheduler-node-projects-configuring.adoc
+++ /dev/null
@@ -1,44 +0,0 @@
-// Module included in the following assemblies:
-//
-// * nodes/nodes-scheduler-node-projects.adoc
-
-:_mod-docs-content-type: PROCEDURE
-[id="nodes-scheduler-node-projects-configuring_{context}"]
-= Configuring the Pod Node Selector admission controller to use projects
-
-You can configure the Pod Node Selector admission controller to ensure that pods are only placed onto nodes in specific projects.
-The Pod Node Selector admission controller uses a configuration file to set options for the behavior of the backend.
-
-.Procedure
-
-. Create a file containing the admission controller information, replacing `<node_selector>` with the appropriate node selectors:
-+
-[source,yaml]
-----
-podNodeSelectorPluginConfig:
-  clusterDefaultNodeSelector: <node_selector>
-  namespace1: <node_selector>
-  namespace2: <node_selector>
-----
-+
-For example:
-+
-[source,yaml]
-----
-podNodeSelectorPluginConfig:
-  clusterDefaultNodeSelector: region=west
-  ns1: os=centos,region=west
-----
-
-. Create an *AdmissionConfiguration* object that references the file:
-+
-[source,yaml]
-----
-kind: AdmissionConfiguration
-apiVersion: apiserver.k8s.io/v1alpha1
-plugins:
-- name: PodNodeSelector
-  path: podnodeselector.yaml
-----
-
-
diff --git a/modules/nodes-scheduler-node-selectors-configuring.adoc b/modules/nodes-scheduler-node-selectors-configuring.adoc
deleted file mode 100644
index fb10be5d3886..000000000000
--- a/modules/nodes-scheduler-node-selectors-configuring.adoc
+++ /dev/null
@@ -1,74 +0,0 @@
-// Module included in the following assemblies:
-//
-// * nodes/nodes-scheduler-node-selector.adoc
-
-[id="nodes-scheduler-node-selectors-configuring_{context}"]
-= Configuring the Pod Node Constraints admission controller to use node selectors
-
-You can configure the Pod Node Constraints admission controller to ensure that pods are only placed onto nodes with specific labels.
-
-.Prerequisites
-
-. Ensure you have the desired labels
-ifdef::openshift-enterprise,openshift-webscale,openshift-origin[]
-on your nodes
-endif::openshift-enterprise,openshift-webscale,openshift-origin[]
-and node selector set up in your environment.
-+
-For example, make sure that your pod configuration features the `nodeSelector`
-value indicating the desired label:
-+
-[source,yaml]
-----
-apiVersion: v1
-kind: Pod
-spec:
-  nodeSelector:
-    <key>: <value>
-...
-----
-
-. Create a file containing the admission controller information:
-+
-[source,yaml]
-----
-podNodeSelectorPluginConfig:
-  clusterDefaultNodeSelector: name-of-node-selector
-  namespace1: name-of-node-selector
-  namespace2: name-of-node-selector
-----
-+
-For example:
-+
-[source,yaml]
-----
-podNodeConstraintsPluginConfig:
-  clusterDefaultNodeSelector: ns1
-  ns1: region=west,env=test,infra=fedora,os=fedora
-----
-
-. Create an *AdmissionConfiguration* object that references the file:
-+
-[source,yaml]
-----
-kind: AdmissionConfiguration
-apiVersion: apiserver.k8s.io/v1alpha1
-plugins:
-- name: PodNodeConstraints
-  path: podnodeconstraints.yaml
-  nodeSelectorLabelBlacklist:
-    kubernetes.io/hostname
-
-