From 2c1d7a539ba0e6c3264dd7b9217cf176ad0e014c Mon Sep 17 00:00:00 2001 From: Andrea Hoffer Date: Tue, 9 Sep 2025 15:42:10 -0400 Subject: [PATCH] OSDOCS#15507: Removing discrete headings in core OCP docs --- architecture/architecture-installation.adoc | 3 +- ...ring-request-header-identity-provider.adoc | 4 +- .../install-sno-installing-sno.adoc | 4 +- ...to-install-with-agent-based-installer.adoc | 2 +- installing/overview/index.adoc | 2 +- .../creating-machineset-vsphere.adoc | 6 +- microshift_rest_api/objects/index.adoc | 348 +++++++------- .../advanced-migration-options-3-4.adoc | 4 +- .../migrating-applications-3-4.adoc | 4 +- .../planning-migration-3-4.adoc | 39 +- .../troubleshooting-3-4.adoc | 4 +- modules/api-compatibility-exceptions.adoc | 10 +- modules/api-support-tiers.adoc | 8 +- modules/application-health-about.adoc | 2 +- modules/build-config-capability.adoc | 2 +- modules/cli-installing-cli.adoc | 6 +- modules/cloud-credential-operator.adoc | 5 +- modules/cluster-authentication-operator.adoc | 2 +- modules/cluster-autoscaler-about.adoc | 6 +- modules/cluster-autoscaler-operator.adoc | 4 +- modules/cluster-bare-metal-operator.adoc | 2 +- modules/cluster-capi-operator.adoc | 4 +- ...ter-cloud-controller-manager-operator.adoc | 1 - modules/cluster-config-operator.adoc | 2 +- ...ster-csi-snapshot-controller-operator.adoc | 2 +- modules/cluster-dns-operator.adoc | 2 +- modules/cluster-image-registry-operator.adoc | 2 +- modules/cluster-kube-scheduler-operator.adoc | 4 +- ...ube-storage-version-migrator-operator.adoc | 2 +- .../cluster-machine-approver-operator.adoc | 2 +- ...penshift-controller-manager-operators.adoc | 2 +- modules/cluster-samples-operator.adoc | 2 +- modules/cluster-storage-operator.adoc | 6 +- modules/cluster-version-operator.adoc | 2 +- modules/cluster-wide-proxy-preqs.adoc | 4 +- modules/console-operator.adoc | 2 +- .../control-plane-machine-set-operator.adoc | 4 +- modules/cpmso-yaml-provider-spec-gcp.adoc | 2 +- 
modules/cpmso-yaml-provider-spec-nutanix.adoc | 2 +- modules/deployment-config-capability.adoc | 2 +- modules/dynamic-plugin-api.adoc | 136 +++--- modules/dynamic-plugin-sdk-extensions.adoc | 152 +++--- modules/etcd-operator.adoc | 6 +- modules/go-deleting-argocd-instance.adoc | 2 +- ...o-cd-instance-on-infrastructure-nodes.adoc | 2 +- ...or-environment-labels-and-annotations.adoc | 4 +- modules/go-uninstalling-gitops-operator.adoc | 2 +- ...e-aws-iam-roles-in-sre-owned-projects.adoc | 2 +- modules/images-create-guide-general.adoc | 22 +- ...images-imagestream-import-import-mode.adoc | 2 +- modules/ingress-operator.adoc | 8 +- modules/insights-operator.adoc | 6 +- ...d-setting-up-ibm-cloud-infrastructure.adoc | 20 +- ...tallation-aws-user-infra-requirements.adoc | 6 +- modules/installation-azure-regions.adoc | 2 - ...stallation-azure-user-defined-routing.adoc | 10 +- modules/installation-custom-aws-vpc.adoc | 6 +- modules/installation-disk-partitioning.adoc | 2 +- modules/installation-network-user-infra.adoc | 2 +- modules/installation-process.adoc | 10 +- ...ion-requirements-user-infra-ibm-z-kvm.adoc | 8 +- ...on-user-infra-machines-static-network.adoc | 24 +- ...-vsphere-installer-infra-requirements.adoc | 12 +- ...phere-installer-infra-static-ip-nodes.adoc | 2 +- ...roubleshooting-assisted-installer-oci.adoc | 4 +- ...-additional-install-config-parameters.adoc | 2 +- ...install-bmc-addressing-for-dell-idrac.adoc | 6 +- ...pi-install-bmc-addressing-for-hpe-ilo.adoc | 4 +- modules/ipi-install-bmc-addressing.adoc | 5 +- modules/ipi-install-configuring-nodes.adoc | 6 +- modules/kube-apiserver-operator.adoc | 6 +- modules/kube-controller-manager-operator.adoc | 2 +- modules/lvms-about-lvmcluster-cr.adoc | 2 +- modules/machine-api-operator.adoc | 4 +- modules/machine-config-operator.adoc | 1 - ...achine-lifecycle-hook-deletion-format.adoc | 2 +- .../machine-lifecycle-hook-deletion-uses.adoc | 4 +- modules/machineset-yaml-gcp.adoc | 2 +- 
modules/machineset-yaml-nutanix.adoc | 2 +- modules/metering-troubleshooting.adoc | 2 +- ...minimum-ibm-power-system-requirements.adoc | 10 +- .../minimum-ibm-z-system-requirements.adoc | 8 +- modules/node-tuning-operator.adoc | 2 +- modules/nodes-pods-configmap-overview.adoc | 2 +- modules/nw-egress-ips-about.adoc | 2 +- modules/nw-externalip-object.adoc | 2 +- modules/nw-infw-operator-config-object.adoc | 3 +- modules/nw-operator-cr.adoc | 10 +- ...vn-k-adminnetwork-policy-action-rules.adoc | 6 +- modules/nw-ovn-k-adminnetwork-policy.adoc | 3 +- ...nw-ovn-k-baseline-adminnetwork-policy.adoc | 4 +- .../nw-ovn-kubernetes-session-affinity.adoc | 2 +- modules/nw-ptp-introduction.adoc | 2 +- .../nw-ptp-operator-metrics-reference.adoc | 2 +- modules/nw-using-ingress-forwarded.adoc | 2 +- .../oc-mirror-image-set-config-examples.adoc | 20 +- modules/olm-bundle-format.adoc | 2 +- modules/olm-webhook-considerations.adoc | 12 +- modules/openshift-apiserver-operator.adoc | 4 +- modules/openshift-service-ca-operator.adoc | 2 +- modules/operator-marketplace.adoc | 2 +- .../preferred-ibm-z-system-requirements.adoc | 9 +- modules/private-clusters-about.adoc | 6 +- modules/prometheus-operator.adoc | 2 +- ...ap-configuring-node-feature-discovery.adoc | 52 +- modules/psap-driver-toolkit.adoc | 4 +- .../psap-node-feature-discovery-operator.adoc | 2 +- ...ry-topology-updater-command-reference.adoc | 24 +- ...tp-overview-of-gnss-grandmaster-clock.adoc | 2 +- modules/quotas-and-limits-ibm-cloud.adoc | 10 +- modules/quotas-and-limits-ibm-power-vs.adoc | 9 +- ...mmended-ibm-power-system-requirements.adoc | 10 +- ...ing-network-verification-manually-cli.adoc | 2 +- ...ing-network-verification-manually-ocm.adoc | 2 +- ...ntext-constraints-psa-sync-exclusions.adoc | 4 +- modules/serverless-kn-container.adoc | 2 +- modules/service-ca-certificates.adoc | 8 +- modules/storage-ephemeral-storage-types.adoc | 4 +- ...ook-workflow-in-user-defined-projects.adoc | 2 +- 
modules/update-cluster-version-object.adoc | 2 +- .../vsphere-problem-detector-operator.adoc | 4 +- modules/ztp-ztp-building-blocks.adoc | 2 +- ...ss-cluster-traffic-ingress-controller.adoc | 2 +- networking/enable-cluster-wide-proxy.adoc | 2 +- ...-nmstate-updating-node-network-config.adoc | 4 +- nodes/index.adoc | 39 +- .../configuring-samples-operator.adoc | 4 +- operators/operator-reference.adoc | 18 +- rest_api/objects/index.adoc | 447 +++++++++--------- scalability_and_performance/index.adoc | 5 +- .../etcd-certificates.adoc | 2 +- .../node-certificates.adoc | 2 +- .../proxy-certificates.adoc | 2 +- .../service-ca-certificates.adoc | 2 +- ...-provided-certificates-for-api-server.adoc | 2 +- ...ided-certificates-for-default-ingress.adoc | 2 +- ...replacing-default-ingress-certificate.adoc | 2 +- security/certificates/updating-ca-bundle.adoc | 2 +- security/index.adoc | 14 +- .../spo-release-notes.adoc | 6 +- .../spo-seccomp.adoc | 2 +- .../spo-selinux.adoc | 2 +- support/gathering-cluster-data.adoc | 4 +- ...oubleshooting-operating-system-issues.adoc | 2 +- ...ate-using-custom-machine-config-pools.adoc | 10 +- .../overview-dynamic-plugin.adoc | 2 +- .../web_terminal/installing-web-terminal.adoc | 4 +- .../creating-windows-machineset-aws.adoc | 2 +- .../creating-windows-machineset-azure.adoc | 2 +- .../creating-windows-machineset-gcp.adoc | 2 +- .../creating-windows-machineset-nutanix.adoc | 2 +- .../creating-windows-machineset-vsphere.adoc | 2 +- ...disabling-windows-container-workloads.adoc | 2 +- .../enabling-windows-container-workloads.adoc | 2 +- .../scheduling-windows-workloads.adoc | 4 +- 155 files changed, 922 insertions(+), 961 deletions(-) diff --git a/architecture/architecture-installation.adoc b/architecture/architecture-installation.adoc index 0c911f9e94dc..736bbc79ce35 100644 --- a/architecture/architecture-installation.adoc +++ b/architecture/architecture-installation.adoc @@ -12,7 +12,6 @@ 
include::modules/supported-platforms-for-openshift-clusters.adoc[leveloffset=+2] include::modules/installation-process.adoc[leveloffset=+2] -[discrete] === Installation scope The scope of the {product-title} installation program is intentionally narrow. It is designed for simplicity and ensured success. You can complete many more configuration tasks after installation completes. @@ -29,4 +28,4 @@ include::modules/unmanaged-operators.adoc[leveloffset=+1] [id="architecture-installation-next-steps"] == Next steps -* xref:../installing/overview/installing-preparing.adoc#installing-preparing[Selecting a cluster installation method and preparing it for users] \ No newline at end of file +* xref:../installing/overview/installing-preparing.adoc#installing-preparing[Selecting a cluster installation method and preparing it for users] diff --git a/authentication/identity_providers/configuring-request-header-identity-provider.adoc b/authentication/identity_providers/configuring-request-header-identity-provider.adoc index 2cea646b461b..7e819c89649d 100644 --- a/authentication/identity_providers/configuring-request-header-identity-provider.adoc +++ b/authentication/identity_providers/configuring-request-header-identity-provider.adoc @@ -30,8 +30,8 @@ include::modules/identity-provider-add.adoc[leveloffset=+1] This example configures an Apache authentication proxy for the {product-title} using the request header identity provider. 
-[discrete] + include::modules/identity-provider-apache-custom-proxy-configuration.adoc[leveloffset=+2] -[discrete] + include::modules/identity-provider-configuring-apache-request-header.adoc[leveloffset=+2] diff --git a/installing/installing_sno/install-sno-installing-sno.adoc b/installing/installing_sno/install-sno-installing-sno.adoc index 1dea5912066c..1f064dfb5da5 100644 --- a/installing/installing_sno/install-sno-installing-sno.adoc +++ b/installing/installing_sno/install-sno-installing-sno.adoc @@ -150,7 +150,7 @@ Installing a single-node cluster on {ibm-z-name} and {ibm-linuxone-name} require Installing a single-node cluster on {ibm-z-name} simplifies installation for development and test environments and requires less resource requirements at entry level. ==== -[discrete] + === Hardware requirements * The equivalent of two Integrated Facilities for Linux (IFL), which are SMT2 enabled, for each cluster. @@ -181,7 +181,7 @@ Installing a single-node cluster on {ibm-power-name} requires user-provisioned i Installing a single-node cluster on {ibm-power-name} simplifies installation for development and test environments and requires less resource requirements at entry level. ==== -[discrete] + === Hardware requirements * The equivalent of two Integrated Facilities for Linux (IFL), which are SMT2 enabled, for each cluster. 
diff --git a/installing/installing_with_agent_based_installer/preparing-to-install-with-agent-based-installer.adoc b/installing/installing_with_agent_based_installer/preparing-to-install-with-agent-based-installer.adoc index a35760a50775..a3486d5e5712 100644 --- a/installing/installing_with_agent_based_installer/preparing-to-install-with-agent-based-installer.adoc +++ b/installing/installing_with_agent_based_installer/preparing-to-install-with-agent-based-installer.adoc @@ -34,7 +34,7 @@ include::modules/agent-installer-fips-compliance.adoc[leveloffset=+1] //Configuring FIPS through the Agent-based Installer include::modules/agent-installer-configuring-fips-compliance.adoc[leveloffset=+1] -[discrete] + [role="_additional-resources"] .Additional resources diff --git a/installing/overview/index.adoc b/installing/overview/index.adoc index 0b9d6c2ab72a..95c0d04f801d 100644 --- a/installing/overview/index.adoc +++ b/installing/overview/index.adoc @@ -33,7 +33,7 @@ include::modules/ipi-verifying-nodes-after-installation.adoc[leveloffset=+2] * link:https://access.redhat.com/documentation/en-us/assisted_installer_for_openshift_container_platform[Assisted Installer for OpenShift Container Platform] -[discrete] + === Installation scope The scope of the {product-title} installation program is intentionally narrow. It is designed for simplicity and ensured success. You can complete many more configuration tasks after installation completes. 
diff --git a/machine_management/creating_machinesets/creating-machineset-vsphere.adoc b/machine_management/creating_machinesets/creating-machineset-vsphere.adoc index ad6c21701d7d..6b675a9d6556 100644 --- a/machine_management/creating_machinesets/creating-machineset-vsphere.adoc +++ b/machine_management/creating_machinesets/creating-machineset-vsphere.adoc @@ -20,15 +20,15 @@ include::modules/machineset-vsphere-required-permissions.adoc[leveloffset=+1] include::modules/compute-machineset-upi-reqs.adoc[leveloffset=+1] //Obtaining the infrastructure ID -[discrete] + include::modules/machineset-upi-reqs-infra-id.adoc[leveloffset=+2] //Satisfying vSphere credentials requirements -[discrete] + include::modules/machineset-upi-reqs-vsphere-creds.adoc[leveloffset=+2] //Satisfying ignition configuration requirements -[discrete] + include::modules/machineset-upi-reqs-ignition-config.adoc[leveloffset=+2] [role="_additional-resources"] .Additional resources diff --git a/microshift_rest_api/objects/index.adoc b/microshift_rest_api/objects/index.adoc index 865291c1360e..173e3c4abb93 100644 --- a/microshift_rest_api/objects/index.adoc +++ b/microshift_rest_api/objects/index.adoc @@ -580,7 +580,7 @@ Required:: - `diskName` - `diskURI` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -640,7 +640,7 @@ Required:: - `secretName` - `shareName` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -682,7 +682,7 @@ Required:: - `secretName` - `shareName` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -717,7 +717,7 @@ Type:: `object` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -750,7 +750,7 @@ Type:: Required:: - `monitors` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -799,7 +799,7 @@ Type:: Required:: - `monitors` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -848,7 +848,7 @@ Type:: Required:: - `volumeID` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -889,7 +889,7 @@ Type:: Required:: - `volumeID` 
-[discrete] + === Schema [cols="1,1,1",options="header"] @@ -930,7 +930,7 @@ Type:: `object` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -969,7 +969,7 @@ Type:: Required:: - `items` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -1010,7 +1010,7 @@ Type:: `object` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -1043,7 +1043,7 @@ Type:: Required:: - `key` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -1080,7 +1080,7 @@ Type:: Required:: - `items` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -1121,7 +1121,7 @@ Type:: `object` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -1158,7 +1158,7 @@ Type:: `object` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -1199,7 +1199,7 @@ Type:: Required:: - `name` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -1325,7 +1325,7 @@ Type:: Required:: - `containerPort` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -1376,7 +1376,7 @@ Required:: - `resourceName` - `restartPolicy` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -1410,7 +1410,7 @@ Required:: - `driver` - `volumeHandle` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -1475,7 +1475,7 @@ Type:: Required:: - `driver` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -1518,7 +1518,7 @@ Type:: `object` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -1547,7 +1547,7 @@ Type:: Required:: - `path` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -1586,7 +1586,7 @@ Type:: `object` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -1617,7 +1617,7 @@ Type:: `object` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -1650,7 +1650,7 @@ Type:: Required:: - `items` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -1689,7 +1689,7 @@ Type:: `object` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -1726,7 +1726,7 @@ Type:: Required:: - `name` -[discrete] + === Schema 
[cols="1,1,1",options="header"] @@ -1761,7 +1761,7 @@ Type:: `object` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -1804,7 +1804,7 @@ Type:: Required:: - `name` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -1934,7 +1934,7 @@ Type:: `object` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -1969,7 +1969,7 @@ Type:: Required:: - `items` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -2008,7 +2008,7 @@ Type:: `object` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -2039,7 +2039,7 @@ Type:: `object` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -2066,7 +2066,7 @@ Type:: `object` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -2111,7 +2111,7 @@ Type:: Required:: - `driver` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -2156,7 +2156,7 @@ Type:: Required:: - `driver` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -2199,7 +2199,7 @@ Type:: `object` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -2234,7 +2234,7 @@ Type:: Required:: - `pdName` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -2277,7 +2277,7 @@ Type:: Required:: - `repository` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -2315,7 +2315,7 @@ Required:: - `endpoints` - `path` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -2357,7 +2357,7 @@ Required:: - `endpoints` - `path` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -2394,7 +2394,7 @@ Type:: Required:: - `port` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -2427,7 +2427,7 @@ Type:: `object` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -2460,7 +2460,7 @@ Type:: Required:: - `path` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -2503,7 +2503,7 @@ Type:: Required:: - `port` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -2553,7 +2553,7 @@ Required:: - `name` - `value` -[discrete] + === Schema [cols="1,1,1",options="header"] 
@@ -2588,7 +2588,7 @@ Required:: - `iqn` - `lun` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -2659,7 +2659,7 @@ Required:: - `iqn` - `lun` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -2729,7 +2729,7 @@ Required:: - `key` - `path` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -2764,7 +2764,7 @@ Type:: `object` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -2795,7 +2795,7 @@ Type:: `object` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -2832,7 +2832,7 @@ Type:: Required:: - `items` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -2871,7 +2871,7 @@ Type:: `object` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -2900,7 +2900,7 @@ Type:: Required:: - `path` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -2933,7 +2933,7 @@ Type:: Required:: - `items` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -2975,7 +2975,7 @@ Required:: - `server` - `path` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -3010,7 +3010,7 @@ Type:: `object` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -3043,7 +3043,7 @@ Type:: Required:: - `items` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -3084,7 +3084,7 @@ Type:: Required:: - `nodeSelectorTerms` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -3114,7 +3114,7 @@ Required:: - `key` - `operator` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -3157,7 +3157,7 @@ Type:: `object` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -3190,7 +3190,7 @@ Type:: Required:: - `fieldPath` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -3221,7 +3221,7 @@ Type:: `object` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -3272,7 +3272,7 @@ Type:: `object` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -3640,7 +3640,7 @@ Type:: Required:: - `items` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -3679,7 +3679,7 @@ Type:: 
`object` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -3747,7 +3747,7 @@ Type:: Required:: - `spec` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -3780,7 +3780,7 @@ Type:: Required:: - `claimName` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -3813,7 +3813,7 @@ Type:: Required:: - `items` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -3852,7 +3852,7 @@ Type:: `object` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -4006,7 +4006,7 @@ Type:: Required:: - `pdID` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -4037,7 +4037,7 @@ Type:: `object` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -4070,7 +4070,7 @@ Type:: Required:: - `topologyKey` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -4109,7 +4109,7 @@ Type:: `object` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -4140,7 +4140,7 @@ Type:: `object` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -4175,7 +4175,7 @@ Type:: `object` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -4208,7 +4208,7 @@ Type:: Required:: - `items` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -4249,7 +4249,7 @@ Type:: Required:: - `name` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -4278,7 +4278,7 @@ Type:: Required:: - `conditionType` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -4307,7 +4307,7 @@ Type:: Required:: - `name` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -4340,7 +4340,7 @@ Type:: Required:: - `name` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -4367,7 +4367,7 @@ Type:: `object` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -4440,7 +4440,7 @@ Type:: Required:: - `containers` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -4648,7 +4648,7 @@ Type:: Required:: - `items` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -4687,7 +4687,7 @@ Type:: `object` -[discrete] + === 
Schema [cols="1,1,1",options="header"] @@ -4720,7 +4720,7 @@ Type:: Required:: - `volumeID` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -4758,7 +4758,7 @@ Required:: - `weight` - `preference` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -4789,7 +4789,7 @@ Type:: `object` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -4852,7 +4852,7 @@ Type:: `object` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -4886,7 +4886,7 @@ Required:: - `registry` - `volume` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -4936,7 +4936,7 @@ Required:: - `monitors` - `image` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -4994,7 +4994,7 @@ Required:: - `monitors` - `image` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -5051,7 +5051,7 @@ Type:: Required:: - `items` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -5092,7 +5092,7 @@ Type:: Required:: - `name` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -5121,7 +5121,7 @@ Type:: Required:: - `resource` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -5158,7 +5158,7 @@ Type:: Required:: - `items` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -5197,7 +5197,7 @@ Type:: `object` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -5240,7 +5240,7 @@ Required:: - `system` - `secretRef` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -5307,7 +5307,7 @@ Required:: - `system` - `secretRef` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -5372,7 +5372,7 @@ Type:: Required:: - `type` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -5412,7 +5412,7 @@ Type:: `object` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -5445,7 +5445,7 @@ Type:: Required:: - `key` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -5482,7 +5482,7 @@ Type:: Required:: - `items` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -5523,7 +5523,7 @@ Type:: 
`object` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -5558,7 +5558,7 @@ Type:: `object` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -5591,7 +5591,7 @@ Type:: `object` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -5630,7 +5630,7 @@ Type:: `object` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -5701,7 +5701,7 @@ Type:: `object` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -5742,7 +5742,7 @@ Type:: Required:: - `items` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -5783,7 +5783,7 @@ Type:: Required:: - `path` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -5820,7 +5820,7 @@ Type:: Required:: - `items` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -5859,7 +5859,7 @@ Type:: `object` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -5902,7 +5902,7 @@ Type:: `object` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -5948,7 +5948,7 @@ Required:: - `name` - `value` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -5981,7 +5981,7 @@ Type:: Required:: - `port` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -6012,7 +6012,7 @@ Type:: `object` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -6067,7 +6067,7 @@ Required:: - `key` - `values` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -6098,7 +6098,7 @@ Type:: `object` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -6129,7 +6129,7 @@ Required:: - `topologyKey` - `whenUnsatisfiable` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -6212,7 +6212,7 @@ Required:: - `kind` - `name` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -6250,7 +6250,7 @@ Required:: - `kind` - `name` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -6291,7 +6291,7 @@ Type:: Required:: - `name` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -6450,7 +6450,7 @@ Required:: - `name` - `devicePath` -[discrete] + === 
Schema [cols="1,1,1",options="header"] @@ -6484,7 +6484,7 @@ Required:: - `name` - `mountPath` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -6536,7 +6536,7 @@ Type:: `object` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -6563,7 +6563,7 @@ Type:: `object` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -6604,7 +6604,7 @@ Type:: Required:: - `volumePath` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -6646,7 +6646,7 @@ Required:: - `weight` - `podAffinityTerm` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -6677,7 +6677,7 @@ Type:: `object` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -6718,7 +6718,7 @@ Type:: Required:: - `items` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -6759,7 +6759,7 @@ Type:: Required:: - `items` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -6800,7 +6800,7 @@ Type:: Required:: - `items` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -6841,7 +6841,7 @@ Type:: Required:: - `items` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -6882,7 +6882,7 @@ Type:: Required:: - `items` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -6923,7 +6923,7 @@ Type:: Required:: - `items` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -6964,7 +6964,7 @@ Type:: Required:: - `items` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -7005,7 +7005,7 @@ Type:: Required:: - `items` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -7046,7 +7046,7 @@ Type:: Required:: - `items` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -7087,7 +7087,7 @@ Type:: Required:: - `items` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -7128,7 +7128,7 @@ Type:: Required:: - `items` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -7169,7 +7169,7 @@ Type:: Required:: - `items` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -7210,7 +7210,7 @@ Type:: Required:: - 
`items` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -7251,7 +7251,7 @@ Type:: Required:: - `items` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -7292,7 +7292,7 @@ Type:: Required:: - `items` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -7333,7 +7333,7 @@ Type:: Required:: - `items` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -7374,7 +7374,7 @@ Type:: Required:: - `items` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -7415,7 +7415,7 @@ Type:: Required:: - `items` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -7456,7 +7456,7 @@ Type:: Required:: - `items` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -7497,7 +7497,7 @@ Type:: Required:: - `items` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -7536,7 +7536,7 @@ Type:: `object` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -7582,7 +7582,7 @@ Type:: `object` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -7863,7 +7863,7 @@ Type:: Required:: - `rule` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -7979,7 +7979,7 @@ Required:: - `reason` - `message` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -8026,7 +8026,7 @@ Type:: `object` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -8096,7 +8096,7 @@ Type:: `object` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -8130,7 +8130,7 @@ Required:: - `key` - `operator` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -8165,7 +8165,7 @@ Type:: `object` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -8204,7 +8204,7 @@ Type:: `object` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -8270,7 +8270,7 @@ Type:: `object` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -8372,7 +8372,7 @@ Required:: - `name` - `uid` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -8434,7 +8434,7 @@ Type:: `object` -[discrete] + === Schema 
[cols="1,1,1",options="header"] @@ -8465,7 +8465,7 @@ Type:: `object` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -8520,7 +8520,7 @@ Type:: `object` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -8559,7 +8559,7 @@ Type:: `object` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -8624,7 +8624,7 @@ Required:: - `type` - `object` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -8728,7 +8728,7 @@ Type:: Required:: - `items` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -8769,7 +8769,7 @@ Type:: Required:: - `items` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -8810,7 +8810,7 @@ Type:: Required:: - `items` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -8851,7 +8851,7 @@ Type:: Required:: - `items` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -8892,7 +8892,7 @@ Type:: Required:: - `items` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -8933,7 +8933,7 @@ Type:: Required:: - `items` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -8974,7 +8974,7 @@ Type:: Required:: - `items` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -9015,7 +9015,7 @@ Type:: Required:: - `items` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -9056,7 +9056,7 @@ Type:: Required:: - `items` -[discrete] + === Schema [cols="1,1,1",options="header"] diff --git a/migrating_from_ocp_3_to_4/advanced-migration-options-3-4.adoc b/migrating_from_ocp_3_to_4/advanced-migration-options-3-4.adoc index 80cab06da0df..66715319310a 100644 --- a/migrating_from_ocp_3_to_4/advanced-migration-options-3-4.adoc +++ b/migrating_from_ocp_3_to_4/advanced-migration-options-3-4.adoc @@ -32,7 +32,7 @@ include::modules/migration-state-migration-cli.adoc[leveloffset=+2] [role="_additional-resources"] [id="additional-resources-for-state-migration_{context}"] -[discrete] + === Additional resources * See 
xref:../migrating_from_ocp_3_to_4/advanced-migration-options-3-4.adoc#migration-excluding-pvcs_advanced-migration-options-3-4[Excluding PVCs from migration] to select PVCs for state migration. @@ -55,7 +55,7 @@ include::modules/migration-editing-pvs-in-migplan.adoc[leveloffset=+2] [role="_additional-resources"] [id="additional-resources-for-editing-pv-attributes_{context}"] -[discrete] + ==== Additional resources * For details about the `move` and `copy` actions, see xref:../migrating_from_ocp_3_to_4/about-mtc-3-4.adoc#migration-mtc-workflow_about-mtc-3-4[MTC workflow]. diff --git a/migrating_from_ocp_3_to_4/migrating-applications-3-4.adoc b/migrating_from_ocp_3_to_4/migrating-applications-3-4.adoc index 0dea91aec646..2fd90bba1d72 100644 --- a/migrating_from_ocp_3_to_4/migrating-applications-3-4.adoc +++ b/migrating_from_ocp_3_to_4/migrating-applications-3-4.adoc @@ -32,7 +32,7 @@ These annotations preserve the UID range, ensuring that the containers retain th include::modules/migration-prerequisites.adoc[leveloffset=+1] [role="_additional-resources"] -[discrete] + [id="additional-resources-for-migration-prerequisites_{context}"] === Additional resources for migration prerequisites @@ -50,7 +50,7 @@ include::modules/migration-adding-replication-repository-to-cam.adoc[leveloffset include::modules/migration-creating-migration-plan-cam.adoc[leveloffset=+2] [role="_additional-resources"] -[discrete] + [id="additional-resources-for-persistent-volume-copy-methods_{context}"] === Additional resources diff --git a/migrating_from_ocp_3_to_4/planning-migration-3-4.adoc b/migrating_from_ocp_3_to_4/planning-migration-3-4.adoc index 68252334b37d..7e4896246153 100644 --- a/migrating_from_ocp_3_to_4/planning-migration-3-4.adoc +++ b/migrating_from_ocp_3_to_4/planning-migration-3-4.adoc @@ -25,7 +25,7 @@ Beginning with {product-title} 4.13, {op-system} now uses {op-system-base-full} For more information, see xref:../architecture/architecture.adoc#architecture[OpenShift Container 
Platform architecture]. -[discrete] + === Immutable infrastructure {product-title} 4 uses {op-system-first}, which is designed to run containerized applications, and provides efficient installation, Operator-based management, and simplified upgrades. {op-system} is an immutable container host, rather than a customizable operating system like {op-system-base}. {op-system} enables {product-title} 4 to manage and automate the deployment of the underlying container host. {op-system} is a part of {product-title}, which means that everything runs inside a container and is deployed using {product-title}. @@ -34,7 +34,7 @@ In {product-title} 4, control plane nodes must run {op-system}, ensuring that fu For more information, see xref:../architecture/architecture-rhcos.adoc#architecture-rhcos[{op-system-first}]. -[discrete] + === Operators Operators are a method of packaging, deploying, and managing a Kubernetes application. Operators ease the operational complexity of running another piece of software. They watch over your environment and use the current state to make decisions in real time. Advanced Operators are designed to upgrade and react to failures automatically. @@ -44,7 +44,7 @@ For more information, see xref:../operators/understanding/olm-what-operators-are [id="migration-differences-install"] == Installation and upgrade -[discrete] + === Installation process To install {product-title} 3.11, you prepared your {op-system-base-full} hosts, set all of the configuration values your cluster needed, and then ran an Ansible playbook to install and set up your cluster. @@ -57,14 +57,13 @@ ifndef::openshift-origin[] If you want to add {op-system-base-full} worker machines to your {product-title} {product-version} cluster, you use an Ansible playbook to join the {op-system-base} worker machines after the cluster is running. 
For more information, see xref:../machine_management/adding-rhel-compute.adoc#adding-rhel-compute[Adding {op-system-base} compute machines to an {product-title} cluster]. endif::[] -[discrete] === Infrastructure options In {product-title} 3.11, you installed your cluster on infrastructure that you prepared and maintained. In addition to providing your own infrastructure, {product-title} 4 offers an option to deploy a cluster on infrastructure that the {product-title} installation program provisions and the cluster maintains. For more information, see xref:../architecture/architecture-installation.adoc#installation-overview_architecture-installation[OpenShift Container Platform installation overview]. -[discrete] + === Upgrading your cluster In {product-title} 3.11, you upgraded your cluster by running Ansible playbooks. In {product-title} {product-version}, the cluster manages its own updates, including updates to {op-system-first} on cluster nodes. You can easily upgrade your cluster by using the web console or by using the `oc adm upgrade` command from the OpenShift CLI and the Operators will automatically upgrade themselves. If your {product-title} {product-version} cluster has {op-system-base} worker machines, then you will still need to run an Ansible playbook to upgrade those worker machines. @@ -81,28 +80,28 @@ Review the changes and other considerations that might affect your transition fr Review the following storage changes to consider when transitioning from {product-title} 3.11 to {product-title} {product-version}. -[discrete] + ==== Local volume persistent storage Local storage is only supported by using the Local Storage Operator in {product-title} {product-version}. It is not supported to use the local provisioner method from {product-title} 3.11. For more information, see xref:../storage/persistent_storage_local/persistent-storage-local.adoc#persistent-storage-using-local-volume[Persistent storage using local volumes]. 
-[discrete] + ==== FlexVolume persistent storage The FlexVolume plugin location changed from {product-title} 3.11. The new location in {product-title} {product-version} is `/etc/kubernetes/kubelet-plugins/volume/exec`. Attachable FlexVolume plugins are no longer supported. For more information, see xref:../storage/persistent_storage/persistent-storage-flexvolume.adoc#persistent-storage-using-flexvolume[Persistent storage using FlexVolume]. -[discrete] + ==== Container Storage Interface (CSI) persistent storage Persistent storage using the Container Storage Interface (CSI) was link:https://access.redhat.com/support/offerings/techpreview[Technology Preview] in {product-title} 3.11. {product-title} {product-version} ships with xref:../storage/container_storage_interface/persistent-storage-csi.adoc#csi-drivers-supported_persistent-storage-csi[several CSI drivers]. You can also install your own driver. For more information, see xref:../storage/container_storage_interface/persistent-storage-csi.adoc#persistent-storage-using-csi[Persistent storage using the Container Storage Interface (CSI)]. -[discrete] + ==== Red Hat OpenShift Data Foundation OpenShift Container Storage 3, which is available for use with {product-title} 3.11, uses Red Hat Gluster Storage as the backing storage. @@ -111,7 +110,7 @@ OpenShift Container Storage 3, which is available for use with {product-title} 3 For more information, see xref:../storage/persistent_storage/persistent-storage-ocs.adoc#red-hat-openshift-data-foundation[Persistent storage using Red Hat OpenShift Data Foundation] and the link:https://access.redhat.com/articles/4731161[interoperability matrix] article. 
-[discrete] + ==== Unsupported persistent storage options Support for the following persistent storage options from {product-title} 3.11 has changed in {product-title} {product-version}: @@ -124,7 +123,7 @@ If you used one of these in {product-title} 3.11, you must choose a different pe For more information, see xref:../storage/understanding-persistent-storage.adoc#understanding-persistent-storage[Understanding persistent storage]. -[discrete] + ==== Migration of in-tree volumes to CSI drivers {product-title} 4 is migrating in-tree volume plugins to their Container Storage Interface (CSI) counterparts. In {product-title} {product-version}, CSI drivers are the new default for the following in-tree volume types: @@ -150,7 +149,7 @@ For more information, see xref:../storage/container_storage_interface/persistent Review the following networking changes to consider when transitioning from {product-title} 3.11 to {product-title} {product-version}. -[discrete] + ==== Network isolation mode The default network isolation mode for {product-title} 3.11 was `ovs-subnet`, though users frequently switched to use `ovn-multitenant`. The default network isolation mode for {product-title} {product-version} is controlled by a network policy. @@ -159,7 +158,7 @@ If your {product-title} 3.11 cluster used the `ovs-subnet` or `ovs-multitenant` For more information, see xref:../networking/network_policy/about-network-policy.adoc#about-network-policy[About network policy]. -[discrete] + ==== OVN-Kubernetes as the default networking plugin in Red Hat OpenShift Networking In {product-title} 3.11, OpenShift SDN was the default networking plugin in Red Hat OpenShift Networking. In {product-title} {product-version}, OVN-Kubernetes is now the default networking plugin. 
@@ -171,21 +170,19 @@ For information on migrating to OVN-Kubernetes from OpenShift SDN, see xref:../n Review the following logging changes to consider when transitioning from {product-title} 3.11 to {product-title} {product-version}. -[discrete] + ==== Deploying OpenShift Logging {product-title} 4 provides a simple deployment mechanism for OpenShift Logging, by using a Cluster Logging custom resource. For more information, see xref:../observability/logging/cluster-logging-deploying.adoc#cluster-logging-deploying_cluster-logging-deploying[Installing OpenShift Logging]. -[discrete] ==== Aggregated logging data You cannot transition your aggregate logging data from {product-title} 3.11 into your new {product-title} 4 cluster. For more information, see xref:../observability/logging/cluster-logging.adoc#cluster-logging-about_cluster-logging[About OpenShift Logging]. -[discrete] ==== Unsupported logging configurations Some logging configurations that were available in {product-title} 3.11 are no longer supported in {product-title} {product-version}. @@ -197,14 +194,14 @@ For more information on the explicitly unsupported logging cases, see the xref:. Review the following security changes to consider when transitioning from {product-title} 3.11 to {product-title} {product-version}. -[discrete] + ==== Unauthenticated access to discovery endpoints In {product-title} 3.11, an unauthenticated user could access the discovery endpoints (for example, [x-]`/api/*` and [x-]`/apis/*`). For security reasons, unauthenticated access to the discovery endpoints is no longer allowed in {product-title} {product-version}. If you do need to allow unauthenticated access, you can configure the RBAC settings as necessary; however, be sure to consider the security implications as this can expose internal cluster components to the external network. // TODO: Anything to xref to, or additional details? 
-[discrete] + ==== Identity providers Configuration for identity providers has changed for {product-title} 4, including the following notable changes: @@ -214,12 +211,12 @@ Configuration for identity providers has changed for {product-title} 4, includin For more information, see xref:../authentication/understanding-identity-provider.adoc#understanding-identity-provider[Understanding identity provider configuration]. -[discrete] + ==== OAuth token storage format Newly created OAuth HTTP bearer tokens no longer match the names of their OAuth access token objects. The object names are now a hash of the bearer token and are no longer sensitive. This reduces the risk of leaking sensitive information. -[discrete] + ==== Default security context constraints The `restricted` security context constraints (SCC) in {product-title} 4 can no longer be accessed by any authenticated user as the `restricted` SCC in {product-title} 3.11. The broad authenticated access is now granted to the `restricted-v2` SCC, which is more restrictive than the old `restricted` SCC. The `restricted` SCC still exists; users that want to use it must be specifically given permissions to do it. @@ -231,7 +228,7 @@ For more information, see xref:../authentication/managing-security-context-const Review the following monitoring changes when transitioning from {product-title} 3.11 to {product-title} {product-version}. You cannot migrate Hawkular configurations and metrics to Prometheus. -[discrete] + ==== Alert for monitoring infrastructure availability The default alert that triggers to ensure the availability of the monitoring structure was called `DeadMansSwitch` in {product-title} 3.11. This was renamed to `Watchdog` in {product-title} 4. If you had PagerDuty integration set up with this alert in {product-title} 3.11, you must set up the PagerDuty integration for the `Watchdog` alert in {product-title} 4. 
diff --git a/migrating_from_ocp_3_to_4/troubleshooting-3-4.adoc b/migrating_from_ocp_3_to_4/troubleshooting-3-4.adoc index 774a628c2629..0754f0eab28b 100644 --- a/migrating_from_ocp_3_to_4/troubleshooting-3-4.adoc +++ b/migrating_from_ocp_3_to_4/troubleshooting-3-4.adoc @@ -16,7 +16,7 @@ For known issues, see the xref:../migration_toolkit_for_containers/release_notes include::modules/migration-mtc-workflow.adoc[leveloffset=+1] -[discrete] + include::modules/migration-about-mtc-custom-resources.adoc[leveloffset=+2] include::modules/migration-mtc-cr-manifests.adoc[leveloffset=+1] @@ -59,7 +59,7 @@ include::modules/migration-rolling-back-migration-manually.adoc[leveloffset=+2] [role="_additional-resources"] [id="additional-resources-uninstalling_{context}"] -[discrete] + === Additional resources * xref:../operators/admin/olm-deleting-operators-from-cluster.adoc#olm-deleting-operators-from-a-cluster-using-web-console_olm-deleting-operators-from-cluster[Deleting Operators from a cluster using the web console] diff --git a/modules/api-compatibility-exceptions.adoc b/modules/api-compatibility-exceptions.adoc index dc0d7ac845e3..7cd3caa50de7 100644 --- a/modules/api-compatibility-exceptions.adoc +++ b/modules/api-compatibility-exceptions.adoc @@ -10,32 +10,32 @@ The following are exceptions to compatibility in {product-title}: ifndef::microshift[] -[discrete] + [id="OS-file-system-modifications-not-made_{context}"] == RHEL CoreOS file system modifications not made with a supported Operator No assurances are made at this time that a modification made to the host operating file system is preserved across minor releases except for where that modification is made through the public interface exposed via a supported Operator, such as the Machine Config Operator or Node Tuning Operator. 
-[discrete] + [id="modifications-to-cluster-infrastructure-in-cloud_{context}"] == Modifications to cluster infrastructure in cloud or virtualized environments No assurances are made at this time that a modification to the cloud hosting environment that supports the cluster is preserved except for where that modification is made through a public interface exposed in the product or is documented as a supported configuration. Cluster infrastructure providers are responsible for preserving their cloud or virtualized infrastructure except for where they delegate that authority to the product through an API. endif::microshift[] -[discrete] + [id="Functional-defaults-between-upgraded-cluster-new-installation_{context}"] == Functional defaults between an upgraded cluster and a new installation No assurances are made at this time that a new installation of a product minor release will have the same functional defaults as a version of the product that was installed with a prior minor release and upgraded to the equivalent version. For example, future versions of the product may provision cloud infrastructure with different defaults than prior minor versions. In addition, different default security choices may be made in future versions of the product than those made in past versions of the product. Past versions of the product will forward upgrade, but preserve legacy choices where appropriate specifically to maintain backwards compatibility. -[discrete] + [id="API-fields-that-have-the-prefix-unsupported-annotations_{context}"] == Usage of API fields that have the prefix "unsupported” or undocumented annotations Select APIs in the product expose fields with the prefix `unsupported`. No assurances are made at this time that usage of this field is supported across releases or within a release. Product support can request a customer to specify a value in this field when debugging specific problems, but its usage is not supported outside of that interaction. 
Usage of annotations on objects that are not explicitly documented are not assured support across minor releases. -[discrete] + [id="API-availability-per-product-installation-topology_{context}"] == API availability per product installation topology The OpenShift distribution will continue to evolve its supported installation topology, and not all APIs in one install topology will necessarily be included in another. For example, certain topologies may restrict read/write access to particular APIs if they are in conflict with the product installation topology or not include a particular API at all if not pertinent to that topology. APIs that exist in a given topology will be supported in accordance with the compatibility tiers defined above. diff --git a/modules/api-support-tiers.adoc b/modules/api-support-tiers.adoc index ac58d33a1bc6..3efc62fc981f 100644 --- a/modules/api-support-tiers.adoc +++ b/modules/api-support-tiers.adoc @@ -8,24 +8,24 @@ All commercially supported APIs, components, and features are associated under one of the following support levels: -[discrete] + [id="api-tier-1_{context}"] == API tier 1 APIs and application operating environments (AOEs) are stable within a major release. They may be deprecated within a major release, but they will not be removed until a subsequent major release. -[discrete] + [id="api-tier-2_{context}"] == API tier 2 APIs and AOEs are stable within a major release for a minimum of 9 months or 3 minor releases from the announcement of deprecation, whichever is longer. -[discrete] + [id="api-tier-3_{context}"] == API tier 3 This level applies to languages, tools, applications, and optional Operators included with {product-title} through Operator Hub. Each component will specify a lifetime during which the API and AOE will be supported. Newer versions of language runtime specific components will attempt to be as API and AOE compatible from minor version to minor version as possible. 
Minor version to minor version compatibility is not guaranteed, however. Components and developer tools that receive continuous updates through the Operator Hub, referred to as Operators and operands, should be considered API tier 3. Developers should use caution and understand how these components may change with each minor release. Users are encouraged to consult the compatibility guidelines documented by the component. -[discrete] + [id="api-tier-4_{context}"] == API tier 4 No compatibility is provided. API and AOE can change at any point. These capabilities should not be used by applications needing long-term support. diff --git a/modules/application-health-about.adoc b/modules/application-health-about.adoc index 67e986681eb8..f517d7116f1e 100644 --- a/modules/application-health-about.adoc +++ b/modules/application-health-about.adoc @@ -60,7 +60,7 @@ You can configure several fields to control the behavior of a probe: ** for a readiness probe, the pod is marked `Unready` ** for a startup probe, the container is killed and is subject to the pod's `restartPolicy` -[discrete] + [id="application-health-examples"] == Example probes diff --git a/modules/build-config-capability.adoc b/modules/build-config-capability.adoc index 9676fc6bd2c4..cc93b4a46798 100644 --- a/modules/build-config-capability.adoc +++ b/modules/build-config-capability.adoc @@ -6,7 +6,7 @@ [id="build-config-capability_{context}"] = Build capability -[discrete] + == Purpose The `Build` capability enables the `Build` API. The `Build` API manages the lifecycle of `Build` and `BuildConfig` objects. 
diff --git a/modules/cli-installing-cli.adoc b/modules/cli-installing-cli.adoc index 17b67cac4d0a..b46857b0ccc1 100644 --- a/modules/cli-installing-cli.adoc +++ b/modules/cli-installing-cli.adoc @@ -88,7 +88,7 @@ If you are updating a cluster in a disconnected environment, install the `oc` ve endif::restricted[] ==== -[discrete] + == Installing the OpenShift CLI on Linux You can install the OpenShift CLI (`oc`) binary on Linux by using the following procedure. @@ -142,7 +142,7 @@ $ echo $PATH $ oc ---- -[discrete] + == Installing the OpenShift CLI on Windows You can install the OpenShift CLI (`oc`) binary on Windows by using the following procedure. @@ -188,7 +188,7 @@ C:\> path C:\> oc ---- -[discrete] + == Installing the OpenShift CLI on macOS You can install the OpenShift CLI (`oc`) binary on macOS by using the following procedure. diff --git a/modules/cloud-credential-operator.adoc b/modules/cloud-credential-operator.adoc index 34de63c418f4..f3a81cecb0e1 100644 --- a/modules/cloud-credential-operator.adoc +++ b/modules/cloud-credential-operator.adoc @@ -9,12 +9,11 @@ The Cloud Credential Operator (CCO) manages cloud provider credentials as Kubern By setting different values for the `credentialsMode` parameter in the `install-config.yaml` file, the CCO can be configured to operate in several different modes. If no mode is specified, or the `credentialsMode` parameter is set to an empty string (`""`), the CCO operates in its default mode. -[discrete] == Project link:https://github.com/openshift/cloud-credential-operator[openshift-cloud-credential-operator] -[discrete] + == CRDs * `credentialsrequests.cloudcredential.openshift.io` @@ -22,7 +21,7 @@ link:https://github.com/openshift/cloud-credential-operator[openshift-cloud-cred ** CR: `CredentialsRequest` ** Validation: Yes -[discrete] + == Configuration objects No configuration required. 
diff --git a/modules/cluster-authentication-operator.adoc b/modules/cluster-authentication-operator.adoc index e1562de337c9..ff874d62c791 100644 --- a/modules/cluster-authentication-operator.adoc +++ b/modules/cluster-authentication-operator.adoc @@ -12,7 +12,7 @@ The Cluster Authentication Operator installs and maintains the `Authentication` $ oc get clusteroperator authentication -o yaml ---- -[discrete] + == Project link:https://github.com/openshift/cluster-authentication-operator[cluster-authentication-operator] diff --git a/modules/cluster-autoscaler-about.adoc b/modules/cluster-autoscaler-about.adoc index 1dbcebe4686e..a5b0bc5a605f 100644 --- a/modules/cluster-autoscaler-about.adoc +++ b/modules/cluster-autoscaler-about.adoc @@ -28,7 +28,7 @@ on all nodes the cluster, even though it does not manage the control plane nodes Ensure that the `maxNodesTotal` value in the `ClusterAutoscaler` resource definition that you create is large enough to account for the total possible number of machines in your cluster. This value must encompass the number of control plane machines and the possible number of compute machines that you might scale to. ==== -[discrete] + [id="cluster-autoscaler-scale-down_{context}"] == Automatic node removal @@ -50,7 +50,7 @@ If the following types of pods are present on a node, the cluster autoscaler wil For example, you set the maximum CPU limit to 64 cores and configure the cluster autoscaler to only create machines that have 8 cores each. If your cluster starts with 30 cores, the cluster autoscaler can add up to 4 more nodes with 32 cores, for a total of 62. -[discrete] + [id="cluster-autoscaler-limitations_{context}"] == Limitations @@ -68,7 +68,7 @@ The cluster autoscaler only adds nodes in autoscaled node groups if doing so wou If the available node types cannot meet the requirements for a pod request, or if the node groups that could meet these requirements are at their maximum size, the cluster autoscaler cannot scale up. 
==== -[discrete] + [id="cluster-autoscaler-interaction_{context}"] == Interaction with other scheduling features diff --git a/modules/cluster-autoscaler-operator.adoc b/modules/cluster-autoscaler-operator.adoc index 3475b3fc7958..5e88429e722d 100644 --- a/modules/cluster-autoscaler-operator.adoc +++ b/modules/cluster-autoscaler-operator.adoc @@ -7,12 +7,12 @@ The Cluster Autoscaler Operator manages deployments of the OpenShift Cluster Autoscaler using the `cluster-api` provider. -[discrete] + == Project link:https://github.com/openshift/cluster-autoscaler-operator[cluster-autoscaler-operator] -[discrete] + == CRDs * `ClusterAutoscaler`: This is a singleton resource, which controls the configuration autoscaler instance for the cluster. The Operator only responds to the `ClusterAutoscaler` resource named `default` in the managed namespace, the value of the `WATCH_NAMESPACE` environment variable. diff --git a/modules/cluster-bare-metal-operator.adoc b/modules/cluster-bare-metal-operator.adoc index 60da638eac59..2b6a7a830ae0 100644 --- a/modules/cluster-bare-metal-operator.adoc +++ b/modules/cluster-bare-metal-operator.adoc @@ -49,7 +49,7 @@ endif::cluster-caps[] ifdef::operator-ref[] -[discrete] + == Project link:https://github.com/openshift/cluster-baremetal-operator[cluster-baremetal-operator] diff --git a/modules/cluster-capi-operator.adoc b/modules/cluster-capi-operator.adoc index 9bbfa8f0ac39..c8cbb000b0f8 100644 --- a/modules/cluster-capi-operator.adoc +++ b/modules/cluster-capi-operator.adoc @@ -12,12 +12,12 @@ The {cluster-capi-operator} maintains the lifecycle of Cluster API resources. Th This Operator is available as a link:https://access.redhat.com/support/offerings/techpreview[Technology Preview] for Amazon Web Services (AWS) and Google Cloud Platform (GCP) clusters. 
==== -[discrete] + == Project link:https://github.com/openshift/cluster-capi-operator[cluster-capi-operator] -[discrete] + == CRDs * `awsmachines.infrastructure.cluster.x-k8s.io` diff --git a/modules/cluster-cloud-controller-manager-operator.adoc b/modules/cluster-cloud-controller-manager-operator.adoc index db2b4637927c..5b2bcc9c5554 100644 --- a/modules/cluster-cloud-controller-manager-operator.adoc +++ b/modules/cluster-cloud-controller-manager-operator.adoc @@ -21,7 +21,6 @@ The Cloud Controller Manager Operator includes the following components: By default, the Operator exposes Prometheus metrics through the `metrics` service. -[discrete] == Project link:https://github.com/openshift/cluster-cloud-controller-manager-operator[cluster-cloud-controller-manager-operator] diff --git a/modules/cluster-config-operator.adoc b/modules/cluster-config-operator.adoc index 3d6c06b5df40..f3dbdc424ea2 100644 --- a/modules/cluster-config-operator.adoc +++ b/modules/cluster-config-operator.adoc @@ -12,7 +12,7 @@ The Cluster Config Operator performs the following tasks related to `config.open * Handles migrations. 
-[discrete] + == Project link:https://github.com/openshift/cluster-config-operator[cluster-config-operator] diff --git a/modules/cluster-csi-snapshot-controller-operator.adoc b/modules/cluster-csi-snapshot-controller-operator.adoc index 727718cd8b4c..6c917ffdcf6f 100644 --- a/modules/cluster-csi-snapshot-controller-operator.adoc +++ b/modules/cluster-csi-snapshot-controller-operator.adoc @@ -35,7 +35,7 @@ The Cluster CSI Snapshot Controller Operator installs and maintains the CSI Snap ifdef::operator-ref[] -[discrete] + == Project link:https://github.com/openshift/cluster-csi-snapshot-controller-operator[cluster-csi-snapshot-controller-operator] diff --git a/modules/cluster-dns-operator.adoc b/modules/cluster-dns-operator.adoc index f61081a1fe00..58948328c286 100644 --- a/modules/cluster-dns-operator.adoc +++ b/modules/cluster-dns-operator.adoc @@ -14,7 +14,7 @@ The Operator creates a working default deployment based on the cluster's configu The DNS Operator manages CoreDNS as a Kubernetes daemon set exposed as a service with a static IP. CoreDNS runs on all nodes in the cluster. -[discrete] + == Project link:https://github.com/openshift/cluster-dns-operator[cluster-dns-operator] diff --git a/modules/cluster-image-registry-operator.adoc b/modules/cluster-image-registry-operator.adoc index 9318773856d8..b9d9290117f7 100644 --- a/modules/cluster-image-registry-operator.adoc +++ b/modules/cluster-image-registry-operator.adoc @@ -41,7 +41,7 @@ If you disable the `ImageRegistry` capability or if you disable the integrated { If you disable the `ImageRegistry` capability, you can reduce the overall resource footprint of {product-title} in resource-constrained environments. Depending on your deployment, you can disable this component if you do not need it. 
endif::[] -[discrete] + == Project link:https://github.com/openshift/cluster-image-registry-operator[cluster-image-registry-operator] diff --git a/modules/cluster-kube-scheduler-operator.adoc b/modules/cluster-kube-scheduler-operator.adoc index 56fff2e219f0..a17db30e95d8 100644 --- a/modules/cluster-kube-scheduler-operator.adoc +++ b/modules/cluster-kube-scheduler-operator.adoc @@ -16,12 +16,12 @@ The Kubernetes Scheduler Operator contains the following components: By default, the Operator exposes Prometheus metrics through the metrics service. -[discrete] + == Project link:https://github.com/openshift/cluster-kube-scheduler-operator[cluster-kube-scheduler-operator] -[discrete] + == Configuration The configuration for the Kubernetes Scheduler is the result of merging: diff --git a/modules/cluster-kube-storage-version-migrator-operator.adoc b/modules/cluster-kube-storage-version-migrator-operator.adoc index ac9625659c03..9edc2a8a7a9a 100644 --- a/modules/cluster-kube-storage-version-migrator-operator.adoc +++ b/modules/cluster-kube-storage-version-migrator-operator.adoc @@ -7,7 +7,7 @@ The Kubernetes Storage Version Migrator Operator detects changes of the default storage version, creates migration requests for resource types when the storage version changes, and processes migration requests. -[discrete] + == Project link:https://github.com/openshift/cluster-kube-storage-version-migrator-operator[cluster-kube-storage-version-migrator-operator] diff --git a/modules/cluster-machine-approver-operator.adoc b/modules/cluster-machine-approver-operator.adoc index 0d534396c726..ed9e7d264dba 100644 --- a/modules/cluster-machine-approver-operator.adoc +++ b/modules/cluster-machine-approver-operator.adoc @@ -12,7 +12,7 @@ The Cluster Machine Approver Operator automatically approves the CSRs requested For the control plane node, the `approve-csr` service on the bootstrap node automatically approves all CSRs during the cluster bootstrapping phase. 
==== -[discrete] + == Project link:https://github.com/openshift/cluster-machine-approver[cluster-machine-approver-operator] diff --git a/modules/cluster-openshift-controller-manager-operators.adoc b/modules/cluster-openshift-controller-manager-operators.adoc index 2124569ae0b0..afe0dfa64507 100644 --- a/modules/cluster-openshift-controller-manager-operators.adoc +++ b/modules/cluster-openshift-controller-manager-operators.adoc @@ -19,7 +19,7 @@ The custom resource definition (CRD) `openshiftcontrollermanagers.operator.opens $ oc get crd openshiftcontrollermanagers.operator.openshift.io -o yaml ---- -[discrete] + == Project link:https://github.com/openshift/cluster-openshift-controller-manager-operator[cluster-openshift-controller-manager-operator] diff --git a/modules/cluster-samples-operator.adoc b/modules/cluster-samples-operator.adoc index d012f0a52e20..c12ddd0e64a3 100644 --- a/modules/cluster-samples-operator.adoc +++ b/modules/cluster-samples-operator.adoc @@ -57,7 +57,7 @@ The samples resource includes a finalizer, which cleans up the following upon it Upon deletion of the samples resource, the Cluster Samples Operator recreates the resource using the default configuration. -[discrete] + == Project link:https://github.com/openshift/cluster-samples-operator[cluster-samples-operator] diff --git a/modules/cluster-storage-operator.adoc b/modules/cluster-storage-operator.adoc index 18c46fa0523b..4ff5f81a431b 100644 --- a/modules/cluster-storage-operator.adoc +++ b/modules/cluster-storage-operator.adoc @@ -41,19 +41,19 @@ endif::cluster-caps[] ifdef::operator-ref[] -[discrete] + == Project link:https://github.com/openshift/cluster-storage-operator[cluster-storage-operator] -[discrete] + == Configuration No configuration is required. endif::operator-ref[] -[discrete] + == Notes * The storage class that the Operator creates can be made non-default by editing its annotation, but this storage class cannot be deleted as long as the Operator runs. 
diff --git a/modules/cluster-version-operator.adoc b/modules/cluster-version-operator.adoc index 2e3114fa15a1..6f458c1dd4bc 100644 --- a/modules/cluster-version-operator.adoc +++ b/modules/cluster-version-operator.adoc @@ -11,7 +11,7 @@ The CVO also checks with the OpenShift Update Service to see the valid updates a For more information regarding cluster version condition types, see "Understanding cluster version condition types". -[discrete] + == Project link:https://github.com/openshift/cluster-version-operator[cluster-version-operator] diff --git a/modules/cluster-wide-proxy-preqs.adoc b/modules/cluster-wide-proxy-preqs.adoc index e3dc88906add..d234a10eaf90 100644 --- a/modules/cluster-wide-proxy-preqs.adoc +++ b/modules/cluster-wide-proxy-preqs.adoc @@ -8,7 +8,7 @@ To configure a cluster-wide proxy, you must meet the following requirements. These requirements are valid when you configure a proxy during installation or postinstallation. -[discrete] + [id="cluster-wide-proxy-general-prereqs_{context}"] == General requirements @@ -34,7 +34,7 @@ These endpoints are required to complete requests from the nodes to the AWS EC2 When using a cluster-wide proxy, you must configure the `s3..amazonaws.com` endpoint as type `Gateway`. Also, you can configure the `ec2..amazonaws.com` and `elasticloadbalancing..amazonaws.com` endpoints only as type `Interface`. 
==== -[discrete] + [id="cluster-wide-proxy-network-prereqs_{context}"] == Network requirements diff --git a/modules/console-operator.adoc b/modules/console-operator.adoc index 83852a80b12b..9c957cdedfc2 100644 --- a/modules/console-operator.adoc +++ b/modules/console-operator.adoc @@ -36,7 +36,7 @@ The Console Operator installs and maintains the {product-title} web console on a ifdef::operator-ref[] -[discrete] + == Project link:https://github.com/openshift/console-operator[console-operator] diff --git a/modules/control-plane-machine-set-operator.adoc b/modules/control-plane-machine-set-operator.adoc index d55585fcaa18..08eceaf5dd6a 100644 --- a/modules/control-plane-machine-set-operator.adoc +++ b/modules/control-plane-machine-set-operator.adoc @@ -12,12 +12,12 @@ The Control Plane Machine Set Operator automates the management of control plane This Operator is available for Amazon Web Services (AWS), Google Cloud Platform (GCP), Microsoft Azure, Nutanix, and VMware vSphere. ==== -[discrete] + == Project link:https://github.com/openshift/cluster-control-plane-machine-set-operator[cluster-control-plane-machine-set-operator] -[discrete] + == CRDs * `controlplanemachineset.machine.openshift.io` diff --git a/modules/cpmso-yaml-provider-spec-gcp.adoc b/modules/cpmso-yaml-provider-spec-gcp.adoc index 618f10713ced..c46fa0fa0b7b 100644 --- a/modules/cpmso-yaml-provider-spec-gcp.adoc +++ b/modules/cpmso-yaml-provider-spec-gcp.adoc @@ -8,7 +8,7 @@ When you create a control plane machine set for an existing cluster, the provider specification must match the `providerSpec` configuration in the control plane machine custom resource (CR) that the installation program creates. You can omit any field that is set in the failure domain section of the CR. 
-[discrete] + [id="cpmso-yaml-provider-spec-gcp-oc_{context}"] == Values obtained by using the OpenShift CLI diff --git a/modules/cpmso-yaml-provider-spec-nutanix.adoc b/modules/cpmso-yaml-provider-spec-nutanix.adoc index 868f3c6edc58..345da7a222f8 100644 --- a/modules/cpmso-yaml-provider-spec-nutanix.adoc +++ b/modules/cpmso-yaml-provider-spec-nutanix.adoc @@ -8,7 +8,7 @@ When you create a control plane machine set for an existing cluster, the provider specification must match the `providerSpec` configuration in the control plane machine custom resource (CR) that the installation program creates. -[discrete] + [id="cpmso-yaml-provider-spec-nutanix-oc_{context}"] == Values obtained by using the OpenShift CLI diff --git a/modules/deployment-config-capability.adoc b/modules/deployment-config-capability.adoc index 8acd0434f888..17c0cf71d8f4 100644 --- a/modules/deployment-config-capability.adoc +++ b/modules/deployment-config-capability.adoc @@ -6,7 +6,7 @@ [id="deployment-config-capability_{context}"] = DeploymentConfig capability -[discrete] + == Purpose The `DeploymentConfig` capability enables and manages the `DeploymentConfig` API. diff --git a/modules/dynamic-plugin-api.adoc b/modules/dynamic-plugin-api.adoc index f47d58714085..a8b0b779022b 100644 --- a/modules/dynamic-plugin-api.adoc +++ b/modules/dynamic-plugin-api.adoc @@ -9,7 +9,7 @@ [id="dynamic-plugin-api_{context}"] = {product-title} console API -[discrete] + == `useActivePerspective` Hook that provides the currently active perspective and a callback for setting the active perspective. It returns a tuple containing the current active perspective and setter callback. @@ -30,7 +30,7 @@ const Component: React.FC = (props) => { } ---- -[discrete] + == `GreenCheckCircleIcon` Component for displaying a green check mark circle icon. @@ -49,7 +49,7 @@ Component for displaying a green check mark circle icon. 
|`size` |(optional) icon size: (`sm`, `md`, `lg`, `xl`) |=== -[discrete] + == `RedExclamationCircleIcon` Component for displaying a red exclamation mark circle icon. @@ -68,7 +68,7 @@ Component for displaying a red exclamation mark circle icon. |`size` |(optional) icon size: (`sm`, `md`, `lg`, `xl`) |=== -[discrete] + == `YellowExclamationTriangleIcon` Component for displaying a yellow triangle exclamation icon. @@ -87,7 +87,7 @@ Component for displaying a yellow triangle exclamation icon. |`size` |(optional) icon size: (`sm`, `md`, `lg`, `xl`) |=== -[discrete] + == `BlueInfoCircleIcon` Component for displaying a blue info circle icon. @@ -106,7 +106,7 @@ Component for displaying a blue info circle icon. |`size` |(optional) icon size: ('sm', 'md', 'lg', 'xl') |=== -[discrete] + == `ErrorStatus` Component for displaying an error status popover. @@ -127,7 +127,7 @@ Component for displaying an error status popover. |`popoverTitle` |(optional) title for popover |=== -[discrete] + == `InfoStatus` Component for displaying an information status popover. @@ -148,7 +148,7 @@ Component for displaying an information status popover. |`popoverTitle` |(optional) title for popover |=== -[discrete] + == `ProgressStatus` Component for displaying a progressing status popover. @@ -169,7 +169,7 @@ Component for displaying a progressing status popover. |`popoverTitle` |(optional) title for popover |=== -[discrete] + == `SuccessStatus` Component for displaying a success status popover. @@ -190,7 +190,7 @@ Component for displaying a success status popover. |`popoverTitle` |(optional) title for popover |=== -[discrete] + == `checkAccess` Provides information about user access to a given resource. It returns an object with resource access information. @@ -202,7 +202,7 @@ Provides information about user access to a given resource. 
It returns an object |`impersonate` |impersonation details |=== -[discrete] + == `useAccessReview` Hook that provides information about user access to a given resource. It returns an array with `isAllowed` and `loading` values. @@ -214,7 +214,7 @@ Hook that provides information about user access to a given resource. It returns |`impersonate` |impersonation details |=== -[discrete] + == `useResolvedExtensions` React hook for consuming Console extensions with resolved `CodeRef` properties. This hook accepts the same argument(s) as `useExtensions` hook and returns an adapted list of extension instances, resolving all code references within each extension's properties. @@ -238,7 +238,7 @@ extension as an argument and return a boolean flag indicating whether or not the extension meets desired type constraints |=== -[discrete] + == `HorizontalNav` A component that creates a Navigation bar for a page. Routing is handled as part of the component. `console.tab/horizontalNav` can be used to add additional content to any horizontal navigation. @@ -268,7 +268,6 @@ K8sResourceCommon type |`match` |match object provided by React Router |=== -[discrete] == `VirtualizedTable` A component for making virtualized tables. @@ -307,7 +306,6 @@ const MachineList: React.FC = (props) => { |`rowData` |(optional) data specific to row |=== -[discrete] == `TableData` Component for displaying table data within a table row. @@ -338,7 +336,7 @@ const PodRow: React.FC> = ({ obj, activeColumnIDs }) |`className` |(optional) option class name for styling |=== -[discrete] + == `useActiveColumns` A hook that provides a list of user-selected active TableColumns. @@ -374,7 +372,7 @@ user settings. Usually a group/version/kind (GVK) string for a resource. A tuple containing the current user selected active columns (a subset of options.columns), and a boolean flag indicating whether user settings have been loaded. -[discrete] + == `ListPageHeader` Component for generating a page header. 
@@ -399,7 +397,7 @@ const exampleList: React.FC = () => { |`badge` |(optional) badge icon as react node |=== -[discrete] + == `ListPageCreate` Component for adding a create button for a specific resource kind that automatically generates a link to the create YAML for this resource. @@ -424,7 +422,7 @@ const exampleList: React.FC = () => { |`groupVersionKind` |the resource group/version/kind to represent |=== -[discrete] + == `ListPageCreateLink` Component for creating a stylized link. @@ -454,7 +452,7 @@ determine access |`children` |(optional) children for the component |=== -[discrete] + == `ListPageCreateButton` Component for creating button. @@ -482,7 +480,7 @@ determine access |`pfButtonProps` |(optional) Patternfly Button props |=== -[discrete] + == `ListPageCreateDropdown` Component for creating a dropdown wrapped with permissions check. @@ -518,7 +516,6 @@ determine access |`children` |(optional) children for the dropdown toggle |=== -[discrete] == `ListPageFilter` Component that generates filter for list page. @@ -570,7 +567,6 @@ both name and label filter |`hideColumnManagement` |(optional) flag to hide the column management |=== -[discrete] == `useListPageFilter` A hook that manages filter state for the ListPageFilter component. It returns a tuple containing the data filtered by all static filters, the data filtered by all static and row filters, and a callback that updates rowFilters. @@ -608,7 +604,6 @@ available filter options statically applied to the data |=== -[discrete] == `ResourceLink` Component that creates a link to a specific resource type with an icon badge. @@ -656,7 +651,7 @@ link to |`truncate` |(optional) flag to truncate the link if too long |=== -[discrete] + == `ResourceIcon` Component that creates an icon badge for a specific resource type. @@ -675,7 +670,7 @@ Component that creates an icon badge for a specific resource type. 
|`className` |(optional) class style for component |=== -[discrete] + == `useK8sModel` Hook that retrieves the k8s model for provided K8sGroupVersionKind from redux. It returns an array with the first item as k8s model and second item as `inFlight` status. @@ -697,7 +692,7 @@ K8sGroupVersionKind is preferred alternatively can pass reference for group, version, kind which is deprecated, i.e, group/version/kind (GVK) K8sResourceKindReference. |=== -[discrete] + == `useK8sModels` Hook that retrieves all current k8s models from redux. It returns an array with the first item as the list of k8s model and second item as `inFlight` status. @@ -711,7 +706,7 @@ const Component: React.FC = () => { } ---- -[discrete] + == `useK8sWatchResource` Hook that retrieves the k8s resource along with status for loaded and error. It returns an array with first item as resource(s), second item as loaded status and third item as error state if any. @@ -734,7 +729,7 @@ const Component: React.FC = () => { |`initResource` |options needed to watch for resource. |=== -[discrete] + == `useK8sWatchResources` Hook that retrieves the k8s resources along with their respective status for loaded and error. It returns a map where keys are as provided in initResouces and value has three properties data, loaded and error. @@ -761,7 +756,7 @@ wherein key will be unique to resource and value will be options needed to watch for the respective resource. |=== -[discrete] + == `consoleFetch` A custom wrapper around `fetch` that adds console specific headers and allows for retries and timeouts.It also validates the response status code and throws appropriate error or logs out the user if required. It returns a promise that resolves to the response. 
@@ -774,7 +769,7 @@ A custom wrapper around `fetch` that adds console specific headers and allows fo |`timeout` |The timeout in milliseconds |=== -[discrete] + == `consoleFetchJSON` A custom wrapper around `fetch` that adds console specific headers and allows for retries and timeouts. It also validates the response status code and throws appropriate error or logs out the user if required. It returns the response as a JSON object. Uses `consoleFetch` internally. It returns a promise that resolves to the response as JSON object. @@ -794,7 +789,7 @@ A custom wrapper around `fetch` that adds console specific headers and allows fo the active cluster the user has selected |=== -[discrete] + == `consoleFetchText` A custom wrapper around `fetch` that adds console specific headers and allows for retries and timeouts. It also validates the response status code and throws appropriate error or logs out the user if required. It returns the response as a text. Uses `consoleFetch` internally. It returns a promise that resolves to the response as text. @@ -812,7 +807,7 @@ A custom wrapper around `fetch` that adds console specific headers and allows fo the active cluster the user has selected |=== -[discrete] + == `getConsoleRequestHeaders` A function that creates impersonation and multicluster related headers for API requests using current redux state. It returns an object containing the appropriate impersonation and clustr requst headers, based on redux state. @@ -824,7 +819,7 @@ A function that creates impersonation and multicluster related headers for API r targetCluster |=== -[discrete] + == `k8sGetResource` It fetches a resource from the cluster, based on the provided options. If the name is provided it returns one resource else it returns all the resources matching the model. It returns a promise that resolves to the response as JSON object with a resource if the name is providedelse it returns all the resources matching the @@ -852,7 +847,7 @@ URL. 
request headers, method, redirect, etc. See link:{power-bi-url}[Interface RequestInit] for more. |=== -[discrete] + == `k8sCreateResource` It creates a resource in the cluster, based on the provided options. It returns a promise that resolves to the response of the resource created. In case of failure promise gets rejected with HTTP error response. @@ -872,7 +867,7 @@ It creates a resource in the cluster, based on the provided options. It returns URL. |=== -[discrete] + == `k8sUpdateResource` It updates the entire resource in the cluster, based on providedoptions. When a client needs to replace an existing resource entirely, they can use k8sUpdate. Alternatively can use k8sPatch to perform the partial update. It returns a promise that resolves to the response of the resource updated. In case of failure promise gets rejected with HTTP error response. @@ -897,7 +892,7 @@ cluster-scoped resources. URL. |=== -[discrete] + == `k8sPatchResource` It patches any resource in the cluster, based on provided options. When a client needs to perform the partial update, they can use @@ -921,7 +916,7 @@ with the operation, path, and value. URL. |=== -[discrete] + == `k8sDeleteResource` It deletes resources from the cluster, based on the provided model, resource. The garbage collection works based on `Foreground`|`Background` can be configured with propagationPolicy property in provided model or passed in json. It returns a promise that resolves to the response of kind Status. In case of failure promise gets rejected with HTTP error response. @@ -952,7 +947,7 @@ request headers, method, redirect, etc. See link:{power-bi-url}[Interface Reques explicitly if provided else will default to model's "propagationPolicy". |=== -[discrete] + == `k8sListResource` Lists the resources as an array in the cluster, based on provided options. It returns a promise that resolves to the response. @@ -971,12 +966,12 @@ URL and can pass label selector's as well with key "labelSelector". 
request headers, method, redirect, etc. See link:{power-bi-url}[Interface RequestInit] for more. |=== -[discrete] + == `k8sListResourceItems` Same interface as k8sListResource but returns the sub items. It returns the apiVersion for the model, i.e., `group/version`. -[discrete] + == `getAPIVersionForModel` Provides apiVersion for a k8s model. @@ -987,7 +982,7 @@ Provides apiVersion for a k8s model. |`model` |k8s model |=== -[discrete] + == `getGroupVersionKindForResource` Provides a group, version, and kind for a resource. It returns the group, version, kind for the provided resource. If the resource does not have an API group, group "core" will be returned. If the resource has an invalid apiVersion, then it will throw an Error. @@ -998,7 +993,7 @@ Provides a group, version, and kind for a resource. It returns the group, versio |`resource` |k8s resource |=== -[discrete] + == `getGroupVersionKindForModel` Provides a group, version, and kind for a k8s model. This returns the group, version, kind for the provided model. If the model does not have an apiGroup, group "core" will be returned. @@ -1009,7 +1004,7 @@ Provides a group, version, and kind for a k8s model. This returns the group, ver |`model` |k8s model |=== -[discrete] + == `StatusPopupSection` Component that shows the status in a popup window. Helpful component for building `console.dashboards/overview/health/resource` extensions. @@ -1038,7 +1033,7 @@ Component that shows the status in a popup window. Helpful component for buildin |`children` |(optional) children for the popup |=== -[discrete] + == `StatusPopupItem` Status element used in status popup; used in `StatusPopupSection`. @@ -1067,7 +1062,7 @@ Status element used in status popup; used in `StatusPopupSection`. |`children` |child elements |=== -[discrete] + == `Overview` Creates a wrapper component for a dashboard. @@ -1087,7 +1082,7 @@ Creates a wrapper component for a dashboard. 
|`children` |(optional) elements of the dashboard |=== -[discrete] + == `OverviewGrid` Creates a grid of card elements for a dashboard; used within `Overview`. @@ -1108,7 +1103,7 @@ Creates a grid of card elements for a dashboard; used within `Overview`. |`rightCards` |(optional) cards for right side of grid |=== -[discrete] + == `InventoryItem` Creates an inventory card item. @@ -1132,7 +1127,7 @@ Creates an inventory card item. |`children` |elements to render inside the item |=== -[discrete] + == `InventoryItemTitle` Creates a title for an inventory card item; used within `InventoryItem`. @@ -1156,7 +1151,7 @@ Creates a title for an inventory card item; used within `InventoryItem`. |`children` |elements to render inside the title |=== -[discrete] + == `InventoryItemBody` Creates the body of an inventory card; used within `InventoryCard` and can be used with `InventoryTitle`. @@ -1181,7 +1176,7 @@ Creates the body of an inventory card; used within `InventoryCard` and can be us |`error` |elements of the div |=== -[discrete] + == `InventoryItemStatus` Creates a count and icon for an inventory card with optional link address; used within `InventoryItemBody` @@ -1207,7 +1202,7 @@ Creates a count and icon for an inventory card with optional link address; used |`linkTo` |(optional) link address |=== -[discrete] + == `InventoryItemLoading` Creates a skeleton container for when an inventory card is loading; used with `InventoryItem` and related components @@ -1227,7 +1222,7 @@ return ( ) ---- -[discrete] + == `useFlag` Hook that returns the given feature flag from FLAGS redux state. It returns the boolean value of the requested feature flag or undefined. @@ -1238,7 +1233,7 @@ Hook that returns the given feature flag from FLAGS redux state. It returns the |`flag` |The feature flag to return |=== -[discrete] + == `CodeEditor` A basic lazy loaded Code editor with hover help and completion. @@ -1269,7 +1264,7 @@ A basic lazy loaded Code editor with hover help and completion. 
|=== -[discrete] + == `ResourceYAMLEditor` A lazy loaded YAML editor for Kubernetes resources with hover help and completion. The component use the YAMLEditor and add on top of it more functionality likeresource update handling, alerts, save, cancel and reload buttons, accessibility and more. Unless `onSave` callback is provided, the resource update is automatically handled.It should be wrapped in a `React.Suspense` component. @@ -1298,7 +1293,7 @@ the editor. This prop is used only during the initial render default update performed on the resource by the editor |=== -[discrete] + == `ResourceEventStream` A component to show events related to a particular resource. @@ -1316,7 +1311,7 @@ return |`resource` |An object whose related events should be shown. |=== -[discrete] + == `usePrometheusPoll` Sets up a poll to Prometheus for a single query. It returns a tuple containing the query response, a boolean flag indicating whether the response has completed, and any errors encountered during the request or post-processing of the request. @@ -1344,7 +1339,7 @@ of the query range |`\{string} [options.timeout]` | (optional) a search param to append |=== -[discrete] + == `Timestamp` A component to render timestamp. The timestamps are synchronized between invidual instances of the Timestamp component. The provided timestamp is formatted according to user locale. @@ -1363,7 +1358,6 @@ tooltip. |`className` |additional class name for the component. |=== -[discrete] == `useModal` A hook to launch Modals. @@ -1374,7 +1368,7 @@ A hook to launch Modals. const context: AppPage: React.FC = () => {
const [launchModal] = useModal();
const onClick = () => launchModal(ModalComponent);
return (

)
}
` ---- -[discrete] + == `ActionServiceProvider` Component that allows to receive contributions from other plugins for the `console.action/provider` extension type. @@ -1401,7 +1395,7 @@ Component that allows to receive contributions from other plugins for the `conso |`context` |Object with contextId and optional plugin data |=== -[discrete] + == `NamespaceBar` A component that renders a horizontal toolbar with a namespace dropdown menu in the leftmost position. Additional components can be passed in as children and will be rendered to the right of the namespace dropdown. This component is designed to be used at the top of the page. It should be used on pages where the user needs to be able to change the active namespace, such as on pages with k8s resources. @@ -1440,7 +1434,7 @@ dropdown and has no effect on child components. toolbar to the right of the namespace dropdown. |=== -[discrete] + == `ErrorBoundaryFallbackPage` Creates full page ErrorBoundaryFallbackPage component to display the "Oh no! Something went wrong." message along with the stack trace and other helpful debugging information. This is to be used inconjunction with an component. @@ -1468,7 +1462,7 @@ Creates full page ErrorBoundaryFallbackPage component to display the "Oh no! Som |`title` |title to render as the header of the error boundary page |=== -[discrete] + == `QueryBrowser` A component that renders a graph of the results from a Prometheus PromQL query along with controls for interacting with the graph. @@ -1511,7 +1505,7 @@ A component that renders a graph of the results from a Prometheus PromQL query a |`units` |(optional) Units to display on the Y-axis and in the tooltip. |=== -[discrete] + == `useAnnotationsModal` A hook that provides a callback to launch a modal for editing Kubernetes resource annotations. @@ -1535,7 +1529,7 @@ const PodAnnotationsButton = ({ pod }) => { .Returns A function which will launch a modal for editing a resource's annotations. 
-[discrete] + == `useDeleteModal` A hook that provides a callback to launch a modal for deleting a resource. @@ -1563,7 +1557,7 @@ const DeletePodButton = ({ pod }) => { .Returns A function which will launch a modal for deleting a resource. -[discrete] + == `useLabelsModel` A hook that provides a callback to launch a modal for editing Kubernetes resource labels. @@ -1587,7 +1581,7 @@ const PodLabelsButton = ({ pod }) => { .Returns A function which will launch a modal for editing a resource's labels. -[discrete] + == `useActiveNamespace` Hook that provides the currently active namespace and a callback for setting the active namespace. @@ -1611,7 +1605,6 @@ const Component: React.FC = (props) => { .Returns A tuple containing the current active namespace and setter callback. -[discrete] == `PerspectiveContext` Deprecated: Use the provided `usePerspectiveContext` instead. Creates the perspective context. @@ -1622,7 +1615,7 @@ Deprecated: Use the provided `usePerspectiveContext` instead. Creates the perspe |`PerspectiveContextType` |object with active perspective and setter |=== -[discrete] + == `useAccessReviewAllowed` Deprecated: Use `useAccessReview` from `@console/dynamic-plugin-sdk` instead. Hook that provides allowed status about user access to a given resource. It returns the `isAllowed` boolean value. @@ -1634,7 +1627,7 @@ Deprecated: Use `useAccessReview` from `@console/dynamic-plugin-sdk` instead. Ho |`impersonate` |impersonation details |=== -[discrete] + == `useSafetyFirst` Deprecated: This hook is not related to console functionality. Hook that ensures a safe asynchronnous setting of React state in case a given component could be unmounted. It returns an array with a pair of state value and its set function. @@ -1647,7 +1640,6 @@ Deprecated: This hook is not related to console functionality. Hook that ensures :!power-bi-url: -[discrete] == `YAMLEditor` Deprecated: A basic lazy loaded YAML editor with hover help and completion. 
@@ -1683,4 +1675,4 @@ section on top of the editor. |`ref` |React reference to `{ editor?: IStandaloneCodeEditor }`. Using the `editor` property, you are able to access to all methods to control the editor. -|=== \ No newline at end of file +|=== diff --git a/modules/dynamic-plugin-sdk-extensions.adoc b/modules/dynamic-plugin-sdk-extensions.adoc index 12eadcc03e7d..3638cb0f86f8 100644 --- a/modules/dynamic-plugin-sdk-extensions.adoc +++ b/modules/dynamic-plugin-sdk-extensions.adoc @@ -6,7 +6,7 @@ [id="dynamic-plugin-sdk-extensions_{context}"] = Dynamic plugin extension types -[discrete] + == `console.action/filter` `ActionFilter` can be used to filter an action. @@ -26,7 +26,7 @@ remove the `ModifyCount` action from a deployment with a horizontal pod autoscaler (HPA). |=== -[discrete] + == `console.action/group` `ActionGroup` contributes an action group that can also be a submenu. @@ -50,7 +50,7 @@ item referenced here. For arrays, the first one found in order is used. The `insertBefore` value takes precedence. |=== -[discrete] + == `console.action/provider` `ActionProvider` contributes a hook that returns list of actions for specific context. @@ -66,7 +66,7 @@ that returns actions for the given scope. If `contextId` = `resource`, then the scope will always be a Kubernetes resource object. |=== -[discrete] + == `console.action/resource-provider` `ResourceActionProvider` contributes a hook that returns list of actions for specific resource model. @@ -81,7 +81,7 @@ provider provides actions for. which returns actions for the given resource model |=== -[discrete] + == `console.alert-action` This extension can be used to trigger a specific action when a specific Prometheus alert is observed by the Console based on its `rule.name` value. 
@@ -96,7 +96,7 @@ This extension can be used to trigger a specific action when a specific Promethe |`action` |`CodeRef<(alert: any) => void>` |no | Function to perform side effect | |=== -[discrete] + == `console.catalog/item-filter` This extension can be used for plugins to contribute a handler that can filter specific catalog items. For example, the plugin can contribute a filter that filters helm charts from specific provider. @@ -114,7 +114,7 @@ of a specific type. Value is a function that takes `CatalogItem[]` and returns a subset based on the filter criteria. |=== -[discrete] + == `console.catalog/item-metadata` This extension can be used to contribute a provider that adds extra metadata to specific catalog items. @@ -132,7 +132,7 @@ catalog this provider contributes to. |no |A hook which returns a function that will be used to provide metadata to catalog items of a specific type. |=== -[discrete] + == `console.catalog/item-provider` This extension allows plugins to contribute a provider for a catalog item type. For example, a Helm Plugin can add a provider that fetches all the Helm Charts. This extension can also be used by other plugins to add more items to a specific catalog item type. @@ -157,7 +157,7 @@ Higher priority providers may override catalog items provided by other providers. |=== -[discrete] + == `console.catalog/item-type` This extension allows plugins to contribute a new type of catalog item. For example, a Helm plugin can define a new catalog item type as HelmCharts that it wants to contribute to the Developer Catalog. @@ -182,7 +182,7 @@ the catalog item. to the catalog item. |=== -[discrete] + == `console.catalog/item-type-metadata` This extension allows plugins to contribute extra metadata like custom filters or groupings for any catalog item type. For example, a plugin can attach a custom filter for HelmCharts that can filter based on chart provider. @@ -199,7 +199,7 @@ the catalog item. to the catalog item. 
|=== -[discrete] + == `console.cluster-overview/inventory-item` Adds a new inventory item into cluster overview page. @@ -211,7 +211,7 @@ Adds a new inventory item into cluster overview page. be rendered. |=== -[discrete] + == `console.cluster-overview/multiline-utilization-item` Adds a new cluster overview multi-line utilization item. @@ -231,7 +231,7 @@ utilization query. Top consumer popover instead of plain value. |=== -[discrete] + == `console.cluster-overview/utilization-item` Adds a new cluster overview utilization item. @@ -257,7 +257,7 @@ query. consumer popover instead of plain value. |=== -[discrete] + == `console.context-provider` Adds a new React context provider to the web console application root. @@ -269,7 +269,6 @@ Adds a new React context provider to the web console application root. |`useValueHook` |`CodeRef<() => T>` |no |Hook for the Context value. |=== -[discrete] == `console.dashboards/card` Adds a new dashboard card. @@ -290,7 +289,7 @@ component. Ignored for small screens; defaults to `12`. |=== -[discrete] + == `console.dashboards/custom/overview/detail/item` Adds an item to the Details card of Overview Dashboard. @@ -309,7 +308,7 @@ Adds an item to the Details card of Overview Dashboard. | `error` | `CodeRef<() => string>` | yes | Function returning errors to be displayed by the component |=== -[discrete] + == `console.dashboards/overview/activity/resource` Adds an activity to the Activity Card of Overview Dashboard where the triggering of activity is based on watching a Kubernetes resource. @@ -331,7 +330,7 @@ every resource represents activity. the given action, which will be used for ordering. |=== -[discrete] + == `console.dashboards/overview/health/operator` Adds a health subsystem to the status card of the *Overview* dashboard, where the source of status is a Kubernetes REST API. @@ -356,7 +355,7 @@ provided, then a list page of the first resource from resources prop is used. 
|=== -[discrete] + == `console.dashboards/overview/health/prometheus` Adds a health subsystem to the status card of Overview dashboard where the source of status is Prometheus. @@ -385,7 +384,7 @@ link, which opens a pop-up menu with the given content. topology for which the subsystem should be hidden. |=== -[discrete] + == `console.dashboards/overview/health/resource` Adds a health subsystem to the status card of Overview dashboard where the source of status is a Kubernetes Resource. @@ -407,7 +406,7 @@ opens a pop-up menu with the given content. |`popupTitle` |`string` |yes |The title of the popover. |=== -[discrete] + == `console.dashboards/overview/health/url` Adds a health subsystem to the status card of Overview dashboard where the source of status is a Kubernetes REST API. @@ -431,7 +430,7 @@ represented as a link which opens popup with given content. |`popupTitle` |`string` |yes |The title of the popover. |=== -[discrete] + == `console.dashboards/overview/inventory/item` Adds a resource tile to the overview inventory card. @@ -449,7 +448,7 @@ various statuses to groups. resources which will be fetched and passed to the `mapper` function. |=== -[discrete] + == `console.dashboards/overview/inventory/item/group` Adds an inventory status group. @@ -464,7 +463,7 @@ Adds an inventory status group. |no |React component representing the status group icon. |=== -[discrete] + == `console.dashboards/overview/inventory/item/replacement` Replaces an overview inventory card. @@ -481,7 +480,7 @@ various statuses to groups. resources which will be fetched and passed to the `mapper` function. |=== -[discrete] + == `console.dashboards/overview/prometheus/activity/resource` Adds an activity to the Activity Card of Prometheus Overview Dashboard where the triggering of activity is based on watching a Kubernetes resource. @@ -499,7 +498,7 @@ Adds an activity to the Activity Card of Prometheus Overview Dashboard where the action. 
If not defined, every resource represents activity. |=== -[discrete] + == `console.dashboards/project/overview/item` Adds a resource tile to the project overview inventory card. @@ -517,7 +516,7 @@ various statuses to groups. resources which will be fetched and passed to the `mapper` function. |=== -[discrete] + == `console.dashboards/tab` Adds a new dashboard tab, placed after the *Overview* tab. @@ -533,7 +532,7 @@ and when adding cards to this tab. |`title` |`string` |no |The title of the tab. |=== -[discrete] + == `console.file-upload` This extension can be used to provide a handler for the file drop action on specific file extensions. @@ -547,7 +546,7 @@ This extension can be used to provide a handler for the file drop action on spec file drop action. |=== -[discrete] + == `console.flag` Gives full control over the web console feature flags. @@ -558,7 +557,7 @@ Gives full control over the web console feature flags. |`handler` |`CodeRef` |no |Used to set or unset arbitrary feature flags. |=== -[discrete] + == `console.flag/hookProvider` Gives full control over the web console feature flags with hook handlers. @@ -569,7 +568,7 @@ Gives full control over the web console feature flags with hook handlers. |`handler` |`CodeRef` |no |Used to set or unset arbitrary feature flags. |=== -[discrete] + == `console.flag/model` Adds a new web console feature flag driven by the presence of a `CustomResourceDefinition` (CRD) object on the cluster. @@ -583,7 +582,7 @@ Adds a new web console feature flag driven by the presence of a `CustomResourceD CRD. |=== -[discrete] + == `console.global-config` This extension identifies a resource used to manage the configuration of the cluster. A link to the resource will be added to the *Administration* -> *Cluster Settings* -> *Configuration* page. @@ -603,7 +602,7 @@ config resource. instance. 
|=== -[discrete] + == `console.model-metadata` Customize the display of models by overriding values retrieved and generated through API discovery. @@ -629,7 +628,7 @@ provided. uppercase characters in `kind`, up to 4 characters long. Requires that `kind` is provided. |=== -[discrete] + == `console.navigation/href` This extension can be used to contribute a navigation item that points to a specific link in the UI. @@ -667,7 +666,7 @@ item referenced here. For arrays, the first one found in order is used. |`prefixNamespaced` |`boolean` |yes |If `true`, adds `/k8s/ns/active-namespace` to the beginning. |=== -[discrete] + == `console.navigation/resource-cluster` This extension can be used to contribute a navigation item that points to a cluster resource details page. The K8s model of that resource can be used to define the navigation item. @@ -703,7 +702,7 @@ item referenced here. For arrays, the first one found in order is used. name of the link will equal the plural value of the model. |=== -[discrete] + == `console.navigation/resource-ns` This extension can be used to contribute a navigation item that points to a namespaced resource details page. The K8s model of that resource can be used to define the navigation item. @@ -739,7 +738,7 @@ item referenced here. For arrays, the first one found in order is used. name of the link will equal the plural value of the model. |=== -[discrete] + == `console.navigation/section` This extension can be used to define a new section of navigation items in the navigation tab. @@ -766,7 +765,7 @@ item referenced here. For arrays, the first one found in order is used. separator will be shown above the section. |=== -[discrete] + == `console.navigation/separator` This extension can be used to add a separator between navigation items in the navigation. @@ -793,7 +792,7 @@ item referenced here. For arrays, the first one found in order is used. `insertBefore` takes precedence. 
|=== -[discrete] + == `console.page/resource/details` [cols=",,,",options="header",] @@ -807,7 +806,7 @@ resource page links to. |no |The component to be rendered when the route matches. |=== -[discrete] + == `console.page/resource/list` Adds new resource list page to Console router. @@ -823,7 +822,7 @@ resource page links to. |no |The component to be rendered when the route matches. |=== -[discrete] + == `console.page/route` Adds a new page to the web console router. See link:https://v5.reactrouter.com/[React Router]. @@ -845,7 +844,7 @@ belongs to. If not specified, contributes to all perspectives. the `location.pathname` exactly. |=== -[discrete] + == `console.page/route/standalone` Adds a new standalone page, rendered outside the common page layout, to the web console router. See link:https://v5.reactrouter.com/[React Router]. @@ -864,7 +863,7 @@ Adds a new standalone page, rendered outside the common page layout, to the web the `location.pathname` exactly. |=== -[discrete] + == `console.perspective` This extension contributes a new perspective to the console, which enables customization of the navigation menu. @@ -895,7 +894,7 @@ the nav |The hook to detect default perspective |=== -[discrete] + == `console.project-overview/inventory-item` Adds a new inventory item into the *Project Overview* page. @@ -907,7 +906,7 @@ Adds a new inventory item into the *Project Overview* page. |no |The component to be rendered. |=== -[discrete] + == `console.project-overview/utilization-item` Adds a new project overview utilization item. @@ -935,7 +934,7 @@ query. |`CodeRef>` |yes |Shows the top consumer popover instead of plain value. |=== -[discrete] + == `console.pvc/alert` This extension can be used to contribute custom alerts on the PVC details page. @@ -947,7 +946,7 @@ This extension can be used to contribute custom alerts on the PVC details page. |no |The alert component. 
|=== -[discrete] + == `console.pvc/create-prop` This extension can be used to specify additional properties that will be used when creating PVC resources on the PVC list page. @@ -959,7 +958,7 @@ This extension can be used to specify additional properties that will be used wh |`path` |`string` |no |Path for the create prop action. |=== -[discrete] + == `console.pvc/delete` This extension allows hooking into deleting PVC resources. It can provide an alert with additional information and custom PVC delete logic. @@ -977,7 +976,7 @@ This extension allows hooking into deleting PVC resources. It can provide an ale |no |Alert component to show additional information. |=== -[discrete] + == `console.pvc/status` [cols=",,,",options="header",] @@ -992,7 +991,7 @@ This extension allows hooking into deleting PVC resources. It can provide an ale |Predicate that tells whether to render the status component or not. |=== -[discrete] + == `console.redux-reducer` Adds new reducer to Console Redux store which operates on `plugins.` substate. @@ -1007,7 +1006,7 @@ substate within the Redux state object. function, operating on the reducer-managed substate. |=== -[discrete] + == `console.resource/create` This extension allows plugins to provide a custom component (i.e., wizard or form) for specific resources, which will be rendered, when users try to create a new resource instance. @@ -1023,7 +1022,6 @@ resource page will be rendered component to be rendered when the model matches |=== -[discrete] == `console.storage-class/provisioner` Adds a new storage class provisioner as an option during storage class creation. @@ -1039,7 +1037,7 @@ Adds a new storage class provisioner as an option during storage class creation. |Other provisioner type |=== -[discrete] + == `console.storage-provider` This extension can be used to contribute a new storage provider to select, when attaching storage and a provider specific component. 
@@ -1054,7 +1052,7 @@ This extension can be used to contribute a new storage provider to select, when |no | Provider specific component to render. | |=== -[discrete] + == `console.tab` Adds a tab to a horizontal nav matching the `contextId`. @@ -1074,7 +1072,7 @@ Adds a tab to a horizontal nav matching the `contextId`. |no |Tab content component. |=== -[discrete] + == `console.tab/horizontalNav` This extension can be used to add a tab on the resource details page. @@ -1093,7 +1091,7 @@ horizontal tab. It takes tab name as name and href of the tab |no |The component to be rendered when the route matches. |=== -[discrete] + == `console.telemetry/listener` This component can be used to register a listener function receiving telemetry events. These events include user identification, page navigation, and other application specific events. The listener may use this data for reporting and analytics purposes. @@ -1105,7 +1103,7 @@ This component can be used to register a listener function receiving telemetry e events |=== -[discrete] + == `console.topology/adapter/build` `BuildAdapter` contributes an adapter to adapt element to data that can be used by the Build component. @@ -1118,7 +1116,7 @@ events |no |Adapter to adapt element to data that can be used by Build component. |=== -[discrete] + == `console.topology/adapter/network` `NetworkAdapater` contributes an adapter to adapt element to data that can be used by the `Networking` component. @@ -1131,7 +1129,7 @@ events |no |Adapter to adapt element to data that can be used by Networking component. |=== -[discrete] + == `console.topology/adapter/pod` `PodAdapter` contributes an adapter to adapt element to data that can be used by the `Pod` component. @@ -1144,7 +1142,7 @@ events |no |Adapter to adapt element to data that can be used by Pod component. | |=== -[discrete] + == `console.topology/component/factory` Getter for a `ViewComponentFactory`. @@ -1155,7 +1153,7 @@ Getter for a `ViewComponentFactory`. 
|`getFactory` |`CodeRef` |no |Getter for a `ViewComponentFactory`. |=== -[discrete] + == `console.topology/create/connector` Getter for the create connector function. @@ -1167,7 +1165,7 @@ Getter for the create connector function. the create connector function. |=== -[discrete] + == `console.topology/data/factory` Topology Data Model Factory Extension @@ -1195,7 +1193,7 @@ for function to determine if a resource is depicted by this model factory. |Getter for function to reconcile data model after all extensions' models have loaded. |=== -[discrete] + == `console.topology/decorator/provider` Topology Decorator Provider Extension @@ -1209,7 +1207,7 @@ Topology Decorator Provider Extension |`decorator` |`CodeRef` |no |Decorator specific to the extension | |=== -[discrete] + == `console.topology/details/resource-alert` `DetailsResourceAlert` contributes an alert for specific topology context or graph element. @@ -1225,7 +1223,7 @@ alert should not be shown after dismissed. |no |Hook to return the contents of the alert. |=== -[discrete] + == `console.topology/details/resource-link` `DetailsResourceLink` contributes a link for specific topology context or graph element. @@ -1240,7 +1238,7 @@ alert should not be shown after dismissed. chance to create the link. |=== -[discrete] + == `console.topology/details/tab` `DetailsTab` contributes a tab for the topology details panel. @@ -1260,7 +1258,7 @@ item referenced here. For arrays, the first one found in order is used. The `insertBefore` value takes precedence. |=== -[discrete] + == `console.topology/details/tab-section` `DetailsTabSection` contributes a section for a specific tab in the topology details panel. @@ -1289,7 +1287,7 @@ item referenced here. For arrays, the first one found in order is used. The `insertBefore` value takes precedence. 
|=== -[discrete] + == `console.topology/display/filters` Topology Display Filters Extension @@ -1301,7 +1299,7 @@ Topology Display Filters Extension |`applyDisplayOptions` |`CodeRef` |no | Function to apply filters to the model |=== -[discrete] + == `console.topology/relationship/provider` Topology relationship provider connector extension @@ -1315,7 +1313,7 @@ Topology relationship provider connector extension |`priority` |`number` |no |Priority for relationship, higher will be preferred in case of multiple |=== -[discrete] + == `console.user-preference/group` This extension can be used to add a group on the console user-preferences page. It will appear as a vertical tab option on the console user-preferences page. @@ -1334,7 +1332,7 @@ this group should be placed this group should be placed |=== -[discrete] + == `console.user-preference/item` This extension can be used to add an item to the user preferences group on the console user preferences page. @@ -1362,7 +1360,7 @@ this item should be placed this item should be placed |=== -[discrete] + == `console.yaml-template` YAML templates for editing resources via the yaml editor. @@ -1378,7 +1376,7 @@ YAML templates for editing resources via the yaml editor. to mark this as the default template. |=== -[discrete] + == `dev-console.add/action` This extension allows plugins to contribute an add action item to the add page of developer perspective. For example, a Serverless plugin can add a new action item for adding serverless functions to the add page of developer console. @@ -1403,7 +1401,7 @@ action would belong to. access review to control the visibility or enablement of the action. |=== -[discrete] + == `dev-console.add/action-group` This extension allows plugins to contibute a group in the add page of developer console. Groups can be referenced by actions, which will be grouped together in the add action page based on their extension definition. 
For example, a Serverless plugin can contribute a Serverless group and together with multiple add actions. @@ -1422,7 +1420,7 @@ group should be placed should be placed |=== -[discrete] + == `dev-console.import/environment` This extension can be used to specify extra build environment variable fields under the builder image selector in the developer console git import form. When set, the fields will override environment variables of the same name in the build section. @@ -1438,7 +1436,7 @@ custom environment variables for |`environments` |`ImageEnvironment[]` |no |List of environment variables |=== -[discrete] + == `console.dashboards/overview/detail/item` Deprecated. use `CustomOverviewDetailItem` type instead @@ -1450,7 +1448,7 @@ Deprecated. use `CustomOverviewDetailItem` type instead on the `DetailItem` component |=== -[discrete] + == `console.page/resource/tab` Deprecated. Use `console.tab/horizontalNav` instead. Adds a new resource tab page to Console router. diff --git a/modules/etcd-operator.adoc b/modules/etcd-operator.adoc index 32882175e3c0..aeb457ea01b5 100644 --- a/modules/etcd-operator.adoc +++ b/modules/etcd-operator.adoc @@ -6,12 +6,12 @@ = etcd cluster Operator The etcd cluster Operator automates etcd cluster scaling, enables etcd monitoring and metrics, and simplifies disaster recovery procedures. 
-[discrete] + == Project link:https://github.com/openshift/cluster-etcd-operator/[cluster-etcd-operator] -[discrete] + == CRDs * `etcds.operator.openshift.io` @@ -19,7 +19,7 @@ link:https://github.com/openshift/cluster-etcd-operator/[cluster-etcd-operator] ** CR: `etcd` ** Validation: Yes -[discrete] + == Configuration objects [source,terminal] diff --git a/modules/go-deleting-argocd-instance.adoc b/modules/go-deleting-argocd-instance.adoc index 8f6458dcaa9c..00f6226f3642 100644 --- a/modules/go-deleting-argocd-instance.adoc +++ b/modules/go-deleting-argocd-instance.adoc @@ -8,7 +8,7 @@ Delete the Argo CD instances added to the namespace of the GitOps Operator. -[discrete] + .Procedure . In the *Terminal* type the following command: diff --git a/modules/go-run-argo-cd-instance-on-infrastructure-nodes.adoc b/modules/go-run-argo-cd-instance-on-infrastructure-nodes.adoc index 6f2505c0ef9f..5891f629b938 100644 --- a/modules/go-run-argo-cd-instance-on-infrastructure-nodes.adoc +++ b/modules/go-run-argo-cd-instance-on-infrastructure-nodes.adoc @@ -9,7 +9,7 @@ The default Argo CD instance and the accompanying controllers, installed by the {gitops-title} Operator, can now run on the infrastructure nodes of the cluster by setting a simple configuration toggle. -[discrete] + .Procedure . Label the existing nodes: + diff --git a/modules/go-settings-for-environment-labels-and-annotations.adoc b/modules/go-settings-for-environment-labels-and-annotations.adoc index 3dcfe4774635..46b1f6c99bfd 100644 --- a/modules/go-settings-for-environment-labels-and-annotations.adoc +++ b/modules/go-settings-for-environment-labels-and-annotations.adoc @@ -8,7 +8,7 @@ This section provides reference settings for environment labels and annotations required to display an environment application in the *Environments* page, in the *Developer* perspective of the {product-title} web console. 
-[discrete] + == Environment labels The environment application manifest must contain `labels.openshift.gitops/environment` and `destination.namespace` fields. You must set identical values for the `` variable and the name of the environment application manifest. @@ -41,7 +41,7 @@ spec: ---- <1> The name of the environment application manifest. The value set is the same as the value of the `` variable. -[discrete] + == Environment annotations The environment namespace manifest must contain the `annotations.app.openshift.io/vcs-uri` and `annotations.app.openshift.io/vcs-ref` fields to specify the version controller code source of the application. You must set identical values for the `` variable and the name of the environment namespace manifest. diff --git a/modules/go-uninstalling-gitops-operator.adoc b/modules/go-uninstalling-gitops-operator.adoc index 37caa6949d1e..5a5bb6f4a378 100644 --- a/modules/go-uninstalling-gitops-operator.adoc +++ b/modules/go-uninstalling-gitops-operator.adoc @@ -6,7 +6,7 @@ [id='go-uninstalling-gitops-operator_{context}'] = Uninstalling the GitOps Operator -[discrete] + .Procedure . From the *Operators* -> *OperatorHub* page, use the *Filter by keyword* box to search for `{gitops-title} Operator` tile. diff --git a/modules/how-service-accounts-assume-aws-iam-roles-in-sre-owned-projects.adoc b/modules/how-service-accounts-assume-aws-iam-roles-in-sre-owned-projects.adoc index 466658abc7f7..a673d409a4c2 100644 --- a/modules/how-service-accounts-assume-aws-iam-roles-in-sre-owned-projects.adoc +++ b/modules/how-service-accounts-assume-aws-iam-roles-in-sre-owned-projects.adoc @@ -11,7 +11,7 @@ When you install a {product-title} cluster that uses the AWS Security Token Serv Cluster Operators use service accounts to assume IAM roles. When a service account assumes an IAM role, temporary STS credentials are provided for the service account to use in the cluster Operator's pod. 
If the assumed role has the necessary AWS privileges, the service account can run AWS SDK operations in the pod. -[discrete] + [id="workflow-for-assuming-aws-iam-roles-in-sre-owned-projects_{context}"] == Workflow for assuming AWS IAM roles in SRE owned projects diff --git a/modules/images-create-guide-general.adoc b/modules/images-create-guide-general.adoc index 9b6282647019..973d64954327 100644 --- a/modules/images-create-guide-general.adoc +++ b/modules/images-create-guide-general.adoc @@ -6,28 +6,28 @@ The following guidelines apply when creating a container image in general, and are independent of whether the images are used on {product-title}. -[discrete] + == Reuse images Wherever possible, base your image on an appropriate upstream image using the `FROM` statement. This ensures your image can easily pick up security fixes from an upstream image when it is updated, rather than you having to update your dependencies directly. In addition, use tags in the `FROM` instruction, for example, `rhel:rhel7`, to make it clear to users exactly which version of an image your image is based on. Using a tag other than `latest` ensures your image is not subjected to breaking changes that might go into the `latest` version of an upstream image. -[discrete] + == Maintain compatibility within tags When tagging your own images, try to maintain backwards compatibility within a tag. For example, if you provide an image named `image` and it currently includes version `1.0`, you might provide a tag of `image:v1`. When you update the image, as long as it continues to be compatible with the original image, you can continue to tag the new image `image:v1`, and downstream consumers of this tag are able to get updates without being broken. If you later release an incompatible update, then switch to a new tag, for example `image:v2`. This allows downstream consumers to move up to the new version at will, but not be inadvertently broken by the new incompatible image. 
Any downstream consumer using `image:latest` takes on the risk of any incompatible changes being introduced. -[discrete] + == Avoid multiple processes Do not start multiple services, such as a database and `SSHD`, inside one container. This is not necessary because containers are lightweight and can be easily linked together for orchestrating multiple processes. {product-title} allows you to easily colocate and co-manage related images by grouping them into a single pod. This colocation ensures the containers share a network namespace and storage for communication. Updates are also less disruptive as each image can be updated less frequently and independently. Signal handling flows are also clearer with a single process as you do not have to manage routing signals to spawned processes. -[discrete] + == Use `exec` in wrapper scripts Many images use wrapper scripts to do some setup before starting a process for the software being run. If your image uses such a script, that script uses `exec` so that the script's process is replaced by your software. If you do not use `exec`, then signals sent by your container runtime go to your wrapper script instead of your software's process. This is not what you want. @@ -42,7 +42,7 @@ Also see the https://felipec.wordpress.com/2013/11/04/init/["Demystifying the in systems. //// -[discrete] + == Clean temporary files Remove all temporary files you create during the build process. This also includes any files added with the `ADD` command. For example, run the `yum clean` command after performing `yum install` operations. @@ -68,7 +68,7 @@ The current container build process does not allow a command run in a later laye In addition, performing multiple commands in a single `RUN` statement reduces the number of layers in your image, which improves download and extraction time. -[discrete] + == Place instructions in the proper order The container builder reads the `Dockerfile` and runs the instructions from top to bottom. 
Every instruction that is successfully executed creates a layer which can be reused the next time this or another image is built. It is very important to place instructions that rarely change at the top of your `Dockerfile`. Doing so ensures the next builds of the same image are very fast because the cache is not invalidated by upper layer changes. @@ -95,7 +95,7 @@ RUN yum -y install mypackage && yum clean all -y Then each time you changed `myfile` and reran `podman build` or `docker build`, the `ADD` operation would invalidate the `RUN` layer cache, so the `yum` operation must be rerun as well. -[discrete] + == Mark important ports The EXPOSE instruction makes a port in the container available to the host system and other containers. While it is possible to specify that a port should be exposed with a `podman run` invocation, using the EXPOSE instruction in a `Dockerfile` makes it easier for both humans and software to use your image by explicitly declaring the ports your software needs to run: @@ -104,24 +104,24 @@ The EXPOSE instruction makes a port in the container available to the host syste * Exposed ports are present in the metadata for your image returned by `podman inspect`. * Exposed ports are linked when you link one container to another. -[discrete] + == Set environment variables It is good practice to set environment variables with the `ENV` instruction. One example is to set the version of your project. This makes it easy for people to find the version without looking at the `Dockerfile`. Another example is advertising a path on the system that could be used by another process, such as `JAVA_HOME`. -[discrete] + == Avoid default passwords Avoid setting default passwords. Many people extend the image and forget to remove or change the default password. This can lead to security issues if a user in production is assigned a well-known password. Passwords are configurable using an environment variable instead. 
If you do choose to set a default password, ensure that an appropriate warning message is displayed when the container is started. The message should inform the user of the value of the default password and explain how to change it, such as what environment variable to set. -[discrete] + == Avoid sshd It is best to avoid running `sshd` in your image. You can use the `podman exec` or `docker exec` command to access containers that are running on the local host. Alternatively, you can use the `oc exec` command or the `oc rsh` command to access containers that are running on the {product-title} cluster. Installing and running `sshd` in your image opens up additional vectors for attack and requirements for security patching. -[discrete] + == Use volumes for persistent data Images use a link:https://docs.docker.com/reference/builder/#volume[volume] for persistent data. This way {product-title} mounts the network storage to the node running the container, and if the container moves to a new node the storage is reattached to that node. By using the volume for all persistent storage needs, the content is preserved even if the container is restarted or moved. If your image writes data to arbitrary locations within the container, that content could not be preserved. diff --git a/modules/images-imagestream-import-import-mode.adoc b/modules/images-imagestream-import-import-mode.adoc index 54815378bebc..d460d7d0cf4b 100644 --- a/modules/images-imagestream-import-import-mode.adoc +++ b/modules/images-imagestream-import-import-mode.adoc @@ -44,7 +44,7 @@ $ oc import-image --from=/ The `--import-mode=` default value is `Legacy`. Excluding this value, or failing to specify either `Legacy` or `PreserveOriginal`, imports a single sub-manifest. An invalid import mode returns the following error: `error: valid ImportMode values are Legacy or PreserveOriginal`. 
==== -[discrete] + [id="images-imagestream-import-import-mode-limitations"] == Limitations diff --git a/modules/ingress-operator.adoc b/modules/ingress-operator.adoc index 0512ccd81023..1b9135acbdfe 100644 --- a/modules/ingress-operator.adoc +++ b/modules/ingress-operator.adoc @@ -7,12 +7,12 @@ The Ingress Operator configures and manages the {product-title} router. -[discrete] + == Project link:https://github.com/openshift/cluster-ingress-operator[openshift-ingress-operator] -[discrete] + == CRDs * `clusteringresses.ingress.openshift.io` @@ -20,7 +20,7 @@ link:https://github.com/openshift/cluster-ingress-operator[openshift-ingress-ope ** CR: `clusteringresses` ** Validation: No -[discrete] + == Configuration objects * Cluster config @@ -33,7 +33,7 @@ link:https://github.com/openshift/cluster-ingress-operator[openshift-ingress-ope $ oc get clusteringresses.ingress.openshift.io -n openshift-ingress-operator default -o yaml ---- -[discrete] + == Notes The Ingress Operator sets up the router in the `openshift-ingress` project and creates the deployment for the router: diff --git a/modules/insights-operator.adoc b/modules/insights-operator.adoc index af7fde91bc74..7a1c449ed61d 100644 --- a/modules/insights-operator.adoc +++ b/modules/insights-operator.adoc @@ -35,19 +35,19 @@ The Insights Operator gathers {product-title} configuration data and sends it to ifdef::operator-ref[] -[discrete] + == Project link:https://github.com/openshift/insights-operator[insights-operator] -[discrete] + == Configuration No configuration is required. endif::operator-ref[] -[discrete] + == Notes Insights Operator complements {product-title} Telemetry. 
diff --git a/modules/install-ibm-cloud-setting-up-ibm-cloud-infrastructure.adoc b/modules/install-ibm-cloud-setting-up-ibm-cloud-infrastructure.adoc index 3cb86f9c43dd..aafd05c42067 100644 --- a/modules/install-ibm-cloud-setting-up-ibm-cloud-infrastructure.adoc +++ b/modules/install-ibm-cloud-setting-up-ibm-cloud-infrastructure.adoc @@ -14,17 +14,17 @@ Red Hat supports IPMI and PXE on the `provisioning` network only. Red Hat has no You can customize {ibm-cloud-name} nodes using the {ibm-cloud-name} API. When creating {ibm-cloud-name} nodes, you must consider the following requirements. -[discrete] + == Use one data center per cluster All nodes in the {product-title} cluster must run in the same {ibm-cloud-name} data center. -[discrete] + == Create public and private VLANs Create all nodes with a single public VLAN and a single private VLAN. -[discrete] + == Ensure subnets have sufficient IP addresses {ibm-cloud-name} public VLAN subnets use a `/28` prefix by default, which provides 16 IP addresses. That is sufficient for a cluster consisting of three control plane nodes, four worker nodes, and two IP addresses for the API VIP and Ingress VIP on the `baremetal` network. For larger clusters, you might need a smaller prefix. @@ -41,7 +41,7 @@ Create all nodes with a single public VLAN and a single private VLAN. |256| `/24` |==== -[discrete] + == Configuring NICs {product-title} deploys with two networks: @@ -73,7 +73,7 @@ In the previous example, NIC1 on all control plane and worker nodes connects to Ensure PXE is enabled on the NIC used for the `provisioning` network and is disabled on all other NICs. ==== -[discrete] + == Configuring canonical names Clients access the {product-title} cluster nodes over the `baremetal` network. Configure {ibm-cloud-name} subdomains or subzones where the canonical name extension is the cluster name. 
@@ -88,7 +88,7 @@ For example: test-cluster.example.com ---- -[discrete] + == Creating DNS entries You must create DNS `A` record entries resolving to unused IP addresses on the public subnet for the following: @@ -125,7 +125,7 @@ The following table provides an example of fully qualified domain names. The API After provisioning the {ibm-cloud-name} nodes, you must create a DNS entry for the `api..` domain name on the external DNS because removing CoreDNS causes the local entry to disappear. Failure to create a DNS record for the `api..` domain name in the external DNS server prevents worker nodes from joining the cluster. ==== -[discrete] + == Network Time Protocol (NTP) Each {product-title} node in the cluster must have access to an NTP server. {product-title} nodes use NTP to synchronize their clocks. For example, cluster nodes use SSL certificates that require validation, which might fail if the date and time between the nodes are not in sync. @@ -135,7 +135,7 @@ Each {product-title} node in the cluster must have access to an NTP server. {pro Define a consistent clock date and time format in each cluster node's BIOS settings, or installation might fail. ==== -[discrete] + == Configure a DHCP server {ibm-cloud-bm} does not run DHCP on the public or private VLANs. After provisioning {ibm-cloud-name} nodes, you must set up a DHCP server for the public VLAN, which corresponds to {product-title}'s `baremetal` network. @@ -147,7 +147,7 @@ The IP addresses allocated to each node do not need to match the IP addresses al See the "Configuring the public subnet" section for details. -[discrete] + == Ensure BMC access privileges The "Remote management" page for each node on the dashboard contains the node's intelligent platform management interface (IPMI) credentials. The default IPMI privileges prevent the user from making certain boot target changes. You must change the privilege level to `OPERATOR` so that Ironic can make those changes. 
@@ -161,7 +161,7 @@ ipmi://:?privilegelevel=OPERATOR Alternatively, contact {ibm-cloud-name} support and request that they increase the IPMI privileges to `ADMINISTRATOR` for each node. -[discrete] + == Create bare metal servers Create bare metal servers in the link:https://cloud.ibm.com[{ibm-cloud-name} dashboard] by navigating to *Create resource* -> *Bare Metal Servers for Classic*. diff --git a/modules/installation-aws-user-infra-requirements.adoc b/modules/installation-aws-user-infra-requirements.adoc index b53fc09f6beb..fba502562e7a 100644 --- a/modules/installation-aws-user-infra-requirements.adoc +++ b/modules/installation-aws-user-infra-requirements.adoc @@ -34,7 +34,7 @@ Alternatively, you can manually create the components or you can reuse existing If you are working in a disconnected environment, you are unable to reach the public IP addresses for EC2, ELB, and S3 endpoints. Depending on the level to which you want to restrict internet traffic during the installation, the following configuration options are available: -[discrete] + [id="create-vpc-endpoints_{context}"] === Option 1: Create VPC endpoints @@ -46,12 +46,12 @@ Create a VPC endpoint and attach it to the subnets that the clusters are using. With this option, network traffic remains private between your VPC and the required AWS services. -[discrete] + [id="create-proxy-without-vpc-endpoints_{context}"] === Option 2: Create a proxy without VPC endpoints As part of the installation process, you can configure an HTTP or HTTPS proxy. With this option, internet traffic goes through the proxy to reach the required AWS services. -[discrete] + [id="create-proxy-with-vpc-endpoints_{context}"] === Option 3: Create a proxy with VPC endpoints As part of the installation process, you can configure an HTTP or HTTPS proxy with VPC endpoints. Create a VPC endpoint and attach it to the subnets that the clusters are using. 
Name the endpoints as follows: diff --git a/modules/installation-azure-regions.adoc b/modules/installation-azure-regions.adoc index 8850dfcf702a..ede300a570ca 100644 --- a/modules/installation-azure-regions.adoc +++ b/modules/installation-azure-regions.adoc @@ -11,7 +11,6 @@ The installation program dynamically generates the list of available Microsoft Azure regions based on your subscription. -[discrete] == Supported Azure public regions * `australiacentral` (Australia Central) @@ -60,7 +59,6 @@ The installation program dynamically generates the list of available Microsoft A * `westus2` (West US 2) * `westus3` (West US 3) -[discrete] == Supported Azure Government regions Support for the following Microsoft Azure Government (MAG) regions was added in {product-title} version 4.6: diff --git a/modules/installation-azure-user-defined-routing.adoc b/modules/installation-azure-user-defined-routing.adoc index eb7912308e9c..465d4c943144 100644 --- a/modules/installation-azure-user-defined-routing.adoc +++ b/modules/installation-azure-user-defined-routing.adoc @@ -35,7 +35,7 @@ There are several pre-existing networking setups that are supported for internet ifdef::restricted[] -[discrete] + == Restricted cluster with Azure Firewall You can use Azure Firewall to restrict the outbound routing for the Virtual Network (VNet) that is used to install the {product-title} cluster. For more information, see link:https://learn.microsoft.com/en-us/azure/aks/egress-outboundtype#deploy-a-cluster-with-outbound-type-of-udr-and-azure-firewall[providing user-defined routing with Azure Firewall]. You can create a {product-title} cluster in a restricted network by using VNet with Azure Firewall and configuring the user-defined routing. 
@@ -47,28 +47,28 @@ If you are using Azure Firewall for restricting internet access, you must set th endif::restricted[] ifdef::private[] -[discrete] + == Private cluster with network address translation You can use link:https://docs.microsoft.com/en-us/azure/virtual-network/nat-overview[Azure VNET network address translation (NAT)] to provide outbound internet access for the subnets in your cluster. You can reference link:https://docs.microsoft.com/en-us/azure/virtual-network/quickstart-create-nat-gateway-cli[Create a NAT gateway using Azure CLI] in the Azure documentation for configuration instructions. When using a VNet setup with Azure NAT and user-defined routing configured, you can create a private cluster with no public endpoints. -[discrete] + == Private cluster with Azure Firewall You can use Azure Firewall to provide outbound routing for the VNet used to install the cluster. You can learn more about link:https://docs.microsoft.com/en-us/azure/aks/egress-outboundtype#deploy-a-cluster-with-outbound-type-of-udr-and-azure-firewall[providing user-defined routing with Azure Firewall] in the Azure documentation. When using a VNet setup with Azure Firewall and user-defined routing configured, you can create a private cluster with no public endpoints. -[discrete] + == Private cluster with a proxy configuration You can use a proxy with user-defined routing to allow egress to the internet. You must ensure that cluster Operators do not access Azure APIs using a proxy; Operators must have access to Azure APIs outside of the proxy. When using the default route table for subnets, with `0.0.0.0/0` populated automatically by Azure, all Azure API requests are routed over Azure's internal network even though the IP addresses are public. As long as the Network Security Group rules allow egress to Azure API endpoints, proxies with user-defined routing configured allow you to create private clusters with no public endpoints. 
-[discrete] + == Private cluster with no internet access You can install a private network that restricts all access to the internet, except the Azure API. This is accomplished by mirroring the release image registry locally. Your cluster must have access to the following: diff --git a/modules/installation-custom-aws-vpc.adoc b/modules/installation-custom-aws-vpc.adoc index bc96eee16d51..abafcc7068f7 100644 --- a/modules/installation-custom-aws-vpc.adoc +++ b/modules/installation-custom-aws-vpc.adoc @@ -112,7 +112,7 @@ ifdef::aws-secret[] A cluster in an SC2S or C2S Region is unable to reach the public IP addresses for the EC2, ELB, and S3 endpoints. Depending on the level to which you want to restrict internet traffic during the installation, the following configuration options are available: endif::aws-secret[] -[discrete] + [id="create-vpc-endpoints_{context}"] === Option 1: Create VPC endpoints @@ -143,12 +143,12 @@ endif::aws-secret[] With this option, network traffic remains private between your VPC and the required AWS services. -[discrete] + [id="create-proxy-without-vpc-endpoints_{context}"] === Option 2: Create a proxy without VPC endpoints As part of the installation process, you can configure an HTTP or HTTPS proxy. With this option, internet traffic goes through the proxy to reach the required AWS services. -[discrete] + [id="create-proxy-with-vpc-endpoints_{context}"] === Option 3: Create a proxy with VPC endpoints As part of the installation process, you can configure an HTTP or HTTPS proxy with VPC endpoints. Create a VPC endpoint and attach it to the subnets that the clusters are using. Name the endpoints as follows: diff --git a/modules/installation-disk-partitioning.adoc b/modules/installation-disk-partitioning.adoc index 8c4f428934fb..5de7fd4ccbb3 100644 --- a/modules/installation-disk-partitioning.adoc +++ b/modules/installation-disk-partitioning.adoc @@ -30,7 +30,7 @@ Kubernetes supports only two file system partitions. 
If you add more than one pa ==== * Retain existing partitions: For a brownfield installation where you are reinstalling {product-title} on an existing node and want to retain data partitions installed from your previous operating system, there are both boot arguments and options to `coreos-installer` that allow you to retain existing data partitions. -[discrete] + = Creating a separate `/var` partition In general, disk partitioning for {product-title} should be left to the installer. However, there are cases where you might want to create separate partitions in a part of the filesystem that you expect to grow. diff --git a/modules/installation-network-user-infra.adoc b/modules/installation-network-user-infra.adoc index 8660785086bb..38484d3f48dc 100644 --- a/modules/installation-network-user-infra.adoc +++ b/modules/installation-network-user-infra.adoc @@ -241,7 +241,7 @@ ifdef::vsphere[] endif::[] ifndef::azure,gcp[] -[discrete] + == NTP configuration for user-provisioned infrastructure {product-title} clusters are configured to use a public Network Time Protocol (NTP) server by default. If you want to use a local enterprise NTP server, or if your cluster is being deployed in a disconnected network, you can configure the cluster to use a specific time server. For more information, see the documentation for _Configuring chrony time service_. diff --git a/modules/installation-process.adoc b/modules/installation-process.adoc index 23ef2fa631a8..574fa75e2918 100644 --- a/modules/installation-process.adoc +++ b/modules/installation-process.adoc @@ -45,7 +45,7 @@ The installation configuration files are all pruned when you run the installatio You cannot modify the parameters that you set during installation, but you can modify many cluster attributes after installation. 
==== -[discrete] + == The installation process with the {ai-full} Installation with the link:https://access.redhat.com/documentation/en-us/assisted_installer_for_openshift_container_platform[{ai-full}] involves creating a cluster configuration interactively by using the web-based user interface or the RESTful API. The {ai-full} user interface prompts you for required values and provides reasonable default values for the remaining parameters, unless you change them in the user interface or with the API. The {ai-full} generates a discovery image, which you download and use to boot the cluster machines. The image installs {op-system} and an agent, and the agent handles the provisioning for you. You can install {product-title} with the {ai-full} and full integration on Nutanix, vSphere, and bare metal. Additionally, you can install {product-title} with the {ai-full} on other platforms without integration. @@ -54,14 +54,14 @@ Installation with the link:https://access.redhat.com/documentation/en-us/assiste If possible, use the {ai-full} feature to avoid having to download and configure the Agent-based Installer. -[discrete] + == The installation process with Agent-based infrastructure Agent-based installation is similar to using the {ai-full}, except that you must initially download and install the link:https://console.redhat.com/openshift/install/metal/agent-based[Agent-based Installer]. An Agent-based installation is useful when you want the convenience of the {ai-full}, but you need to install a cluster in a disconnected environment. If possible, use the Agent-based installation feature to avoid having to create a provisioner machine with a bootstrap VM, and then provision and maintain the cluster infrastructure. -[discrete] + == The installation process with installer-provisioned infrastructure The default installation type uses installer-provisioned infrastructure. 
By default, the installation program acts as an installation wizard, prompting you for values that it cannot determine on its own and providing reasonable default values for the remaining parameters. You can also customize the installation process to support advanced infrastructure scenarios. The installation program provisions the underlying infrastructure for the cluster. @@ -72,7 +72,7 @@ If possible, use this feature to avoid having to provision and maintain the clus With installer-provisioned infrastructure clusters, {product-title} manages all aspects of the cluster, including the operating system itself. Each machine boots with a configuration that references resources hosted in the cluster that it joins. This configuration allows the cluster to manage itself as updates are applied. -[discrete] + == The installation process with user-provisioned infrastructure You can also install {product-title} on infrastructure that you provide. You use the installation program to generate the assets that you require to provision the cluster infrastructure, create the cluster infrastructure, and then deploy the cluster to the infrastructure that you provided. @@ -86,7 +86,7 @@ If you do not use infrastructure that the installation program provisioned, you If your cluster uses user-provisioned infrastructure, you have the option of adding {op-system-base} compute machines to your cluster. -[discrete] + == Installation process details When a cluster is provisioned, each machine in the cluster requires information about the cluster. {product-title} uses a temporary bootstrap machine during initial configuration to provide the required information to the permanent control plane. The temporary bootstrap machine boots by using an Ignition config file that describes how to create the cluster. The bootstrap machine creates the control plane machines that make up the control plane. 
The control plane machines then create the compute machines, which are also known as worker machines. The following figure illustrates this process: diff --git a/modules/installation-requirements-user-infra-ibm-z-kvm.adoc b/modules/installation-requirements-user-infra-ibm-z-kvm.adoc index 0565f6cd1dd3..ba0d7c0bc02d 100644 --- a/modules/installation-requirements-user-infra-ibm-z-kvm.adoc +++ b/modules/installation-requirements-user-infra-ibm-z-kvm.adoc @@ -71,7 +71,7 @@ You can install {product-title} version {product-version} on the following {ibm- [id="minimum-ibm-z-system-requirements_{context}"] == Minimum {ibm-z-title} system environment -[discrete] + === Hardware requirements * The equivalent of six Integrated Facilities for Linux (IFL), which are SMT2 enabled, for each cluster. @@ -87,7 +87,7 @@ You can use dedicated or shared IFLs to assign sufficient compute resources. Res Since the overall performance of the cluster can be impacted, the LPARs that are used to set up the {product-title} clusters must provide sufficient compute capacity. In this context, LPAR weight management, entitlements, and CPU shares on the hypervisor level play an important role. ==== -[discrete] + === Operating system requirements * One LPAR running on {op-system-base} 8.6 or later with KVM, which is managed by libvirt @@ -142,13 +142,13 @@ Each cluster virtual machine must meet the following minimum requirements: [id="preferred-ibm-z-system-requirements_{context}"] == Preferred {ibm-z-title} system environment -[discrete] + === Hardware requirements * Three LPARS that each have the equivalent of six IFLs, which are SMT2 enabled, for each cluster. * Two network connections to both connect to the `LoadBalancer` service and to serve data for traffic outside the cluster. -[discrete] + === Operating system requirements * For high availability, two or three LPARs running on {op-system-base} 8.6 or later with KVM, which are managed by libvirt. 
diff --git a/modules/installation-user-infra-machines-static-network.adoc b/modules/installation-user-infra-machines-static-network.adoc index b920a033602b..95cc2a16a629 100644 --- a/modules/installation-user-infra-machines-static-network.adoc +++ b/modules/installation-user-infra-machines-static-network.adoc @@ -77,7 +77,7 @@ endif::ibm-z-kvm[] The following examples are the networking options for ISO installation. -[discrete] + [id="configuring-dhcp-or-static-ip-addresses_{context}"] === Configuring DHCP or static IP addresses @@ -101,7 +101,7 @@ nameserver=4.4.4.41 When you use DHCP to configure IP addressing for the {op-system} machines, the machines also obtain the DNS server information through DHCP. For DHCP-based deployments, you can define the DNS server address that is used by the {op-system} nodes through your DHCP server configuration. ==== -[discrete] + === Configuring an IP address without a static hostname You can configure an IP address without assigning a static hostname. If a static hostname is not set by the user, it will be picked up and automatically set by a reverse DNS lookup. To configure an IP address without a static hostname refer to the following example: @@ -118,7 +118,7 @@ ip=10.10.10.2::10.10.10.254:255.255.255.0::enp1s0:none nameserver=4.4.4.41 ---- -[discrete] + === Specifying multiple network interfaces You can specify multiple network interfaces by setting multiple `ip=` entries. @@ -129,7 +129,7 @@ ip=10.10.10.2::10.10.10.254:255.255.255.0:core0.example.com:enp1s0:none ip=10.10.10.3::10.10.10.254:255.255.255.0:core0.example.com:enp2s0:none ---- -[discrete] + === Configuring default gateway and route Optional: You can configure routes to additional networks by setting an `rd.route=` value. 
@@ -153,7 +153,7 @@ ip=::10.10.10.254:::: rd.route=20.20.20.0/24:20.20.20.254:enp2s0 ---- -[discrete] + === Disabling DHCP on a single interface You can disable DHCP on a single interface, such as when there are two or more network interfaces and only one interface is being used. In the example, the `enp1s0` interface has a static networking configuration and DHCP is disabled for `enp2s0`, which is not used: @@ -164,7 +164,7 @@ ip=10.10.10.2::10.10.10.254:255.255.255.0:core0.example.com:enp1s0:none ip=::::core0.example.com:enp2s0:none ---- -[discrete] + === Combining DHCP and static IP configurations You can combine DHCP and static IP configurations on systems with multiple network interfaces, for example: @@ -175,7 +175,7 @@ ip=enp1s0:dhcp ip=10.10.10.2::10.10.10.254:255.255.255.0:core0.example.com:enp2s0:none ---- -[discrete] + === Configuring VLANs on individual interfaces Optional: You can configure VLANs on individual interfaces by using the `vlan=` parameter. @@ -196,7 +196,7 @@ ip=enp2s0.100:dhcp vlan=enp2s0.100:enp2s0 ---- -[discrete] + === Providing multiple DNS servers You can provide multiple DNS servers by adding a `nameserver=` entry for each server, for example: @@ -209,7 +209,7 @@ nameserver=8.8.8.8 ifndef::ibm-z-kvm[] -[discrete] + === Bonding multiple network interfaces to a single interface Optional: You can bond multiple network interfaces to a single interface by using the `bond=` option. 
Refer to the following examples: @@ -251,7 +251,7 @@ Always set the `fail_over_mac=1` option in active-backup mode, to avoid problems endif::ibm-z[] ifdef::ibm-z[] -[discrete] + === Bonding multiple network interfaces to a single interface Optional: You can configure VLANs on bonded interfaces by using the `vlan=` parameter and to use DHCP, for example: @@ -276,7 +276,7 @@ endif::ibm-z[] ifndef::ibm-z[] [id="bonding-multiple-sriov-network-interfaces-to-dual-port_{context}"] -[discrete] + === Bonding multiple SR-IOV network interfaces to a dual port NIC interface :FeatureName: Support for Day 1 operations associated with enabling NIC partitioning for SR-IOV devices @@ -316,7 +316,7 @@ ip=10.10.10.2::10.10.10.254:255.255.255.0:core0.example.com:bond0:none endif::ibm-z[] ifndef::ibm-power[] -[discrete] + === Using network teaming Optional: You can use a network teaming as an alternative to bonding by using the `team=` parameter: diff --git a/modules/installation-vsphere-installer-infra-requirements.adoc b/modules/installation-vsphere-installer-infra-requirements.adoc index d5e7592f7645..b392fb52080d 100644 --- a/modules/installation-vsphere-installer-infra-requirements.adoc +++ b/modules/installation-vsphere-installer-infra-requirements.adoc @@ -22,7 +22,7 @@ ifdef::upi[] Before you install an {product-title} cluster on your vCenter that uses infrastructure that you provided, you must prepare your environment. endif::upi[] -[discrete] + [id="installation-vsphere-installer-infra-requirements-account_{context}"] == Required vCenter account privileges @@ -383,7 +383,6 @@ endif::ipi[] For more information about creating an account with only the required privileges, see link:https://docs.vmware.com/en/VMware-vSphere/7.0/com.vmware.vsphere.security.doc/GUID-5372F580-5C23-4E9C-8A4E-EF1B4DD9033E.html[vSphere Permissions and User Management Tasks] in the vSphere documentation. 
-[discrete] [id="installation-vsphere-installer-infra-requirements-vmotion_{context}"] == Using {product-title} with vMotion @@ -408,7 +407,7 @@ You can specify the path of any datastore that exists in a datastore cluster. By If you must specify VMs across multiple datastores, use a `datastore` object to specify a failure domain in your cluster's `install-config.yaml` configuration file. For more information, see "VMware vSphere region and zone enablement". ==== -[discrete] + [id="installation-vsphere-installer-infra-requirements-resources_{context}"] == Cluster resources @@ -435,13 +434,13 @@ Although these resources use 856 GB of storage, the bootstrap node is destroyed If you deploy more compute machines, the {product-title} cluster will use more storage. -[discrete] + [id="installation-vsphere-installer-infra-requirements-limits_{context}"] == Cluster limits Available resources vary between clusters. The number of possible clusters within a vCenter is limited primarily by available storage space and any limitations on the number of required resources. Be sure to consider both limitations to the vCenter resources that the cluster creates and the resources that you require to deploy a cluster, such as IP addresses and networks. -[discrete] + [id="installation-vsphere-installer-infra-requirements-networking_{context}"] == Networking requirements @@ -465,7 +464,6 @@ Ensure that each {product-title} node in the cluster has access to a Network Tim Additionally, you must create the following networking resources before you install the {product-title} cluster: -[discrete] [id="installation-vsphere-installer-infra-requirements-_{context}"] === Required IP Addresses ifndef::upi[] @@ -477,7 +475,7 @@ For a network that uses DHCP, an installer-provisioned vSphere installation requ You must provide these IP addresses to the installation program when you install the {product-title} cluster. 
endif::upi[] -[discrete] + [id="installation-vsphere-installer-infra-requirements-dns-records_{context}"] === DNS records You must create DNS records for two static IP addresses in the appropriate DNS server for the vCenter instance that hosts your {product-title} cluster. In each record, `` is the cluster name and `` is the cluster base domain that you specify when you install the cluster. A complete DNS record takes the form: `...`. diff --git a/modules/installation-vsphere-installer-infra-static-ip-nodes.adoc b/modules/installation-vsphere-installer-infra-static-ip-nodes.adoc index 4b0b64155d6e..1ef41fd49fec 100644 --- a/modules/installation-vsphere-installer-infra-static-ip-nodes.adoc +++ b/modules/installation-vsphere-installer-infra-static-ip-nodes.adoc @@ -3,7 +3,7 @@ // * installing/installing_vsphere/ipi/ipi-vsphere-installation-reqs.adoc :_mod-docs-content-type: CONCEPT -[discrete] + [id="installation-vsphere-installer-infra-static-ip-nodes_{context}"] == Static IP addresses for vSphere nodes diff --git a/modules/installing-troubleshooting-assisted-installer-oci.adoc b/modules/installing-troubleshooting-assisted-installer-oci.adoc index e517975e3ef8..29841fa1a2e1 100644 --- a/modules/installing-troubleshooting-assisted-installer-oci.adoc +++ b/modules/installing-troubleshooting-assisted-installer-oci.adoc @@ -8,7 +8,7 @@ If you experience issues with using the {ai-full} to install an {product-title} cluster on {oci-first}, read the following sections to troubleshoot common problems. -[discrete] + == The Ingress Load Balancer in {oci} is not at a healthy status This issue is classed as a `Warning` because by using the Resource Manager to create a stack, you created a pool of compute nodes, 3 by default, that are automatically added as backend listeners for the Ingress Load Balancer. By default, the {product-title} deploys 2 router pods, which are based on the default values from the {product-title} manifest files. 
The `Warning` is expected because a mismatch exists with the number of router pods available, two, to run on the three compute nodes. @@ -18,7 +18,7 @@ image::ingress_load_balancer_warning_message.png[Example of an warning message t You do not need to modify the Ingress Load Balancer configuration. Instead, you can point the Ingress Load Balancer to specific compute nodes that operate in your cluster on {product-title}. To do this, use placement mechanisms, such as annotations, on {product-title} to ensure router pods only run on the compute nodes that you originally configured on the Ingress Load Balancer as backend listeners. -[discrete] + == {oci} create stack operation fails with an Error: 400-InvalidParameter message On attempting to create a stack on {oci}, you identified that the *Logs* section of the job outputs an error message. For example: diff --git a/modules/ipi-install-additional-install-config-parameters.adoc b/modules/ipi-install-additional-install-config-parameters.adoc index 987b0e710632..0ea2d5e3181c 100644 --- a/modules/ipi-install-additional-install-config-parameters.adoc +++ b/modules/ipi-install-additional-install-config-parameters.adoc @@ -195,7 +195,7 @@ a|`provisioningNetworkCIDR` |=== -[discrete] + == Hosts The `hosts` parameter is a list of separate bare metal assets used to build the cluster. diff --git a/modules/ipi-install-bmc-addressing-for-dell-idrac.adoc b/modules/ipi-install-bmc-addressing-for-dell-idrac.adoc index d7f269a93566..6ffb9504357a 100644 --- a/modules/ipi-install-bmc-addressing-for-dell-idrac.adoc +++ b/modules/ipi-install-bmc-addressing-for-dell-idrac.adoc @@ -25,7 +25,7 @@ platform: For Dell hardware, Red Hat supports integrated Dell Remote Access Controller (iDRAC) virtual media, Redfish network boot, and IPMI. -[discrete] + == BMC address formats for Dell iDRAC [width="100%", cols="1,3", options="header"] |==== @@ -42,7 +42,7 @@ Use `idrac-virtualmedia` as the protocol for Redfish virtual media. 
`redfish-vir See the following sections for additional details. -[discrete] + == Redfish virtual media for Dell iDRAC For Redfish virtual media on Dell servers, use `idrac-virtualmedia://` in the `address` setting. Using `redfish-virtualmedia://` will not work. @@ -90,7 +90,7 @@ platform: disableCertificateVerification: True ---- -[discrete] + == Redfish network boot for iDRAC To enable Redfish, use `redfish://` or `redfish+http://` to disable transport layer security (TLS). The installation program requires both the hostname or the IP address and the path to the system ID. The following example demonstrates a Redfish configuration within the `install-config.yaml` file. diff --git a/modules/ipi-install-bmc-addressing-for-hpe-ilo.adoc b/modules/ipi-install-bmc-addressing-for-hpe-ilo.adoc index 63c5a71b09ff..e44f91bf3ec5 100644 --- a/modules/ipi-install-bmc-addressing-for-hpe-ilo.adoc +++ b/modules/ipi-install-bmc-addressing-for-hpe-ilo.adoc @@ -35,7 +35,7 @@ For HPE integrated Lights Out (iLO), Red Hat supports Redfish virtual media, Red See the following sections for additional details. -[discrete] + == Redfish virtual media for HPE iLO To enable Redfish virtual media for HPE servers, use `redfish-virtualmedia://` in the `address` setting. The following example demonstrates using Redfish virtual media within the `install-config.yaml` file. @@ -75,7 +75,7 @@ Redfish virtual media is not supported on 9th generation systems running iLO4, b ==== -[discrete] + == Redfish network boot for HPE iLO To enable Redfish, use `redfish://` or `redfish+http://` to disable TLS. The installer requires both the hostname or the IP address and the path to the system ID. The following example demonstrates a Redfish configuration within the `install-config.yaml` file. 
diff --git a/modules/ipi-install-bmc-addressing.adoc b/modules/ipi-install-bmc-addressing.adoc index 3af78860f1d1..823fb5271615 100644 --- a/modules/ipi-install-bmc-addressing.adoc +++ b/modules/ipi-install-bmc-addressing.adoc @@ -8,7 +8,6 @@ Most vendors support Baseboard Management Controller (BMC) addressing with the Intelligent Platform Management Interface (IPMI). IPMI does not encrypt communications. It is suitable for use within a data center over a secured or dedicated management network. Check with your vendor to see if they support Redfish network boot. Redfish delivers simple and secure management for converged, hybrid IT and the Software Defined Data Center (SDDC). Redfish is human readable and machine capable, and leverages common internet and web services standards to expose information directly to the modern tool chain. If your hardware does not support Redfish network boot, use IPMI. -[discrete] == IPMI Hosts using IPMI use the `ipmi://:` address format, which defaults to port `623` if not specified. The following example demonstrates an IPMI configuration within the `install-config.yaml` file. @@ -31,7 +30,7 @@ platform: The `provisioning` network is required when PXE booting using IPMI for BMC addressing. It is not possible to PXE boot hosts without a `provisioning` network. If you deploy without a `provisioning` network, you must use a virtual media BMC addressing option such as `redfish-virtualmedia` or `idrac-virtualmedia`. See "Redfish virtual media for HPE iLO" in the "BMC addressing for HPE iLO" section or "Redfish virtual media for Dell iDRAC" in the "BMC addressing for Dell iDRAC" section for additional details. ==== -[discrete] + == Redfish network boot To enable Redfish, use `redfish://` or `redfish+http://` to disable TLS. The installer requires both the hostname or the IP address and the path to the system ID. The following example demonstrates a Redfish configuration within the `install-config.yaml` file. 
@@ -64,7 +63,7 @@ platform: password: disableCertificateVerification: True ---- -[discrete] + == Redfish APIs Several redfish API endpoints are called onto your BMC when using the bare-metal installer-provisioned infrastructure. diff --git a/modules/ipi-install-configuring-nodes.adoc b/modules/ipi-install-configuring-nodes.adoc index 32105291de7e..6b670ed66aad 100644 --- a/modules/ipi-install-configuring-nodes.adoc +++ b/modules/ipi-install-configuring-nodes.adoc @@ -6,7 +6,7 @@ [id="configuring-nodes_{context}"] = Configuring nodes -[discrete] + == Configuring nodes when using the `provisioning` network Each node in the cluster requires the following configuration for proper installation. @@ -48,7 +48,7 @@ Configure the control plane and worker nodes as follows: | NIC1 PXE-enabled (provisioning network) | 1 |=== -[discrete] + == Configuring nodes without the `provisioning` network The installation process requires one NIC: @@ -67,7 +67,7 @@ The `provisioning` network is optional, but it is required for PXE booting. If y ==== [id="configuring-nodes-for-secure-boot_{context}"] -[discrete] + == Configuring nodes for Secure Boot manually Secure Boot prevents a node from booting unless it verifies the node is using only trusted software, such as UEFI firmware drivers, EFI applications, and the operating system. diff --git a/modules/kube-apiserver-operator.adoc b/modules/kube-apiserver-operator.adoc index 0d5233096f64..af829b7bbfbe 100644 --- a/modules/kube-apiserver-operator.adoc +++ b/modules/kube-apiserver-operator.adoc @@ -7,12 +7,12 @@ The Kubernetes API Server Operator manages and updates the Kubernetes API server deployed on top of {product-title}. The Operator is based on the {product-title} `library-go` framework and it is installed using the Cluster Version Operator (CVO). 
-[discrete] + == Project link:https://github.com/openshift/cluster-kube-apiserver-operator[openshift-kube-apiserver-operator] -[discrete] + == CRDs * `kubeapiservers.operator.openshift.io` @@ -20,7 +20,7 @@ link:https://github.com/openshift/cluster-kube-apiserver-operator[openshift-kube ** CR: `kubeapiserver` ** Validation: Yes -[discrete] + == Configuration objects [source,terminal] diff --git a/modules/kube-controller-manager-operator.adoc b/modules/kube-controller-manager-operator.adoc index ac1405aac486..adc98b22090d 100644 --- a/modules/kube-controller-manager-operator.adoc +++ b/modules/kube-controller-manager-operator.adoc @@ -16,7 +16,7 @@ It contains the following components: By default, the Operator exposes Prometheus metrics through the `metrics` service. -[discrete] + == Project link:https://github.com/openshift/cluster-kube-controller-manager-operator[cluster-kube-controller-manager-operator] diff --git a/modules/lvms-about-lvmcluster-cr.adoc b/modules/lvms-about-lvmcluster-cr.adoc index e8eb6206a28a..ae1aa8d0478e 100644 --- a/modules/lvms-about-lvmcluster-cr.adoc +++ b/modules/lvms-about-lvmcluster-cr.adoc @@ -53,7 +53,7 @@ spec: ---- <1> Optional field -[discrete] + == Explanation of fields in the LVMCluster CR The `LVMCluster` CR fields are described in the following table: diff --git a/modules/machine-api-operator.adoc b/modules/machine-api-operator.adoc index f0f01e7250ee..3c0b860483f2 100644 --- a/modules/machine-api-operator.adoc +++ b/modules/machine-api-operator.adoc @@ -7,12 +7,12 @@ The Machine API Operator manages the lifecycle of specific purpose custom resource definitions (CRD), controllers, and RBAC objects that extend the Kubernetes API. This declares the desired state of machines in a cluster. 
-[discrete] + == Project link:https://github.com/openshift/machine-api-operator[machine-api-operator] -[discrete] + == CRDs * `MachineSet` diff --git a/modules/machine-config-operator.adoc b/modules/machine-config-operator.adoc index 01e131167bba..a72da284a6a3 100644 --- a/modules/machine-config-operator.adoc +++ b/modules/machine-config-operator.adoc @@ -21,7 +21,6 @@ include::snippets/mcs-endpoint-limitation.adoc[] * xref:../networking/openshift_sdn/about-openshift-sdn.adoc#about-openshift-sdn[About the OpenShift SDN network plugin]. -[discrete] == Project link:https://github.com/openshift/machine-config-operator[openshift-machine-config-operator] diff --git a/modules/machine-lifecycle-hook-deletion-format.adoc b/modules/machine-lifecycle-hook-deletion-format.adoc index e5a40cac70a7..50263b567d4b 100644 --- a/modules/machine-lifecycle-hook-deletion-format.adoc +++ b/modules/machine-lifecycle-hook-deletion-format.adoc @@ -42,7 +42,7 @@ spec: <1> The name of the `preTerminate` lifecycle hook. <2> The hook-implementing controller that manages the `preTerminate` lifecycle hook. -[discrete] + [id="machine-lifecycle-hook-deletion-example_{context}"] == Example lifecycle hook configuration diff --git a/modules/machine-lifecycle-hook-deletion-uses.adoc b/modules/machine-lifecycle-hook-deletion-uses.adoc index 8fa8910d02ef..21b595a95377 100644 --- a/modules/machine-lifecycle-hook-deletion-uses.adoc +++ b/modules/machine-lifecycle-hook-deletion-uses.adoc @@ -8,7 +8,7 @@ Operators can use lifecycle hooks for the machine deletion phase to modify the machine deletion process. The following examples demonstrate possible ways that an Operator can use this functionality. 
-[discrete] + [id="machine-lifecycle-hook-deletion-uses-predrain_{context}"] == Example use cases for `preDrain` lifecycle hooks @@ -18,7 +18,7 @@ Implementing custom draining logic:: An Operator can use a `preDrain` lifecycle + For example, the machine controller drain libraries do not support ordering, but a custom drain provider could provide this functionality. By using a custom drain provider, an Operator could prioritize moving mission-critical applications before draining the node to ensure that service interruptions are minimized in cases where cluster capacity is limited. -[discrete] + [id="machine-lifecycle-hook-deletion-uses-preterminate_{context}"] == Example use cases for `preTerminate` lifecycle hooks diff --git a/modules/machineset-yaml-gcp.adoc b/modules/machineset-yaml-gcp.adoc index 72f58ad2b88a..b0f754c87dc4 100644 --- a/modules/machineset-yaml-gcp.adoc +++ b/modules/machineset-yaml-gcp.adoc @@ -19,7 +19,7 @@ ifndef::infra[``] ifdef::infra[`infra`] is the node label to add. -[discrete] + [id="cpmso-yaml-provider-spec-gcp-oc_{context}"] == Values obtained by using the OpenShift CLI diff --git a/modules/machineset-yaml-nutanix.adoc b/modules/machineset-yaml-nutanix.adoc index 0ec2248ad5a9..d8cf3c5b575a 100644 --- a/modules/machineset-yaml-nutanix.adoc +++ b/modules/machineset-yaml-nutanix.adoc @@ -20,7 +20,7 @@ ifndef::infra[``] ifdef::infra[``] is the node label to add. -[discrete] + [id="machineset-yaml-nutanix-oc_{context}"] == Values obtained by using the OpenShift CLI diff --git a/modules/metering-troubleshooting.adoc b/modules/metering-troubleshooting.adoc index 8abee1df9ccc..e0a857ced20f 100644 --- a/modules/metering-troubleshooting.adoc +++ b/modules/metering-troubleshooting.adoc @@ -110,7 +110,7 @@ Node: ip-10-xx-xx-xx.ap-southeast-1.compute.internal/10.xx.xx.xx ---- <1> The Reporting Operator pod was terminated due to OOM kill. 
-[discrete] + [id="metering-check-and-increase-memory-limits_{context}"] === Increasing the reporting-operator pod memory limit diff --git a/modules/minimum-ibm-power-system-requirements.adoc b/modules/minimum-ibm-power-system-requirements.adoc index d9e8ffd61409..e25dba314fe3 100644 --- a/modules/minimum-ibm-power-system-requirements.adoc +++ b/modules/minimum-ibm-power-system-requirements.adoc @@ -16,12 +16,12 @@ You can install {product-title} version {product-version} on the following {ibm- Support for {op-system} functionality for all {ibm-power-name}8 models, {ibm-power-name} AC922, {ibm-power-name} IC922, and {ibm-power-name} LC922 is deprecated in {product-title} {product-version}. Red Hat recommends that you use later hardware models. ==== -[discrete] + == Hardware requirements * Six logical partitions (LPARs) across multiple PowerVM servers -[discrete] + == Operating system requirements * One instance of an {ibm-power-name}9 or Power10 processor-based system @@ -32,19 +32,19 @@ On your {ibm-power-name} instance, set up: * Two LPARs for {product-title} compute machines * One LPAR for the temporary {product-title} bootstrap machine -[discrete] + == Disk storage for the {ibm-power-title} guest virtual machines * Local storage, or storage provisioned by the Virtual I/O Server using vSCSI, NPIV (N-Port ID Virtualization) or SSP (shared storage pools) -[discrete] + == Network for the PowerVM guest virtual machines * Dedicated physical adapter, or SR-IOV virtual function * Available by the Virtual I/O Server using Shared Ethernet Adapter * Virtualized by the Virtual I/O Server using {ibm-name} vNIC -[discrete] + == Storage / main memory * 100 GB / 16 GB for {product-title} control plane machines diff --git a/modules/minimum-ibm-z-system-requirements.adoc b/modules/minimum-ibm-z-system-requirements.adoc index 1b52bf7870d4..6c43544f0d2a 100644 --- a/modules/minimum-ibm-z-system-requirements.adoc +++ b/modules/minimum-ibm-z-system-requirements.adoc @@ -14,7 +14,6 
@@ You can install {product-title} version {product-version} on the following {ibm- * {ibm-name} z16 (all models), {ibm-name} z15 (all models), {ibm-name} z14 (all models) * {ibm-linuxone-name} 4 (all models), {ibm-linuxone-name} III (all models), {ibm-linuxone-name} Emperor II, {ibm-linuxone-name} Rockhopper II -[discrete] == Hardware requirements * The equivalent of six Integrated Facilities for Linux (IFL), which are SMT2 enabled, for each cluster. @@ -30,7 +29,7 @@ You can use dedicated or shared IFLs to assign sufficient compute resources. Res Since the overall performance of the cluster can be impacted, the LPARs that are used to set up the {product-title} clusters must provide sufficient compute capacity. In this context, LPAR weight management, entitlements, and CPU shares on the hypervisor level play an important role. ==== -[discrete] + == Operating system requirements * One instance of z/VM 7.2 or later @@ -41,7 +40,7 @@ On your z/VM instance, set up: * Two guest virtual machines for {product-title} compute machines * One guest virtual machine for the temporary {product-title} bootstrap machine -[discrete] + == {ibm-z-title} network connectivity requirements To install on {ibm-z-name} under z/VM, you require a single z/VM virtual NIC in layer 2 mode. You also need: @@ -49,13 +48,12 @@ To install on {ibm-z-name} under z/VM, you require a single z/VM virtual NIC in * A direct-attached OSA or RoCE network adapter * A z/VM VSWITCH in layer 2 Ethernet mode set up -[discrete] === Disk storage for the z/VM guest virtual machines * FICON attached disk storage (DASDs). These can be z/VM minidisks, fullpack minidisks, or dedicated DASDs, all of which must be formatted as CDL, which is the default. To reach the minimum required DASD size for {op-system-first} installations, you need extended address volumes (EAV). If available, use HyperPAV to ensure optimal performance. 
* FCP attached disk storage -[discrete] + === Storage / Main Memory * 16 GB for {product-title} control plane machines diff --git a/modules/node-tuning-operator.adoc b/modules/node-tuning-operator.adoc index 473d3669fe62..3fd57ccce270 100644 --- a/modules/node-tuning-operator.adoc +++ b/modules/node-tuning-operator.adoc @@ -61,7 +61,7 @@ In earlier versions of {product-title}, the Performance Addon Operator was used endif::cluster-caps[] ifdef::operators[] -[discrete] + == Project link:https://github.com/openshift/cluster-node-tuning-operator[cluster-node-tuning-operator] diff --git a/modules/nodes-pods-configmap-overview.adoc b/modules/nodes-pods-configmap-overview.adoc index 6bc04d7b8349..e7877cd5f58f 100644 --- a/modules/nodes-pods-configmap-overview.adoc +++ b/modules/nodes-pods-configmap-overview.adoc @@ -49,7 +49,7 @@ Users and system components can store configuration data in a config map. A config map is similar to a secret, but designed to more conveniently support working with strings that do not contain sensitive information. -[discrete] + == Config map restrictions *A config map must be created before its contents can be consumed in pods.* diff --git a/modules/nw-egress-ips-about.adoc b/modules/nw-egress-ips-about.adoc index c33ecb1ef6f6..2e75ccbab5bb 100644 --- a/modules/nw-egress-ips-about.adoc +++ b/modules/nw-egress-ips-about.adoc @@ -196,7 +196,7 @@ OVN-Kubernetes provides a mechanism to control and direct outbound network traff ==== ifndef::openshift-rosa[] -[discrete] + [id="nw-egress-ips-multi-nic-requirements_{context}"] === Requirements for assigning an egress IP to a network interface that is not the primary network interface diff --git a/modules/nw-externalip-object.adoc b/modules/nw-externalip-object.adoc index 440cea43bac4..a78cf606d9d8 100644 --- a/modules/nw-externalip-object.adoc +++ b/modules/nw-externalip-object.adoc @@ -47,7 +47,7 @@ policy: <1> A list of allowed IP address ranges in CIDR format. 
<2> A list of rejected IP address ranges in CIDR format. -[discrete] + == Example external IP configurations Several possible configurations for external IP address pools are displayed in the following examples: diff --git a/modules/nw-infw-operator-config-object.adoc b/modules/nw-infw-operator-config-object.adoc index 3b49e5cf645c..d5167d4d6cf0 100644 --- a/modules/nw-infw-operator-config-object.adoc +++ b/modules/nw-infw-operator-config-object.adoc @@ -45,7 +45,6 @@ One label used in `nodeSelector` must match a label on the nodes in order for th The Operator consumes the CR and creates an ingress node firewall daemon set on all the nodes that match the `nodeSelector`. ==== -[discrete] [id="nw-ingress-node-firewall-example-cr-2_{context}"] == Ingress Node Firewall Operator example configuration @@ -67,4 +66,4 @@ spec: [NOTE] ==== The Operator consumes the CR and creates an ingress node firewall daemon set on all the nodes that match the `nodeSelector`. -==== \ No newline at end of file +==== diff --git a/modules/nw-operator-cr.adoc b/modules/nw-operator-cr.adoc index acff8b242952..aa957babfce4 100644 --- a/modules/nw-operator-cr.adoc +++ b/modules/nw-operator-cr.adoc @@ -115,7 +115,7 @@ If you are using the OVN-Kubernetes cluster network plugin, the kube-proxy confi For a cluster that needs to deploy objects across multiple networks, ensure that you specify the same value for the `clusterNetwork.hostPrefix` parameter for each network type that is defined in the `install-config.yaml` file. Setting a different value for each `clusterNetwork.hostPrefix` parameter can impact the OVN-Kubernetes network plugin, where the plugin cannot effectively route object traffic among different nodes. 
==== -[discrete] + [id="nw-operator-cr-defaultnetwork_{context}"] === defaultNetwork object configuration @@ -144,7 +144,6 @@ The values for the `defaultNetwork` object are defined in the following table: |==== -[discrete] [id="nw-operator-configuration-parameters-for-openshift-sdn_{context}"] ==== Configuration for the OpenShift SDN network plugin @@ -175,9 +174,9 @@ The maximum transmission unit (MTU) for the VXLAN overlay network. This is detec If the auto-detected value is not what you expect it to be, confirm that the MTU on the primary network interface on your nodes is correct. You cannot use this option to change the MTU value of the primary network interface on the nodes. -If your cluster requires different MTU values for different nodes, you must set this value to `50` less than the lowest MTU value in your cluster. For example, if some nodes in your cluster have an MTU of `9001`, and some have an MTU of `1500`, you must set this value to `1450`. +If your cluster requires different MTU values for different nodes, you must set this value to `50` less than the lowest MTU value in your cluster. For example, if some nodes in your cluster have an MTU of `9001`, and some have an MTU of `1500`, you must set this value to `1450`. -You can set the value during cluster installation or as a post-installation task. For more information, see "Changing the MTU for the cluster network" in the {product-title} Networking document. +You can set the value during cluster installation or as a post-installation task. For more information, see "Changing the MTU for the cluster network" in the {product-title} Networking document. endif::operator[] ifdef::operator[] The maximum transmission unit (MTU) for the VXLAN overlay network. This value is normally configured automatically. 
@@ -212,7 +211,6 @@ defaultNetwork: ---- endif::operator[] -[discrete] [id="nw-operator-configuration-parameters-for-ovn-sdn_{context}"] ==== Configuration for the OVN-Kubernetes network plugin @@ -434,7 +432,6 @@ defaultNetwork: ipsecConfig: {} ---- -[discrete] [id="nw-operator-cr-kubeproxyconfig_{context}"] === kubeProxyConfig object configuration (OpenShiftSDN container network interface only) @@ -497,4 +494,3 @@ endif::operator[] ifeval::["{context}" == "cluster-network-operator"] :!operator: endif::[] - diff --git a/modules/nw-ovn-k-adminnetwork-policy-action-rules.adoc b/modules/nw-ovn-k-adminnetwork-policy-action-rules.adoc index d8033fbbf152..60dd3435c74b 100644 --- a/modules/nw-ovn-k-adminnetwork-policy-action-rules.adoc +++ b/modules/nw-ovn-k-adminnetwork-policy-action-rules.adoc @@ -7,7 +7,7 @@ = AdminNetworkPolicy actions for rules As an administrator, you can set `Allow`, `Deny`, or `Pass` as the `action` field for your `AdminNetworkPolicy` rules. Because OVN-Kubernetes uses a tiered ACLs to evaluate network traffic rules, ANP allow you to set very strong policy rules that can only be changed by an administrator modifying them, deleting the rule, or overriding them by setting a higher priority rule. -[discrete] + [id="adminnetworkpolicy-allow-example_{context}"] == AdminNetworkPolicy Allow example The following ANP that is defined at priority 9 ensures all ingress traffic is allowed from the `monitoring` namespace towards any tenant (all other namespaces) in the cluster. @@ -39,7 +39,7 @@ spec: ==== This is an example of a strong `Allow` ANP because it is non-overridable by all the parties involved. No tenants can block themselves from being monitored using `NetworkPolicy` objects and the monitoring tenant also has no say in what it can or cannot monitor. 
-[discrete] + [id="adminnetworkpolicy-deny-example_{context}"] == AdminNetworkPolicy Deny example The following ANP that is defined at priority 5 ensures all ingress traffic from the `monitoring` namespace is blocked towards restricted tenants (namespaces that have labels `security: restricted`). @@ -74,7 +74,7 @@ This is a strong `Deny` ANP that is non-overridable by all the parties involved. When combined with the strong `Allow` example, the `block-monitoring` ANP has a lower priority value giving it higher precedence, which ensures restricted tenants are never monitored. -[discrete] + [id="adminnetworkpolicy-pass-example_{context}"] == AdminNetworkPolicy Pass example The following ANP that is defined at priority 7 ensures all ingress traffic from the `monitoring` namespace towards internal infrastructure tenants (namespaces that have labels `security: internal`) are passed on to tier 2 of the ACLs and evaluated by the namespaces’ `NetworkPolicy` objects. diff --git a/modules/nw-ovn-k-adminnetwork-policy.adoc b/modules/nw-ovn-k-adminnetwork-policy.adoc index 2c94d31e4de9..9f579a28d6b9 100644 --- a/modules/nw-ovn-k-adminnetwork-policy.adoc +++ b/modules/nw-ovn-k-adminnetwork-policy.adoc @@ -25,7 +25,6 @@ An ANP allows administrators to specify the following: The `AdminNetworkPolicy` resource is a `TechnologyPreviewNoUpgrade` feature that can be enabled on test clusters that are not in production. For more information on feature gates and `TechnologyPreviewNoUpgrade` features, see "Enabling features using feature gates" in the "Additional resources" of this section. ==== -[discrete] [id="adminnetworkpolicy-example_{context}"] == AdminNetworkPolicy example @@ -77,4 +76,4 @@ spec: <6> Specify the namespaces to select the pods from to apply the ANP resource. <7> Specify `podSelector.matchLabels` name of the pods to apply the ANP resource. <8> ANP have both ingress and egress rules. 
ANP rules for `spec.egress` field accepts values of `Pass`, `Deny`, and `Allow` for the `action` field. -==== \ No newline at end of file +==== diff --git a/modules/nw-ovn-k-baseline-adminnetwork-policy.adoc b/modules/nw-ovn-k-baseline-adminnetwork-policy.adoc index d3f5e3a135a0..df36638a0b18 100644 --- a/modules/nw-ovn-k-baseline-adminnetwork-policy.adoc +++ b/modules/nw-ovn-k-baseline-adminnetwork-policy.adoc @@ -23,7 +23,6 @@ A BANP allows administrators to specify: `BaselineAdminNetworkPolicy` is a `TechnologyPreviewNoUpgrade` feature that can be enabled on test clusters that are not in production. ==== -[discrete] [id="baselineddminnetworkpolicy-example_{context}"] == BaselineAdminNetworkPolicy example @@ -76,7 +75,7 @@ spec: ==== -[discrete] + [id="BaselineAdminNetworkPolicy-default-deny-example_{context}"] == BaselineAdminNetworkPolicy Deny example The following BANP singleton ensures that the administrator has set up a default deny policy for all ingress monitoring traffic coming into the tenants at `internal` security level. When combined with the "AdminNetworkPolicy Pass example", this deny policy acts as a guardrail policy for all ingress traffic that is passed by the ANP `pass-monitoring` policy. @@ -137,4 +136,3 @@ spec: ==== In this scenario, Tenant 1's policy would be evaluated after the "AdminNetworkPolicy `Pass` action example" and before the "BaselineAdminNetworkPolicy `Deny` example", which denies all ingress monitoring traffic coming into tenants with `security` level `internal`. With Tenant 1's `NetworkPolicy` object in place, they will be able to collect data on their application. Tenant 2, however, who does not have any `NetworkPolicy` objects in place, will not be able to collect data. As an administrator, you have not by default monitored internal tenants, but instead, you created a BANP that allows tenants to use `NetworkPolicy` objects to override the default behavior of your BANP. 
- diff --git a/modules/nw-ovn-kubernetes-session-affinity.adoc b/modules/nw-ovn-kubernetes-session-affinity.adoc index b4cbfec20fa8..c34fb8577fc9 100644 --- a/modules/nw-ovn-kubernetes-session-affinity.adoc +++ b/modules/nw-ovn-kubernetes-session-affinity.adoc @@ -7,7 +7,7 @@ = Session affinity Session affinity is a feature that applies to Kubernetes `Service` objects. You can use _session affinity_ if you want to ensure that each time you connect to a :, the traffic is always load balanced to the same back end. For more information, including how to set session affinity based on a client's IP address, see link:https://kubernetes.io/docs/reference/networking/virtual-ips/#session-affinity[Session affinity]. -[discrete] + [id="nw-ovn-kubernetes-session-affinity-stickyness-timeout_{context}"] == Stickiness timeout for session affinity The OVN-Kubernetes network plugin for {product-title} calculates the stickiness timeout for a session from a client based on the last packet. For example, if you run a `curl` command 10 times, the sticky session timer starts from the tenth packet not the first. As a result, if the client is continuously contacting the service, then the session never times out. The timeout starts when the service has not received a packet for the amount of time set by the link:https://kubernetes.io/docs/reference/networking/virtual-ips/#session-stickiness-timeout[`timeoutSeconds`] parameter. diff --git a/modules/nw-ptp-introduction.adoc b/modules/nw-ptp-introduction.adoc index 19f04afc3410..0db3cb19fbd5 100644 --- a/modules/nw-ptp-introduction.adoc +++ b/modules/nw-ptp-introduction.adoc @@ -22,7 +22,7 @@ Boundary clock:: The boundary clock has ports in two or more communication paths Ordinary clock:: The ordinary clock has a single port connection that can play the role of source or destination clock, depending on its position in the network. The ordinary clock can read and write timestamps. 
-[discrete] + [id="ptp-advantages-over-ntp_{context}"] == Advantages of PTP over NTP diff --git a/modules/nw-ptp-operator-metrics-reference.adoc b/modules/nw-ptp-operator-metrics-reference.adoc index becf9c2911e9..a9ed0185d73c 100644 --- a/modules/nw-ptp-operator-metrics-reference.adoc +++ b/modules/nw-ptp-operator-metrics-reference.adoc @@ -71,7 +71,7 @@ a|Returns values for `HoldOverTimeout`, `MaxOffsetThreshold`, and `MinOffsetThre |==== -[discrete] + == PTP fast event metrics only when T-GM is enabled The following table describes the PTP fast event metrics that are available only when PTP grandmaster clock (T-GM) is enabled. diff --git a/modules/nw-using-ingress-forwarded.adoc b/modules/nw-using-ingress-forwarded.adoc index 1c4e74099de4..f8e0c26b1cd9 100644 --- a/modules/nw-using-ingress-forwarded.adoc +++ b/modules/nw-using-ingress-forwarded.adoc @@ -33,7 +33,7 @@ spec: ---- -[discrete] + == Example use cases *As a cluster administrator, you can:* diff --git a/modules/oc-mirror-image-set-config-examples.adoc b/modules/oc-mirror-image-set-config-examples.adoc index ca38c9a8aee5..9c0854f6e05d 100644 --- a/modules/oc-mirror-image-set-config-examples.adoc +++ b/modules/oc-mirror-image-set-config-examples.adoc @@ -10,7 +10,7 @@ The following `ImageSetConfiguration` file examples show the configuration for various mirroring use cases. 
// Moved to first; unchanged -[discrete] + [id="oc-mirror-image-set-examples-shortest-upgrade-path_{context}"] == Use case: Including the shortest {product-title} update path @@ -34,7 +34,7 @@ mirror: ---- // Moved to second; unchanged -[discrete] + [id="oc-mirror-image-set-examples-minimum-to-latest_{context}"] == Use case: Including all versions of {product-title} from a minimum to the latest version for multi-architecture releases @@ -64,7 +64,7 @@ mirror: // Updated: // - Added a note below about the maxVersion // - Added a note about not necessarily getting all versions in the range -[discrete] + [id="oc-mirror-image-set-examples-operator-versions_{context}"] == Use case: Including Operator versions from a minimum to the latest @@ -102,7 +102,7 @@ mirror: To specify a maximum version instead of the latest, set the `mirror.operators.packages.channels.maxVersion` field. ==== -[discrete] + [id="oc-mirror-image-set-examples-nutanix-operator_{context}"] == Use case: Including the Nutanix CSI Operator The following `ImageSetConfiguration` file uses a local storage backend and includes the Nutanix CSI Operator, the OpenShift Update Service (OSUS) graph image, and an additional Red Hat Universal Base Image (UBI). 
@@ -133,7 +133,7 @@ mirror: ---- // New example; including the default channel -[discrete] + [id="oc-mirror-image-set-examples-default-channel_{context}"] == Use case: Including the default Operator channel @@ -164,7 +164,7 @@ mirror: ---- // New example; Entire catalog; all versions -[discrete] + [id="oc-mirror-image-set-examples-entire-catalog-full_{context}"] == Use case: Including an entire catalog (all versions) @@ -187,7 +187,7 @@ mirror: // New example; Entire catalog; heads only // - Included 'targetCatalog' in example -[discrete] + [id="oc-mirror-image-set-examples-entire-catalog-heads_{context}"] == Use case: Including an entire catalog (channel heads only) @@ -213,7 +213,7 @@ mirror: ---- // Moved to last; unchanged -[discrete] + [id="oc-mirror-image-set-examples-helm_{context}"] == Use case: Including arbitrary images and helm charts @@ -248,13 +248,13 @@ mirror: - name: registry.redhat.io/ubi9/ubi:latest ---- -[discrete] + [id="oc-mirror-image-set-examples-eus_{context}"] == Use case: Including the upgrade path for EUS releases The following `ImageSetConfiguration` file includes the `eus-` channel, where the `maxVersion` value is at least two minor versions higher than the `minVersion` value. -For example, in this `ImageSetConfiguration` file, the `minVersion` is set to `4.12.28`, while the `maxVersion` for the `eus-4.14` channel is `4.14.16`. +For example, in this `ImageSetConfiguration` file, the `minVersion` is set to `4.12.28`, while the `maxVersion` for the `eus-4.14` channel is `4.14.16`. 
.Example `ImageSetConfiguration` file [source,yaml,subs="attributes+"] diff --git a/modules/olm-bundle-format.adoc b/modules/olm-bundle-format.adoc index 2b5ed934c0c9..97d708f51011 100644 --- a/modules/olm-bundle-format.adoc +++ b/modules/olm-bundle-format.adoc @@ -43,7 +43,7 @@ etcd └── dependencies.yaml ---- -[discrete] + [id="olm-bundle-format-manifests-optional_{context}"] === Additionally supported objects diff --git a/modules/olm-webhook-considerations.adoc b/modules/olm-webhook-considerations.adoc index 1d0e85e00b61..78fdf6e05e97 100644 --- a/modules/olm-webhook-considerations.adoc +++ b/modules/olm-webhook-considerations.adoc @@ -13,18 +13,18 @@ When deploying an Operator with webhooks using Operator Lifecycle Manager (OLM), When the webhook is created, OLM ensures that the webhook only acts upon namespaces that match the Operator group that the Operator is deployed in. -[discrete] + [id="olm-webhook-ca_{context}"] -=== Certificate authority constraints +== Certificate authority constraints OLM is configured to provide each deployment with a single certificate authority (CA). The logic that generates and mounts the CA into the deployment was originally used by the API service lifecycle logic. As a result: * The TLS certificate file is mounted to the deployment at `/apiserver.local.config/certificates/apiserver.crt`. * The TLS key file is mounted to the deployment at `/apiserver.local.config/certificates/apiserver.key`. 
-[discrete] + [id="olm-admission-webhook-constraints_{context}"] -=== Admission webhook rules constraints +== Admission webhook rules constraints To prevent an Operator from configuring the cluster into an unrecoverable state, OLM places the CSV in the failed phase if the rules defined in an admission webhook intercept any of the following requests: @@ -32,9 +32,9 @@ To prevent an Operator from configuring the cluster into an unrecoverable state, * Requests that target the `operators.coreos.com` group * Requests that target the `ValidatingWebhookConfigurations` or `MutatingWebhookConfigurations` resources -[discrete] + [id="olm-conversion-webhook-constraints_{context}"] -=== Conversion webhook constraints +== Conversion webhook constraints OLM places the CSV in the failed phase if a conversion webhook definition does not adhere to the following constraints: diff --git a/modules/openshift-apiserver-operator.adoc b/modules/openshift-apiserver-operator.adoc index 52d2a5932451..6df89656bcfa 100644 --- a/modules/openshift-apiserver-operator.adoc +++ b/modules/openshift-apiserver-operator.adoc @@ -7,12 +7,12 @@ The OpenShift API Server Operator installs and maintains the `openshift-apiserver` on a cluster. -[discrete] + == Project link:https://github.com/openshift/cluster-openshift-apiserver-operator[openshift-apiserver-operator] -[discrete] + == CRDs * `openshiftapiservers.operator.openshift.io` diff --git a/modules/openshift-service-ca-operator.adoc b/modules/openshift-service-ca-operator.adoc index cc86ad8eea50..72537ccbfee7 100644 --- a/modules/openshift-service-ca-operator.adoc +++ b/modules/openshift-service-ca-operator.adoc @@ -7,7 +7,7 @@ The OpenShift Service CA Operator mints and manages serving certificates for Kubernetes services. 
-[discrete] + == Project link:https://github.com/openshift/service-ca-operator[openshift-service-ca-operator] diff --git a/modules/operator-marketplace.adoc b/modules/operator-marketplace.adoc index a698a1b842cc..bbf632261ca0 100644 --- a/modules/operator-marketplace.adoc +++ b/modules/operator-marketplace.adoc @@ -42,7 +42,7 @@ If you enable the `marketplace` capability, you can enable and disable individua endif::cluster-caps[] ifdef::operator-ref[] -[discrete] + == Project link:https://github.com/operator-framework/operator-marketplace[operator-marketplace] diff --git a/modules/preferred-ibm-z-system-requirements.adoc b/modules/preferred-ibm-z-system-requirements.adoc index 944650fbc68e..e6556872a04f 100644 --- a/modules/preferred-ibm-z-system-requirements.adoc +++ b/modules/preferred-ibm-z-system-requirements.adoc @@ -9,14 +9,14 @@ [id="preferred-ibm-z-system-requirements_{context}"] = Preferred {ibm-z-title} system environment -[discrete] + == Hardware requirements * Three LPARS that each have the equivalent of six IFLs, which are SMT2 enabled, for each cluster. * Two network connections to both connect to the `LoadBalancer` service and to serve data for traffic outside the cluster. * HiperSockets, which are attached to a node either directly as a device or by bridging with one z/VM VSWITCH to be transparent to the z/VM guest. To directly connect HiperSockets to a node, you must set up a gateway to the external network via a {op-system-base} 8 guest to bridge to the HiperSockets network. -[discrete] + == Operating system requirements * Two or three instances of z/VM 7.2 or later for high availability @@ -28,7 +28,7 @@ On your z/VM instances, set up: * One guest virtual machine for the temporary {product-title} bootstrap machine. * To ensure the availability of integral components in an overcommitted environment, increase the priority of the control plane by using the CP command `SET SHARE`. Do the same for infrastructure nodes, if they exist. 
See link:https://www.ibm.com/docs/en/zvm/latest?topic=commands-set-share[SET SHARE] ({ibm-name} Documentation). -[discrete] + == {ibm-z-title} network connectivity requirements To install on {ibm-z-name} under z/VM, you require a single z/VM virtual NIC in layer 2 mode. You also need: @@ -36,13 +36,12 @@ To install on {ibm-z-name} under z/VM, you require a single z/VM virtual NIC in * A direct-attached OSA or RoCE network adapter * A z/VM VSWITCH in layer 2 Ethernet mode set up -[discrete] === Disk storage for the z/VM guest virtual machines * FICON attached disk storage (DASDs). These can be z/VM minidisks, fullpack minidisks, or dedicated DASDs, all of which must be formatted as CDL, which is the default. To reach the minimum required DASD size for {op-system-first} installations, you need extended address volumes (EAV). If available, use HyperPAV and High Performance FICON (zHPF) to ensure optimal performance. * FCP attached disk storage -[discrete] + === Storage / Main Memory * 16 GB for {product-title} control plane machines diff --git a/modules/private-clusters-about.adoc b/modules/private-clusters-about.adoc index a8c4f480da65..85e04de6eb2b 100644 --- a/modules/private-clusters-about.adoc +++ b/modules/private-clusters-about.adoc @@ -11,7 +11,7 @@ By default, {product-title} is provisioned using publicly-accessible DNS and end include::snippets/snip-private-clusters-public-ingress.adoc[] -[discrete] + [id="private-clusters-about-dns_{context}"] == DNS @@ -19,14 +19,14 @@ If you install {product-title} on installer-provisioned infrastructure, the inst The `*.apps` records in the public and private zone are identical, so when you delete the public zone, the private zone seamlessly provides all DNS resolution for the cluster. -[discrete] + [id="private-clusters-about-ingress-controller_{context}"] == Ingress Controller Because the default `Ingress` object is created as public, the load balancer is internet-facing and in the public subnets. 
The Ingress Operator generates a default certificate for an Ingress Controller to serve as a placeholder until you configure a custom default certificate. Do not use Operator-generated default certificates in production clusters. The Ingress Operator does not rotate its own signing certificate or the default certificates that it generates. Operator-generated default certificates are intended as placeholders for custom default certificates that you configure. -[discrete] + [id="private-clusters-about-api-server_{context}"] == API server diff --git a/modules/prometheus-operator.adoc b/modules/prometheus-operator.adoc index ef0556211fbc..838d5a14f26a 100644 --- a/modules/prometheus-operator.adoc +++ b/modules/prometheus-operator.adoc @@ -15,7 +15,7 @@ Once installed, the Prometheus Operator provides the following features: * Target Services via Labels: Automatically generate monitoring target configurations based on familiar Kubernetes label queries; no need to learn a Prometheus specific configuration language. -[discrete] + == Project link:https://github.com/openshift/prometheus-operator[prometheus-operator] diff --git a/modules/psap-configuring-node-feature-discovery.adoc b/modules/psap-configuring-node-feature-discovery.adoc index 4995a7e748a5..b60eb032ddcb 100644 --- a/modules/psap-configuring-node-feature-discovery.adoc +++ b/modules/psap-configuring-node-feature-discovery.adoc @@ -11,7 +11,7 @@ The `core` section contains common configuration settings that are not specific to any particular feature source. -[discrete] + [id="configuring-node-feature-discovery-operator-core-sleepInterval_{context}"] === core.sleepInterval @@ -27,7 +27,7 @@ core: ---- The default value is `60s`. 
-[discrete] + [id="configuring-node-feature-discovery-operator-core-sources_{context}"] === core.sources @@ -46,7 +46,7 @@ core: - custom ---- -[discrete] + [id="configuring-node-feature-discovery-operator-core-label-whitelist_{context}"] === core.labelWhiteList @@ -65,7 +65,7 @@ core: labelWhiteList: '^cpu-cpuid' ---- -[discrete] + [id="configuring-node-feature-discovery-operator-core-no-publish_{context}"] === core.noPublish @@ -83,7 +83,7 @@ core: ---- The default value is `false`. -[discrete] + [id="configuring-node-feature-discovery-operator-core-klog_{context}"] == core.klog @@ -91,7 +91,7 @@ The following options specify the logger configuration, most of which can be dyn The logger options can also be specified using command-line flags, which take precedence over any corresponding config file options. -[discrete] + [id="configuring-node-feature-discovery-operator-core-klog-adddirheader_{context}"] === core.klog.addDirHeader @@ -101,7 +101,7 @@ Default: `false` Run-time configurable: yes -[discrete] + [id="configuring-node-feature-discovery-operator-core-klog-alsologtostderr_{context}"] === core.klog.alsologtostderr @@ -111,7 +111,7 @@ Default: `false` Run-time configurable: yes -[discrete] + [id="configuring-node-feature-discovery-operator-core-klog-BacktraceAt_{context}"] === core.klog.logBacktraceAt @@ -121,7 +121,7 @@ Default: *empty* Run-time configurable: yes -[discrete] + [id="configuring-node-feature-discovery-operator-core-klog-logdir_{context}"] === core.klog.logDir @@ -131,7 +131,7 @@ Default: *empty* Run-time configurable: no -[discrete] + [id="configuring-node-feature-discovery-operator-core-klog-logfile_{context}"] === core.klog.logFile @@ -141,7 +141,7 @@ Default: *empty* Run-time configurable: no -[discrete] + [id="configuring-node-feature-discovery-operator-core-klog-logFileMaxSize_{context}"] === core.klog.logFileMaxSize @@ -151,7 +151,7 @@ Default: `1800` Run-time configurable: no -[discrete] + 
[id="configuring-node-feature-discovery-operator-core-klog-logtostderr_{context}"] === core.klog.logtostderr @@ -161,7 +161,7 @@ Default: `true` Run-time configurable: yes -[discrete] + [id="configuring-node-feature-discovery-operator-core-klog-skipHeaders_{context}"] === core.klog.skipHeaders @@ -171,7 +171,7 @@ Default: `false` Run-time configurable: yes -[discrete] + [id="configuring-node-feature-discovery-operator-core-klog-skipLogHeaders_{context}"] === core.klog.skipLogHeaders @@ -181,7 +181,7 @@ Default: `false` Run-time configurable: no -[discrete] + [id="configuring-node-feature-discovery-operator-core-klog-stderrthreshold_{context}"] === core.klog.stderrthreshold @@ -191,7 +191,7 @@ Default: `2` Run-time configurable: yes -[discrete] + [id="configuring-node-feature-discovery-operator-core-klog-v_{context}"] === core.klog.v @@ -201,7 +201,7 @@ Default: `0` Run-time configurable: yes -[discrete] + [id="configuring-node-feature-discovery-operator-core-klog-vmodule_{context}"] === core.klog.vmodule @@ -216,7 +216,7 @@ Run-time configurable: yes The `sources` section contains feature source specific configuration parameters. 
-[discrete] + [id="configuring-node-feature-discovery-operator-sources-cpu-cpuid-attributeBlacklist_{context}"] === sources.cpu.cpuid.attributeBlacklist @@ -235,7 +235,7 @@ sources: attributeBlacklist: [MMX, MMXEXT] ---- -[discrete] + [id="configuring-node-feature-discovery-operator-sources-cpu-cpuid-attributeWhitelist_{context}"] === sources.cpu.cpuid.attributeWhitelist @@ -254,7 +254,7 @@ sources: attributeWhitelist: [AVX512BW, AVX512CD, AVX512DQ, AVX512F, AVX512VL] ---- -[discrete] + [id="configuring-node-feature-discovery-operator-sources-kernel-kconfigFilet_{context}"] === sources.kernel.kconfigFile @@ -270,7 +270,7 @@ sources: kconfigFile: "/path/to/kconfig" ---- -[discrete] + [id="configuring-node-feature-discovery-operator-sources-kernel-configOpts_{context}"] === sources.kernel.configOpts @@ -286,7 +286,7 @@ sources: configOpts: [NO_HZ, X86, DMI] ---- -[discrete] + [id="configuring-node-feature-discovery-operator-sources-pci-deviceClassWhitelist_{context}"] === sources.pci.deviceClassWhitelist @@ -303,7 +303,7 @@ sources: deviceClassWhitelist: ["0200", "03"] ---- -[discrete] + [id="configuring-node-feature-discovery-operator-sources-pci-deviceLabelFields_{context}"] === sources.pci.deviceLabelFields @@ -321,7 +321,7 @@ sources: With the example config above, NFD would publish labels such as `feature.node.kubernetes.io/pci-__.present=true` -[discrete] + [id="configuring-node-feature-discovery-operator-sources-usb-deviceClassWhitelist_{context}"] === sources.usb.deviceClassWhitelist @@ -339,7 +339,7 @@ sources: deviceClassWhitelist: ["ef", "ff"] ---- -[discrete] + [id="configuring-node-feature-discovery-operator-sources-usb-deviceLabelFields_{context}"] === sources.usb.deviceLabelFields @@ -357,7 +357,7 @@ sources: With the example config above, NFD would publish labels like: `feature.node.kubernetes.io/usb-_.present=true`. 
-[discrete] + [id="configuring-node-feature-discovery-operator-sources-custom_{context}"] === sources.custom diff --git a/modules/psap-driver-toolkit.adoc b/modules/psap-driver-toolkit.adoc index 78fcd1cc671d..36903e08464a 100644 --- a/modules/psap-driver-toolkit.adoc +++ b/modules/psap-driver-toolkit.adoc @@ -6,7 +6,7 @@ [id="about-driver-toolkit_{context}"] = About the Driver Toolkit -[discrete] + == Background The Driver Toolkit is a container image in the {product-title} payload used as a base image on which you can build driver containers. The Driver Toolkit image includes the kernel packages commonly required as dependencies to build or install kernel modules, as well as a few tools needed in driver containers. The version of these packages will match the kernel version running on the {op-system-first} nodes in the corresponding {product-title} release. @@ -35,7 +35,7 @@ The Driver Toolkit also has several tools that are commonly needed to build and * `kernel-abi-whitelists` * dependencies for the above -[discrete] + == Purpose Prior to the Driver Toolkit's existence, users would install kernel packages in a pod or build config on {product-title} using link:https://www.openshift.com/blog/how-to-use-entitled-image-builds-to-build-drivercontainers-with-ubi-on-openshift[entitled builds] or by installing from the kernel RPMs in the hosts `machine-os-content`. The Driver Toolkit simplifies the process by removing the entitlement step, and avoids the privileged operation of accessing the machine-os-content in a pod. The Driver Toolkit can also be used by partners who have access to pre-released {product-title} versions to prebuild driver-containers for their hardware devices for future {product-title} releases. 
diff --git a/modules/psap-node-feature-discovery-operator.adoc b/modules/psap-node-feature-discovery-operator.adoc index af3e0ae4a5ff..62c5ac85c8f5 100644 --- a/modules/psap-node-feature-discovery-operator.adoc +++ b/modules/psap-node-feature-discovery-operator.adoc @@ -20,7 +20,7 @@ The Node Feature Discovery Operator (NFD) manages the detection of hardware feat The NFD Operator can be found on the Operator Hub by searching for “Node Feature Discovery”. ifdef::operators[] -[discrete] + == Project link:https://github.com/openshift/cluster-nfd-operator[cluster-nfd-operator] diff --git a/modules/psap-node-feature-discovery-topology-updater-command-reference.adoc b/modules/psap-node-feature-discovery-topology-updater-command-reference.adoc index 3826b2bdd245..769be8dd4016 100644 --- a/modules/psap-node-feature-discovery-topology-updater-command-reference.adoc +++ b/modules/psap-node-feature-discovery-topology-updater-command-reference.adoc @@ -13,7 +13,7 @@ To view available command-line flags, run the `nfd-topology-updater -help` comma $ podman run gcr.io/k8s-staging-nfd/node-feature-discovery:master nfd-topology-updater -help ---- -[discrete] + [id="nfd-topology-updater-ca-file_{context}"] == -ca-file @@ -32,7 +32,7 @@ The `-ca-file` flag must be specified together with the `-cert-file` and `-key-f $ nfd-topology-updater -ca-file=/opt/nfd/ca.crt -cert-file=/opt/nfd/updater.crt -key-file=/opt/nfd/updater.key ---- -[discrete] + [id="nfd-topology-updater-cert-file_{context}"] == -cert-file @@ -51,13 +51,13 @@ The `-cert-file` flag must be specified together with the `-ca-file` and `-key-f $ nfd-topology-updater -cert-file=/opt/nfd/updater.crt -key-file=/opt/nfd/updater.key -ca-file=/opt/nfd/ca.crt ---- -[discrete] + [id="nfd-topology-updater-help_{context}"] == -h, -help Print usage and exit. 
-[discrete] + [id="nfd-topology-updater-key-file_{context}"] == -key-file @@ -76,7 +76,7 @@ The `-key-file` flag must be specified together with the `-ca-file` and `-cert-f $ nfd-topology-updater -key-file=/opt/nfd/updater.key -cert-file=/opt/nfd/updater.crt -ca-file=/opt/nfd/ca.crt ---- -[discrete] + [id="nfd-topology-updater-kubelet-config-file_{context}"] == -kubelet-config-file @@ -91,7 +91,7 @@ Default: `/host-var/lib/kubelet/config.yaml` $ nfd-topology-updater -kubelet-config-file=/var/lib/kubelet/config.yaml ---- -[discrete] + [id="nfd-topology-updater-no-publish_{context}"] == -no-publish @@ -118,7 +118,7 @@ Default: `false` $ nfd-topology-updater -oneshot -no-publish ---- -[discrete] + [id="nfd-topology-updater-podresources-socket_{context}"] == -podresources-socket @@ -132,7 +132,7 @@ Default: `/host-var/liblib/kubelet/pod-resources/kubelet.sock` $ nfd-topology-updater -podresources-socket=/var/lib/kubelet/pod-resources/kubelet.sock ---- -[discrete] + [id="nfd-topology-updater-server_{context}"] == -server @@ -146,7 +146,7 @@ Default: `localhost:8080` $ nfd-topology-updater -server=nfd-master.nfd.svc.cluster.local:443 ---- -[discrete] + [id="nfd-topology-updater-server-name-override_{context}"] == -server-name-override @@ -160,7 +160,7 @@ Default: empty $ nfd-topology-updater -server-name-override=localhost ---- -[discrete] + [id="nfd-topology-updater-sleep-interval_{context}"] == -sleep-interval @@ -174,13 +174,13 @@ Default: `60s` $ nfd-topology-updater -sleep-interval=1h ---- -[discrete] + [id="nfd-topology-updater-version_{context}"] == -version Print version and exit. 
-[discrete] + [id="nfd-topology-updater-watch-namespace_{context}"] == -watch-namespace diff --git a/modules/ptp-overview-of-gnss-grandmaster-clock.adoc b/modules/ptp-overview-of-gnss-grandmaster-clock.adoc index 9ae039beef3f..8b1cd6394cbb 100644 --- a/modules/ptp-overview-of-gnss-grandmaster-clock.adoc +++ b/modules/ptp-overview-of-gnss-grandmaster-clock.adoc @@ -29,7 +29,7 @@ DPLL provides clock synchronization between different PTP nodes in the network. DPLL compares the phase of the local system clock signal with the phase of the incoming synchronization signal, for example, PTP messages from the PTP grandmaster clock. The DPLL continuously adjusts the local clock frequency and phase to minimize the phase difference between the local clock and the reference clock. -[discrete] + [id="handling-leap-second-events-in-gnss_{context}"] == Handling leap second events in GNSS-synced PTP grandmaster clocks diff --git a/modules/quotas-and-limits-ibm-cloud.adoc b/modules/quotas-and-limits-ibm-cloud.adoc index da06b2c73280..c02b4ea850b5 100644 --- a/modules/quotas-and-limits-ibm-cloud.adoc +++ b/modules/quotas-and-limits-ibm-cloud.adoc @@ -10,12 +10,12 @@ The {product-title} cluster uses a number of {ibm-cloud-name} components, and th For a comprehensive list of the default {ibm-cloud-name} quotas and service limits, see {ibm-cloud-name}'s documentation for link:https://cloud.ibm.com/docs/vpc?topic=vpc-quotas[Quotas and service limits]. -[discrete] + == Virtual Private Cloud (VPC) Each {product-title} cluster creates its own VPC. The default quota of VPCs per region is 10 and will allow 10 clusters. To have more than 10 clusters in a single region, you must increase this quota. -[discrete] + == Application load balancer By default, each cluster creates three application load balancers (ALBs): @@ -28,7 +28,7 @@ You can create additional `LoadBalancer` service objects to create additional AL VPC ALBs are supported. Classic ALBs are not supported for {ibm-cloud-name}. 
-[discrete] + == Floating IP address By default, the installation program distributes control plane and compute machines across all availability zones within a region to provision the cluster in a highly available configuration. In each availability zone, a public gateway is created and requires a separate floating IP address. @@ -41,7 +41,7 @@ The default quota for a floating IP address is 20 addresses per availability zon {ibm-cloud-name} can support up to 19 clusters per region in an account. If you plan to have more than 19 default clusters, you must increase this quota. -[discrete] + == Virtual Server Instances (VSI) By default, a cluster creates VSIs using `bx2-4x16` profiles, which includes the following resources by default: @@ -82,7 +82,7 @@ For more information, see {ibm-cloud-name}'s documentation on link:https://cloud If you plan to exceed the resources stated in the table, you must increase your {ibm-cloud-name} account quota. -[discrete] + == Block Storage Volumes For each VPC machine, a block storage device is attached for its boot volume. The default cluster configuration creates seven VPC machines, resulting in seven block storage volumes. Additional Kubernetes persistent volume claims (PVCs) of the {ibm-cloud-name} storage class create additional block storage volumes. The default quota of VPC block storage volumes are 300 per region. To have more than 300 volumes, you must increase this quota. diff --git a/modules/quotas-and-limits-ibm-power-vs.adoc b/modules/quotas-and-limits-ibm-power-vs.adoc index a7866d0c206f..2b1585687d04 100644 --- a/modules/quotas-and-limits-ibm-power-vs.adoc +++ b/modules/quotas-and-limits-ibm-power-vs.adoc @@ -10,12 +10,12 @@ The {product-title} cluster uses several {ibm-cloud-name} and {ibm-power-server- For a comprehensive list of the default {ibm-cloud-name} quotas and service limits, see the {ibm-cloud-name} documentation for link:https://cloud.ibm.com/docs/vpc?topic=vpc-quotas[Quotas and service limits]. 
-[discrete] + == Virtual Private Cloud Each {product-title} cluster creates its own Virtual Private Cloud (VPC). The default quota of VPCs per region is 10. If you have 10 VPCs created, you will need to increase your quota before attempting an installation. -[discrete] + == Application load balancer By default, each cluster creates two application load balancers (ALBs): @@ -27,7 +27,6 @@ You can create additional `LoadBalancer` service objects to create additional AL VPC ALBs are supported. Classic ALBs are not supported for {ibm-power-server-name}. -[discrete] == Cloud connections There is a limit of two cloud connections per {ibm-power-server-name} instance. It is recommended that you have only one cloud connection in your {ibm-power-server-name} instance to serve your cluster. @@ -37,17 +36,15 @@ There is a limit of two cloud connections per {ibm-power-server-name} instance. Cloud Connections are no longer supported in `dal10`. A transit gateway is used instead. ==== -[discrete] + == Dynamic Host Configuration Protocol Service There is a limit of one Dynamic Host Configuration Protocol (DHCP) service per {ibm-power-server-name} instance. -[discrete] == Networking Due to networking limitations, there is a restriction of one OpenShift cluster installed through IPI per zone per account. This is not configurable. 
-[discrete] == Virtual Server Instances By default, a cluster creates server instances with the following resources : diff --git a/modules/recommended-ibm-power-system-requirements.adoc b/modules/recommended-ibm-power-system-requirements.adoc index 0877e2291516..254c1d3e4de9 100644 --- a/modules/recommended-ibm-power-system-requirements.adoc +++ b/modules/recommended-ibm-power-system-requirements.adoc @@ -7,12 +7,12 @@ [id="recommended-ibm-power-system-requirements_{context}"] = Recommended {ibm-power-title} system requirements -[discrete] + == Hardware requirements * Six LPARs across multiple PowerVM servers -[discrete] + == Operating system requirements * One instance of an {ibm-power-name}9 or {ibm-power-name}10 processor-based system @@ -23,19 +23,19 @@ On your {ibm-power-name} instance, set up: * Two LPARs for {product-title} compute machines * One LPAR for the temporary {product-title} bootstrap machine -[discrete] + == Disk storage for the {ibm-power-title} guest virtual machines * Local storage, or storage provisioned by the Virtual I/O Server using vSCSI, NPIV (N-Port ID Virtualization) or SSP (shared storage pools) -[discrete] + == Network for the PowerVM guest virtual machines * Dedicated physical adapter, or SR-IOV virtual function * Available by the Virtual I/O Server using Shared Ethernet Adapter * Virtualized by the Virtual I/O Server using {ibm-name} vNIC -[discrete] + == Storage / main memory * 120 GB / 32 GB for {product-title} control plane machines diff --git a/modules/running-network-verification-manually-cli.adoc b/modules/running-network-verification-manually-cli.adoc index 7a43a8cc30c0..8888c4c6a32b 100644 --- a/modules/running-network-verification-manually-cli.adoc +++ b/modules/running-network-verification-manually-cli.adoc @@ -3,7 +3,7 @@ // * networking/network-verification.adoc :_mod-docs-content-type: PROCEDURE -[discrete] + [id="running-network-verification-manually-cli_{context}"] = Running the network verification manually using the 
CLI diff --git a/modules/running-network-verification-manually-ocm.adoc b/modules/running-network-verification-manually-ocm.adoc index 7549d1294e1a..f84bcbfd35f1 100644 --- a/modules/running-network-verification-manually-ocm.adoc +++ b/modules/running-network-verification-manually-ocm.adoc @@ -8,7 +8,7 @@ ifdef::openshift-dedicated[] = Running the network verification manually endif::openshift-dedicated[] ifdef::openshift-rosa[] -[discrete] + [id="running-network-verification-manually-ocm_{context}"] = Running the network verification manually using {cluster-manager} endif::openshift-rosa[] diff --git a/modules/security-context-constraints-psa-sync-exclusions.adoc b/modules/security-context-constraints-psa-sync-exclusions.adoc index 5244d42d93db..01138a9c8822 100644 --- a/modules/security-context-constraints-psa-sync-exclusions.adoc +++ b/modules/security-context-constraints-psa-sync-exclusions.adoc @@ -28,7 +28,7 @@ If necessary, you can enable synchronization again by using one of the following If you force synchronization by adding this label, then any modified pod security admission labels will be overwritten. ==== -[discrete] + == Permanently disabled namespaces endif::openshift-dedicated,openshift-rosa[] @@ -45,7 +45,7 @@ ifndef::openshift-dedicated,openshift-rosa[] endif::openshift-dedicated,openshift-rosa[] ifndef::openshift-dedicated,openshift-rosa[] -[discrete] + == Initially disabled namespaces By default, all namespaces that have an `openshift-` prefix have pod security admission synchronization disabled initially. You can enable synchronization for user-created [x-]`openshift-*` namespaces and for the `openshift-operators` namespace. 
diff --git a/modules/serverless-kn-container.adoc b/modules/serverless-kn-container.adoc index 457cb7989878..5a5f65dd82f3 100644 --- a/modules/serverless-kn-container.adoc +++ b/modules/serverless-kn-container.adoc @@ -10,7 +10,7 @@ You can use the `kn container add` command to print YAML container spec to stand The `kn container add` command accepts all container-related flags that are supported for use with the `kn service create` command. The `kn container add` command can also be chained by using UNIX pipes (`|`) to create multiple container definitions at once. -[discrete] + [id="serverless-kn-container-examples_{context}"] == Example commands diff --git a/modules/service-ca-certificates.adoc b/modules/service-ca-certificates.adoc index 9ccd56e4ea1e..f6132de11dbb 100644 --- a/modules/service-ca-certificates.adoc +++ b/modules/service-ca-certificates.adoc @@ -5,13 +5,13 @@ [id="service-ca-certificates_{context}"] = Service CA certificates -[discrete] + == Purpose `service-ca` is an Operator that creates a self-signed CA when an {product-title} cluster is deployed. -[discrete] + == Expiration A custom expiration term is not supported. The self-signed CA is stored in a @@ -50,12 +50,12 @@ cluster are restarted, which ensures that pods are using service serving certificates issued by the new service CA. ==== -[discrete] + == Management These certificates are managed by the system and not the user. -[discrete] + == Services Services that use service CA certificates include: diff --git a/modules/storage-ephemeral-storage-types.adoc b/modules/storage-ephemeral-storage-types.adoc index e4f64a4d1289..98a02254fb3d 100644 --- a/modules/storage-ephemeral-storage-types.adoc +++ b/modules/storage-ephemeral-storage-types.adoc @@ -11,7 +11,7 @@ Ephemeral local storage is always made available in the primary partition. There are two basic ways of creating the primary partition: root and runtime. 
-[discrete] + == Root This partition holds the kubelet root directory, `/var/lib/kubelet/` by @@ -22,7 +22,7 @@ layers. Kubelet manages shared access and isolation of this partition. This partition is ephemeral, and applications cannot expect any performance SLAs, such as disk IOPS, from this partition. -[discrete] + == Runtime This is an optional partition that runtimes can use for overlay diff --git a/modules/understanding-pod-identity-webhook-workflow-in-user-defined-projects.adoc b/modules/understanding-pod-identity-webhook-workflow-in-user-defined-projects.adoc index dea8fab285fe..8dd5633eaad2 100644 --- a/modules/understanding-pod-identity-webhook-workflow-in-user-defined-projects.adoc +++ b/modules/understanding-pod-identity-webhook-workflow-in-user-defined-projects.adoc @@ -19,7 +19,7 @@ You can use the pod identity webhook to enable a service account in a user-defin To enable the pod identity webhook for a pod, you must create a service account with an `eks.amazonaws.com/role-arn` annotation in your project. The annotation must reference the Amazon Resource Name (ARN) of the AWS IAM role that you want the service account to assume. You must also reference the service account in your `Pod` specification and deploy the pod in the same project as the service account. -[discrete] + [id="pod-identity-webhook-workflow-in-user-defined-projects_{context}"] == Pod identity webhook workflow in user-defined projects diff --git a/modules/update-cluster-version-object.adoc b/modules/update-cluster-version-object.adoc index 05dc37f545fd..7cca7be80ca0 100644 --- a/modules/update-cluster-version-object.adoc +++ b/modules/update-cluster-version-object.adoc @@ -20,7 +20,7 @@ The CVO continually reconciles the cluster with the target state declared in the When the desired release differs from the actual release, that reconciliation updates the cluster. 
//to-do: this might be heading overload, consider deleting this heading if the context switch from the previous paragraph to this content is smooth enough to not require one. -[discrete] + == Update availability data The `ClusterVersion` resource also contains information about updates that are available to the cluster. diff --git a/modules/vsphere-problem-detector-operator.adoc b/modules/vsphere-problem-detector-operator.adoc index 740f9ce9cbca..6579ca0bd778 100644 --- a/modules/vsphere-problem-detector-operator.adoc +++ b/modules/vsphere-problem-detector-operator.adoc @@ -14,12 +14,12 @@ The {operator-name} checks clusters that are deployed on vSphere for common inst The {operator-name} is only started by the Cluster Storage Operator when the Cluster Storage Operator detects that the cluster is deployed on vSphere. ==== -[discrete] + == Configuration No configuration is required. -[discrete] + == Notes * The Operator supports {product-title} installations on vSphere. diff --git a/modules/ztp-ztp-building-blocks.adoc b/modules/ztp-ztp-building-blocks.adoc index 5c07abd5b45f..b5afa92a2c37 100644 --- a/modules/ztp-ztp-building-blocks.adoc +++ b/modules/ztp-ztp-building-blocks.adoc @@ -20,7 +20,7 @@ The deployment of the clusters includes: * Deploying profile Operators and performing any needed software-related configuration, such as performance profile, PTP, and SR-IOV -[discrete] + [id="ztp-overview-managed-site-installation-process_{context}"] == Overview of the managed site installation process diff --git a/networking/configuring_ingress_cluster_traffic/configuring-ingress-cluster-traffic-ingress-controller.adoc b/networking/configuring_ingress_cluster_traffic/configuring-ingress-cluster-traffic-ingress-controller.adoc index bce0fc59978f..ee2cb3cf1812 100644 --- a/networking/configuring_ingress_cluster_traffic/configuring-ingress-cluster-traffic-ingress-controller.adoc +++ 
b/networking/configuring_ingress_cluster_traffic/configuring-ingress-cluster-traffic-ingress-controller.adoc @@ -73,7 +73,7 @@ include::modules/nw-ingress-sharding-namespace-labels.adoc[leveloffset=+2] // Creating a route for Ingress Controller sharding include::modules/nw-ingress-sharding-route-configuration.adoc[leveloffset=+2] -[discrete] + [id="additional-resources_ingress-sharding"] === Additional resources diff --git a/networking/enable-cluster-wide-proxy.adoc b/networking/enable-cluster-wide-proxy.adoc index 29d1a907193f..5a04119d732b 100644 --- a/networking/enable-cluster-wide-proxy.adoc +++ b/networking/enable-cluster-wide-proxy.adoc @@ -68,7 +68,7 @@ include::modules/nw-proxy-remove.adoc[leveloffset=+1] // Verifying the cluster-wide proxy configuration include::modules/nw-verify-proxy-configuration.adoc[leveloffset=+1] -[discrete] + [role="_additional-resources"] == Additional resources diff --git a/networking/k8s_nmstate/k8s-nmstate-updating-node-network-config.adoc b/networking/k8s_nmstate/k8s-nmstate-updating-node-network-config.adoc index ca1596a6f37b..c752c01bed6c 100644 --- a/networking/k8s_nmstate/k8s-nmstate-updating-node-network-config.adoc +++ b/networking/k8s_nmstate/k8s-nmstate-updating-node-network-config.adoc @@ -17,7 +17,7 @@ include::modules/virt-viewing-network-state-of-node-console.adoc[leveloffset=+1] // The `NodeNetworkConfigurationPolicy` manifest file include::modules/node-network-configuration-policy-file.adoc[leveloffset=+1] -[discrete] + [role="_additional-resources"] == Additional resources * xref:../../networking/k8s_nmstate/k8s-nmstate-updating-node-network-config.adoc#virt-nmstate-example-policy-configurations_{context}[Example policy configurations for different interfaces] @@ -37,7 +37,7 @@ include::modules/virt-delete-node-network-config.adoc[leveloffset=+2] == Managing policy by using the CLI include::modules/virt-creating-interface-on-nodes.adoc[leveloffset=+2] -[discrete] + [role="_additional-resources"] == Additional 
resources * xref:../../networking/k8s_nmstate/k8s-nmstate-updating-node-network-config.adoc#virt-example-nmstate-multiple-interfaces_{context}[Example for creating multiple interfaces in the same policy] diff --git a/nodes/index.adoc b/nodes/index.adoc index fe1323080547..e94a197c8c8c 100644 --- a/nodes/index.adoc +++ b/nodes/index.adoc @@ -29,7 +29,7 @@ DNS:: Cluster DNS is a DNS server which serves DNS records for Kubernetes servic image::295_OpenShift_Nodes_Overview_1222.png[Overview of control plane and worker node] -[discrete] + === Read operations The read operations allow an administrator or a developer to get information about nodes in an {product-title} cluster. @@ -39,7 +39,7 @@ The read operations allow an administrator or a developer to get information abo * xref:../nodes/nodes/nodes-nodes-viewing.adoc#nodes-nodes-viewing-listing-pods_nodes-nodes-viewing[List pods running on a node]. ifndef::openshift-rosa,openshift-dedicated[] -[discrete] + === Management operations As an administrator, you can easily manage a node in an {product-title} cluster @@ -54,7 +54,7 @@ through several tasks: * xref:../nodes/nodes/nodes-nodes-working.adoc#deleting-nodes[Delete a node from a cluster] by scaling down the cluster using a compute machine set. To delete a node from a bare-metal cluster, you must first drain all pods on the node and then manually delete the node. endif::openshift-rosa,openshift-dedicated[] -[discrete] + === Enhancement operations {product-title} allows you to do more than just access and manage nodes; as an administrator, you can perform the following tasks on nodes to make the cluster more efficient, application-friendly, and to provide a better environment for your developers. @@ -75,7 +75,7 @@ endif::openshift-rosa,openshift-dedicated[] A pod is one or more containers deployed together on a node. As a cluster administrator, you can define a pod, assign it to run on a healthy node that is ready for scheduling, and manage. 
A pod runs as long as the containers are running. You cannot change a pod once it is defined and is running. Some operations you can perform when working with pods are: -[discrete] + === Read operations As an administrator, you can get information about pods in a project through the following tasks: @@ -83,7 +83,6 @@ As an administrator, you can get information about pods in a project through the * xref:../nodes/pods/nodes-pods-viewing.adoc#nodes-pods-viewing-project_nodes-pods-viewing[List pods associated with a project], including information such as the number of replicas and restarts, current status, and age. * xref:../nodes/pods/nodes-pods-viewing.adoc#nodes-pods-viewing-usage_nodes-pods-viewing[View pod usage statistics] such as CPU, memory, and storage consumption. -[discrete] === Management operations The following list of tasks provides an overview of how an administrator can manage pods in an {product-title} cluster. @@ -102,7 +101,7 @@ endif::openshift-rosa,openshift-dedicated[] * xref:../nodes/pods/nodes-pods-configuring.adoc#nodes-pods-configuring-bandwidth_nodes-pods-configuring[Limit both egress and ingress traffic on a pod]. * xref:../nodes/containers/nodes-containers-volumes.adoc#nodes-containers-volumes[Add and remove volumes to and from any object that has a pod template]. A volume is a mounted file system available to all the containers in a pod. Container storage is ephemeral; you can use volumes to persist container data. -[discrete] + === Enhancement operations You can work with pods more easily and efficiently with the help of various tools and features available in {product-title}. The following operations involve using those tools and features to better manage pods. @@ -183,76 +182,76 @@ endif::openshift-rosa,openshift-dedicated[] This glossary defines common terms that are used in the _node_ content. 
-[discrete] + [id="commonterms-node-container"] Container:: It is a lightweight and executable image that comprises software and all its dependencies. Containers virtualize the operating system, as a result, you can run containers anywhere from a data center to a public or private cloud to even a developer's laptop. -[discrete] + [id="commonterms-node-daemonset"] Daemon set:: Ensures that a replica of the pod runs on eligible nodes in an {product-title} cluster. -[discrete] + [id="commonterms-node-egress"] egress:: The process of data sharing externally through a network’s outbound traffic from a pod. -[discrete] + [id="commonterms-node-gc"] garbage collection:: The process of cleaning up cluster resources, such as terminated containers and images that are not referenced by any running pods. //cannot create the required namespace for these operators ifndef::openshift-rosa,openshift-dedicated[] -[discrete] + [id="commonterms-node-hpa"] Horizontal Pod Autoscaler(HPA):: Implemented as a Kubernetes API resource and a controller. You can use the HPA to specify the minimum and maximum number of pods that you want to run. You can also specify the CPU or memory utilization that your pods should target. The HPA scales out and scales in pods when a given CPU or memory threshold is crossed. endif::openshift-rosa,openshift-dedicated[] -[discrete] + [id="commonterms-node-ingress"] Ingress:: Incoming traffic to a pod. -[discrete] + [id="commonterms-node-job"] Job:: A process that runs to completion. A job creates one or more pod objects and ensures that the specified pods are successfully completed. -[discrete] + [id="commonterms-node-label"] Labels:: You can use labels, which are key-value pairs, to organise and select subsets of objects, such as a pod. -[discrete] + [id="commonterms-node-nodenew"] Node:: A worker machine in the {product-title} cluster. A node can be either be a virtual machine (VM) or a physical machine. 
-[discrete] + [id="commonterms-node-tuningop"] Node Tuning Operator:: You can use the Node Tuning Operator to manage node-level tuning by using the TuneD daemon. It ensures custom tuning specifications are passed to all containerized TuneD daemons running in the cluster in the format that the daemons understand. The daemons run on all nodes in the cluster, one per node. -[discrete] + [id="commonterms-node-self-remediationop"] Self Node Remediation Operator:: The Operator runs on the cluster nodes and identifies and reboots nodes that are unhealthy. -[discrete] + [id="commonterms-node-podnew"] Pod:: One or more containers with shared resources, such as volume and IP addresses, running in your {product-title} cluster. A pod is the smallest compute unit defined, deployed, and managed. -[discrete] + [id="commonterms-node-toleration"] Toleration:: Indicates that the pod is allowed (but not required) to be scheduled on nodes or node groups with matching taints. You can use tolerations to enable the scheduler to schedule pods with matching taints. -[discrete] + [id="commonterms-node-taint"] Taint:: A core object that comprises a key,value, and effect. Taints and tolerations work together to ensure that pods are not scheduled on irrelevant nodes. 
diff --git a/openshift_images/configuring-samples-operator.adoc b/openshift_images/configuring-samples-operator.adoc index 9416855d47df..610a245ec39e 100644 --- a/openshift_images/configuring-samples-operator.adoc +++ b/openshift_images/configuring-samples-operator.adoc @@ -50,7 +50,7 @@ endif::openshift-rosa,openshift-dedicated[] include::modules/samples-operator-overview.adoc[leveloffset=+1] -[discrete] + [role="_additional-resources"] == Additional resources @@ -77,7 +77,7 @@ endif::openshift-rosa,openshift-dedicated[] include::modules/images-samples-operator-deprecated-image-stream.adoc[leveloffset=+1] -[discrete] + [role="_additional-resources"] == Additional resources diff --git a/operators/operator-reference.adoc b/operators/operator-reference.adoc index d2362fb76d13..3cd96929570d 100644 --- a/operators/operator-reference.adoc +++ b/operators/operator-reference.adoc @@ -28,7 +28,7 @@ include::modules/baremetal-event-relay.adoc[leveloffset=+1] include::modules/cloud-credential-operator.adoc[leveloffset=+1] [role="_additional-resources"] -[discrete] + [id="additional-resources_cluster-op-ref-cco"] === Additional resources * xref:../authentication/managing_cloud_provider_credentials/about-cloud-credential-operator.adoc#about-cloud-credential-operator[About the Cloud Credential Operator] @@ -86,7 +86,7 @@ include::modules/console-operator.adoc[leveloffset=+1] include::modules/control-plane-machine-set-operator.adoc[leveloffset=+1] [role="_additional-resources"] -[discrete] + [id="additional-resources_cluster-op-ref-cpmso"] === Additional resources @@ -126,7 +126,7 @@ include::modules/operator-marketplace.adoc[leveloffset=+1] include::modules/node-tuning-operator.adoc[leveloffset=+1] -[discrete] + [role="_additional-resources"] [id="cluster-operators-ref-nto-addtl-resources"] === Additional resources @@ -138,19 +138,19 @@ include::modules/cluster-openshift-controller-manager-operators.adoc[leveloffset [id="cluster-operators-ref-olm"] == Operator Lifecycle Manager 
Operators -[discrete] + include::modules/olm-overview.adoc[leveloffset=+2] -[discrete] + include::modules/olm-architecture.adoc[leveloffset=+2] -[discrete] + include::modules/olm-arch-olm-operator.adoc[leveloffset=+2] -[discrete] + include::modules/olm-arch-catalog-operator.adoc[leveloffset=+2] -[discrete] + include::modules/olm-arch-catalog-registry.adoc[leveloffset=+2] [role="_additional-resources"] -[discrete] + [id="cluster-operators-ref-olm-addtl-resources"] === Additional resources * For more information, see the sections on xref:../operators/understanding/olm/olm-understanding-olm.adoc#olm-understanding-olm[understanding Operator Lifecycle Manager (OLM)]. diff --git a/rest_api/objects/index.adoc b/rest_api/objects/index.adoc index b2ff1218cb3d..53796d5802ef 100644 --- a/rest_api/objects/index.adoc +++ b/rest_api/objects/index.adoc @@ -23,7 +23,7 @@ Type:: Required:: - `items` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -64,7 +64,7 @@ Type:: Required:: - `items` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -105,7 +105,7 @@ Type:: Required:: - `items` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -146,7 +146,7 @@ Type:: Required:: - `items` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -187,7 +187,7 @@ Type:: Required:: - `items` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -228,7 +228,7 @@ Type:: Required:: - `items` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -269,7 +269,7 @@ Type:: Required:: - `items` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -310,7 +310,7 @@ Type:: Required:: - `items` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -351,7 +351,7 @@ Type:: Required:: - `items` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -392,7 +392,7 @@ Type:: Required:: - `items` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -433,7 +433,7 @@ Type:: Required:: - `items` -[discrete] + === Schema 
[cols="1,1,1",options="header"] @@ -474,7 +474,7 @@ Type:: Required:: - `items` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -515,7 +515,7 @@ Type:: Required:: - `items` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -556,7 +556,7 @@ Type:: Required:: - `items` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -597,7 +597,7 @@ Type:: Required:: - `items` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -638,7 +638,7 @@ Type:: Required:: - `items` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -681,7 +681,7 @@ Type:: Required:: - `items` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -724,7 +724,7 @@ Type:: Required:: - `items` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -767,7 +767,7 @@ Type:: Required:: - `items` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -810,7 +810,7 @@ Type:: Required:: - `items` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -853,7 +853,7 @@ Type:: Required:: - `items` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -896,7 +896,7 @@ Type:: Required:: - `items` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -939,7 +939,7 @@ Type:: Required:: - `items` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -982,7 +982,7 @@ Type:: Required:: - `items` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -1025,7 +1025,7 @@ Type:: Required:: - `items` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -1068,7 +1068,7 @@ Type:: Required:: - `items` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -1111,7 +1111,7 @@ Type:: Required:: - `items` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -1154,7 +1154,7 @@ Type:: Required:: - `items` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -1197,7 +1197,7 @@ Type:: Required:: - `items` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -1240,7 +1240,7 @@ Type:: Required:: - `items` -[discrete] + === 
Schema [cols="1,1,1",options="header"] @@ -1283,7 +1283,7 @@ Type:: Required:: - `items` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -1326,7 +1326,7 @@ Type:: Required:: - `items` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -1369,7 +1369,7 @@ Type:: Required:: - `items` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -1412,7 +1412,7 @@ Type:: Required:: - `items` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -1455,7 +1455,7 @@ Type:: Required:: - `items` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -1498,7 +1498,7 @@ Type:: Required:: - `items` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -1541,7 +1541,7 @@ Type:: Required:: - `items` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -1584,7 +1584,7 @@ Type:: Required:: - `items` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -1627,7 +1627,7 @@ Type:: Required:: - `items` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -1670,7 +1670,7 @@ Type:: Required:: - `items` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -1713,7 +1713,7 @@ Type:: Required:: - `items` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -1756,7 +1756,7 @@ Type:: Required:: - `items` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -1810,7 +1810,7 @@ Type:: `object` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -1843,7 +1843,7 @@ Type:: `object` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -1877,7 +1877,7 @@ Required:: - `type` - `supported` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -1910,7 +1910,7 @@ Type:: Required:: - `items` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -1951,7 +1951,7 @@ Type:: Required:: - `items` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -1992,7 +1992,7 @@ Type:: Required:: - `items` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -2033,7 +2033,7 @@ Type:: Required:: - `items` 
-[discrete] + === Schema [cols="1,1,1",options="header"] @@ -2074,7 +2074,7 @@ Type:: Required:: - `items` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -2115,7 +2115,7 @@ Type:: Required:: - `items` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -2156,7 +2156,7 @@ Type:: Required:: - `items` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -2197,7 +2197,7 @@ Type:: Required:: - `items` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -2238,7 +2238,7 @@ Type:: Required:: - `items` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -2279,7 +2279,7 @@ Type:: Required:: - `items` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -2320,7 +2320,7 @@ Type:: Required:: - `items` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -2361,7 +2361,7 @@ Type:: Required:: - `items` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -2402,7 +2402,7 @@ Type:: Required:: - `items` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -2443,7 +2443,7 @@ Type:: Required:: - `items` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -2484,7 +2484,7 @@ Type:: Required:: - `items` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -2525,7 +2525,7 @@ Type:: Required:: - `items` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -2566,7 +2566,7 @@ Type:: Required:: - `items` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -2607,7 +2607,7 @@ Type:: Required:: - `items` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -2648,7 +2648,7 @@ Type:: `object` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -2689,7 +2689,7 @@ Type:: Required:: - `driver` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -2734,7 +2734,7 @@ Type:: Required:: - `items` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -2775,7 +2775,7 @@ Type:: Required:: - `name` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -2812,7 +2812,7 @@ Type:: 
Required:: - `items` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -2851,7 +2851,7 @@ Type:: `object` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -2884,7 +2884,7 @@ Type:: Required:: - `items` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -2923,7 +2923,7 @@ Type:: `object` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -2953,7 +2953,7 @@ Required:: - `type` - `status` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -2998,7 +2998,7 @@ Type:: Required:: - `items` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -3039,7 +3039,7 @@ Type:: Required:: - `items` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -3078,7 +3078,7 @@ Type:: `object` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -3129,7 +3129,7 @@ Type:: `object` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -3497,7 +3497,7 @@ Type:: Required:: - `items` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -3538,7 +3538,7 @@ Type:: Required:: - `items` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -3577,7 +3577,7 @@ Type:: `object` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -3731,7 +3731,7 @@ Type:: Required:: - `items` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -3772,7 +3772,7 @@ Type:: Required:: - `items` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -3811,7 +3811,7 @@ Type:: `object` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -3844,7 +3844,7 @@ Type:: Required:: - `items` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -3885,7 +3885,7 @@ Type:: Required:: - `items` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -3924,7 +3924,7 @@ Type:: `object` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -3959,7 +3959,7 @@ Type:: `object` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -3990,7 +3990,7 @@ Type:: `object` -[discrete] + === Schema 
[cols="1,1,1",options="header"] @@ -4029,7 +4029,7 @@ Type:: `object` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -4082,7 +4082,7 @@ Type:: Required:: - `items` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -4123,7 +4123,7 @@ Type:: `object` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -4164,7 +4164,7 @@ Type:: Required:: - `items` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -4205,7 +4205,7 @@ Type:: Required:: - `items` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -4244,7 +4244,7 @@ Type:: `object` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -4296,7 +4296,7 @@ Type:: `object` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -4326,7 +4326,7 @@ Required:: - `kind` - `name` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -4363,7 +4363,7 @@ Type:: Required:: - `items` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -4404,7 +4404,7 @@ Type:: Required:: - `items` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -4445,7 +4445,7 @@ Type:: Required:: - `items` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -4486,7 +4486,7 @@ Type:: Required:: - `items` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -4527,7 +4527,7 @@ Type:: Required:: - `items` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -4568,7 +4568,7 @@ Type:: Required:: - `items` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -4609,7 +4609,7 @@ Type:: Required:: - `items` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -4650,7 +4650,7 @@ Type:: Required:: - `items` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -4691,7 +4691,7 @@ Type:: Required:: - `items` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -4730,7 +4730,7 @@ Type:: `object` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -4759,7 +4759,7 @@ Type:: Required:: - `items` -[discrete] + === Schema 
[cols="1,1,1",options="header"] @@ -4800,7 +4800,7 @@ Type:: Required:: - `items` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -4841,7 +4841,7 @@ Type:: Required:: - `items` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -4882,7 +4882,7 @@ Type:: Required:: - `items` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -4923,7 +4923,7 @@ Type:: Required:: - `items` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -4964,7 +4964,7 @@ Type:: Required:: - `items` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -5005,7 +5005,7 @@ Type:: Required:: - `items` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -5046,7 +5046,7 @@ Type:: Required:: - `items` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -5087,7 +5087,7 @@ Type:: Required:: - `items` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -5128,7 +5128,7 @@ Type:: Required:: - `items` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -5169,7 +5169,7 @@ Type:: Required:: - `items` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -5208,7 +5208,7 @@ Type:: `object` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -5499,7 +5499,7 @@ Required:: - `reason` - `message` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -5546,7 +5546,7 @@ Type:: `object` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -5601,7 +5601,7 @@ Required:: - `version` - `kind` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -5636,7 +5636,7 @@ Type:: `object` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -5667,7 +5667,7 @@ Type:: `object` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -5721,7 +5721,7 @@ Type:: `object` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -5833,7 +5833,7 @@ Type:: `object` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -5906,7 +5906,7 @@ Required:: - `type` - `object` -[discrete] + === Schema 
[cols="1,1,1",options="header"] @@ -6010,7 +6010,7 @@ Type:: Required:: - `items` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -6051,7 +6051,7 @@ Type:: Required:: - `items` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -6092,7 +6092,7 @@ Type:: Required:: - `items` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -6133,7 +6133,7 @@ Type:: Required:: - `items` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -6174,7 +6174,7 @@ Type:: Required:: - `items` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -6215,7 +6215,7 @@ Type:: Required:: - `items` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -6256,7 +6256,7 @@ Type:: Required:: - `items` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -6297,7 +6297,7 @@ Type:: Required:: - `items` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -6338,7 +6338,7 @@ Type:: Required:: - `items` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -6379,7 +6379,7 @@ Type:: Required:: - `items` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -6420,7 +6420,7 @@ Type:: Required:: - `items` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -6461,7 +6461,7 @@ Type:: Required:: - `items` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -6502,7 +6502,7 @@ Type:: Required:: - `items` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -6543,7 +6543,7 @@ Type:: Required:: - `items` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -6584,7 +6584,7 @@ Type:: Required:: - `items` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -6625,7 +6625,7 @@ Type:: Required:: - `items` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -6666,7 +6666,7 @@ Type:: Required:: - `items` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -6707,7 +6707,7 @@ Type:: Required:: - `items` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -6748,7 +6748,7 @@ Type:: Required:: - 
`items` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -6789,7 +6789,7 @@ Type:: Required:: - `items` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -6830,7 +6830,7 @@ Type:: Required:: - `items` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -6871,7 +6871,7 @@ Type:: Required:: - `items` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -6912,7 +6912,7 @@ Type:: Required:: - `items` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -6953,7 +6953,7 @@ Type:: Required:: - `items` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -6994,7 +6994,7 @@ Type:: Required:: - `items` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -7035,7 +7035,7 @@ Type:: Required:: - `items` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -7076,7 +7076,7 @@ Type:: Required:: - `items` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -7117,7 +7117,7 @@ Type:: Required:: - `items` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -7158,7 +7158,7 @@ Type:: Required:: - `items` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -7199,7 +7199,7 @@ Type:: Required:: - `items` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -7240,7 +7240,7 @@ Type:: Required:: - `items` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -7281,7 +7281,7 @@ Type:: Required:: - `items` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -7322,7 +7322,7 @@ Type:: Required:: - `items` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -7363,7 +7363,7 @@ Type:: Required:: - `items` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -7404,7 +7404,7 @@ Type:: Required:: - `items` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -7445,7 +7445,7 @@ Type:: Required:: - `items` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -7486,7 +7486,7 @@ Type:: Required:: - `items` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ 
-7527,7 +7527,7 @@ Type:: Required:: - `items` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -7568,7 +7568,7 @@ Type:: Required:: - `items` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -7609,7 +7609,7 @@ Type:: Required:: - `items` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -7650,7 +7650,7 @@ Type:: Required:: - `items` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -7691,7 +7691,7 @@ Type:: Required:: - `items` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -7732,7 +7732,7 @@ Type:: Required:: - `items` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -7773,7 +7773,7 @@ Type:: Required:: - `items` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -7814,7 +7814,7 @@ Type:: Required:: - `items` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -7855,7 +7855,7 @@ Type:: Required:: - `items` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -7896,7 +7896,7 @@ Type:: Required:: - `items` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -7937,7 +7937,7 @@ Type:: Required:: - `items` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -7978,7 +7978,7 @@ Type:: Required:: - `items` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -8019,7 +8019,7 @@ Type:: Required:: - `items` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -8060,7 +8060,7 @@ Type:: Required:: - `items` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -8101,7 +8101,7 @@ Type:: Required:: - `items` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -8142,7 +8142,7 @@ Type:: Required:: - `items` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -8183,7 +8183,7 @@ Type:: Required:: - `items` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -8224,7 +8224,7 @@ Type:: Required:: - `items` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -8265,7 +8265,7 @@ Type:: Required:: - `items` -[discrete] + === Schema 
[cols="1,1,1",options="header"] @@ -8306,7 +8306,7 @@ Type:: Required:: - `items` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -8347,7 +8347,7 @@ Type:: Required:: - `items` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -8388,7 +8388,7 @@ Type:: Required:: - `items` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -8429,7 +8429,7 @@ Type:: Required:: - `items` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -8470,7 +8470,7 @@ Type:: Required:: - `items` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -8511,7 +8511,7 @@ Type:: Required:: - `items` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -8552,7 +8552,7 @@ Type:: Required:: - `items` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -8593,7 +8593,7 @@ Type:: Required:: - `items` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -8634,7 +8634,7 @@ Type:: Required:: - `items` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -8675,7 +8675,7 @@ Type:: Required:: - `items` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -8716,7 +8716,7 @@ Type:: Required:: - `items` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -8757,7 +8757,7 @@ Type:: Required:: - `items` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -8798,7 +8798,7 @@ Type:: Required:: - `items` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -8839,7 +8839,7 @@ Type:: Required:: - `items` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -8880,7 +8880,7 @@ Type:: Required:: - `items` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -8921,7 +8921,7 @@ Type:: Required:: - `items` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -8962,7 +8962,7 @@ Type:: Required:: - `items` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -9003,7 +9003,7 @@ Type:: Required:: - `items` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -9044,7 +9044,7 @@ Type:: Required:: - 
`items` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -9085,7 +9085,7 @@ Type:: Required:: - `items` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -9126,7 +9126,7 @@ Type:: Required:: - `items` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -9167,7 +9167,7 @@ Type:: Required:: - `items` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -9208,7 +9208,7 @@ Type:: Required:: - `items` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -9249,7 +9249,7 @@ Type:: Required:: - `items` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -9290,7 +9290,7 @@ Type:: Required:: - `items` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -9331,7 +9331,7 @@ Type:: Required:: - `items` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -9372,7 +9372,7 @@ Type:: Required:: - `items` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -9413,7 +9413,7 @@ Type:: Required:: - `items` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -9454,7 +9454,7 @@ Type:: Required:: - `items` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -9495,7 +9495,7 @@ Type:: Required:: - `items` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -9536,7 +9536,7 @@ Type:: Required:: - `items` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -9577,7 +9577,7 @@ Type:: Required:: - `items` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -9618,7 +9618,7 @@ Type:: Required:: - `items` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -9659,7 +9659,7 @@ Type:: Required:: - `items` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -9700,7 +9700,7 @@ Type:: Required:: - `items` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -9741,7 +9741,7 @@ Type:: Required:: - `items` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -9782,7 +9782,7 @@ Type:: Required:: - `items` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ 
-9823,7 +9823,7 @@ Type:: Required:: - `items` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -9864,7 +9864,7 @@ Type:: Required:: - `items` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -9905,7 +9905,7 @@ Type:: Required:: - `items` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -9946,7 +9946,7 @@ Type:: Required:: - `items` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -9987,7 +9987,7 @@ Type:: Required:: - `items` -[discrete] + === Schema [cols="1,1,1",options="header"] @@ -10011,4 +10011,3 @@ Required:: | Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds |=== - diff --git a/scalability_and_performance/index.adoc b/scalability_and_performance/index.adoc index dc11d250ba42..e1566167fdb8 100644 --- a/scalability_and_performance/index.adoc +++ b/scalability_and_performance/index.adoc @@ -16,7 +16,7 @@ To contact Red Hat support, see xref:../support/getting-support.adoc#getting-sup Some performance and scalability Operators have release cycles that are independent from {product-title} release cycles. For more information, see link:access.redhat.com/support/policy/updates/openshift_operators[OpenShift Operators]. 
==== -[discrete] + == Recommended performance and scalability practices xref:../scalability_and_performance/recommended-performance-scale-practices/recommended-control-plane-practices.adoc#recommended-control-plane-practices[Recommended control plane practices] @@ -25,12 +25,11 @@ xref:../scalability_and_performance/recommended-performance-scale-practices/reco xref:../scalability_and_performance/recommended-performance-scale-practices/recommended-etcd-practices.adoc#recommended-etcd-practices[Recommended etcd practices] -[discrete] == Telco reference design specifications xref:../scalability_and_performance/telco_ref_design_specs/ran/telco-ran-ref-design-spec.adoc#telco-ran-architecture-overview_ran-ref-design-spec[Telco RAN DU specification] -[discrete] + == Planning, optimization, and measurement xref:../scalability_and_performance/planning-your-environment-according-to-object-maximums.adoc#cluster-maximums-major-releases_object-limits[Planning your environment according to object maximums] diff --git a/security/certificate_types_descriptions/etcd-certificates.adoc b/security/certificate_types_descriptions/etcd-certificates.adoc index 7be1ffc1c387..e115c043404b 100644 --- a/security/certificate_types_descriptions/etcd-certificates.adoc +++ b/security/certificate_types_descriptions/etcd-certificates.adoc @@ -27,7 +27,7 @@ etcd certificates are used for encrypted communication between etcd member peers * Server certificates: Used by the etcd server for authenticating client requests. * Metric certificates: All metric consumers connect to proxy with metric-client certificates. 
-[discrete] + [role="_additional-resources"] == Additional resources diff --git a/security/certificate_types_descriptions/node-certificates.adoc b/security/certificate_types_descriptions/node-certificates.adoc index b4942f1bef82..5ba3309ddabd 100644 --- a/security/certificate_types_descriptions/node-certificates.adoc +++ b/security/certificate_types_descriptions/node-certificates.adoc @@ -33,7 +33,7 @@ Cluster administrators can manually renew the kubelet CA certificate by running $ oc annotate -n openshift-kube-apiserver-operator secret kube-apiserver-to-kubelet-signer auth.openshift.io/certificate-not-after- ---- -[discrete] + [role="_additional-resources"] == Additional resources diff --git a/security/certificate_types_descriptions/proxy-certificates.adoc b/security/certificate_types_descriptions/proxy-certificates.adoc index 50622d1076f5..b6337b65f02b 100644 --- a/security/certificate_types_descriptions/proxy-certificates.adoc +++ b/security/certificate_types_descriptions/proxy-certificates.adoc @@ -28,7 +28,7 @@ data: -----END CERTIFICATE----- ---- -[discrete] + [role="_additional-resources"] === Additional resources diff --git a/security/certificate_types_descriptions/service-ca-certificates.adoc b/security/certificate_types_descriptions/service-ca-certificates.adoc index e6041ab81afa..5d4243d200cb 100644 --- a/security/certificate_types_descriptions/service-ca-certificates.adoc +++ b/security/certificate_types_descriptions/service-ca-certificates.adoc @@ -57,7 +57,7 @@ Services that use service CA certificates include: This is not a comprehensive list. 
-[discrete] + [role="_additional-resources"] == Additional resources diff --git a/security/certificate_types_descriptions/user-provided-certificates-for-api-server.adoc b/security/certificate_types_descriptions/user-provided-certificates-for-api-server.adoc index 7b8eca667216..5371585d7fed 100644 --- a/security/certificate_types_descriptions/user-provided-certificates-for-api-server.adoc +++ b/security/certificate_types_descriptions/user-provided-certificates-for-api-server.adoc @@ -28,7 +28,7 @@ User-provided certificates are managed by the user. Update the secret containing the user-managed certificate as needed. -[discrete] + [role="_additional-resources"] == Additional resources diff --git a/security/certificate_types_descriptions/user-provided-certificates-for-default-ingress.adoc b/security/certificate_types_descriptions/user-provided-certificates-for-default-ingress.adoc index 9d840308c609..c73998e9e9d5 100644 --- a/security/certificate_types_descriptions/user-provided-certificates-for-default-ingress.adoc +++ b/security/certificate_types_descriptions/user-provided-certificates-for-default-ingress.adoc @@ -35,7 +35,7 @@ Applications deployed on the cluster use user-provided certificates for default Update the secret containing the user-managed certificate as needed. 
-[discrete] + [role="_additional-resources"] == Additional resources diff --git a/security/certificates/replacing-default-ingress-certificate.adoc b/security/certificates/replacing-default-ingress-certificate.adoc index d97e13c3d846..38cb84480280 100644 --- a/security/certificates/replacing-default-ingress-certificate.adoc +++ b/security/certificates/replacing-default-ingress-certificate.adoc @@ -10,7 +10,7 @@ include::modules/customize-certificates-understanding-default-router.adoc[levelo include::modules/customize-certificates-replace-default-router.adoc[leveloffset=+1] -[discrete] + [role="_additional-resources"] == Additional resources diff --git a/security/certificates/updating-ca-bundle.adoc b/security/certificates/updating-ca-bundle.adoc index 96ad6e686adf..52de8cc7e4ae 100644 --- a/security/certificates/updating-ca-bundle.adoc +++ b/security/certificates/updating-ca-bundle.adoc @@ -11,7 +11,7 @@ include::modules/ca-bundle-understanding.adoc[leveloffset=+1] include::modules/ca-bundle-replacing.adoc[leveloffset=+1] -[discrete] + [role="_additional-resources"] == Additional resources diff --git a/security/index.adoc b/security/index.adoc index c68a66f5b2ff..366a40d60b2f 100644 --- a/security/index.adoc +++ b/security/index.adoc @@ -13,7 +13,7 @@ toc::[] It is important to understand how to properly secure various aspects of your {product-title} cluster. -[discrete] + [id="container-security"] === Container security @@ -28,13 +28,13 @@ which are provided by {product-title}. * How networking and attached storage are secured in {product-title}. * Containerized solutions for API management and SSO. -[discrete] + [id="auditing"] === Auditing {product-title} auditing provides a security-relevant chronological set of records documenting the sequence of activities that have affected the system by individual users, administrators, or other components of the system. 
Administrators can xref:../security/audit-log-policy-config.adoc#audit-log-policy-config[configure the audit log policy] and xref:../security/audit-log-view.adoc#audit-log-view[view audit logs]. -[discrete] + [id="certificates"] === Certificates @@ -57,13 +57,13 @@ You can also review more details about the types of certificates used by the clu * xref:../security/certificate_types_descriptions/monitoring-and-cluster-logging-operator-component-certificates.adoc#cert-types-monitoring-and-cluster-logging-operator-component-certificates[Monitoring and cluster logging Operator component certificates] * xref:../security/certificate_types_descriptions/control-plane-certificates.adoc#cert-types-control-plane-certificates[Control plane certificates] -[discrete] + [id="encrypting-data"] === Encrypting data You can xref:../security/encrypting-etcd.adoc#encrypting-etcd[enable etcd encryption] for your cluster to provide an additional layer of data security. For example, it can help protect the loss of sensitive data if an etcd backup is exposed to the incorrect parties. -[discrete] + [id="vulnerability-scanning"] === Vulnerability scanning @@ -74,13 +74,13 @@ Administrators can use the {rhq-cso} to run xref:../security/pod-vulnerability-s For many {product-title} customers, regulatory readiness, or compliance, on some level is required before any systems can be put into production. That regulatory readiness can be imposed by national standards, industry standards, or the organization's corporate governance framework. -[discrete] + [id="compliance-checking"] === Compliance checking Administrators can use the xref:../security/compliance_operator/co-concepts/compliance-operator-understanding.adoc#understanding-compliance-operator[Compliance Operator] to run compliance scans and recommend remediations for any issues found. 
The xref:../security/compliance_operator/co-scans/oc-compliance-plug-in-using.adoc#using-oc-compliance-plug-in[`oc-compliance` plugin] is an OpenShift CLI (`oc`) plugin that provides a set of utilities to easily interact with the Compliance Operator. -[discrete] + [id="file-integrity-checking"] === File integrity checking diff --git a/security/security_profiles_operator/spo-release-notes.adoc b/security/security_profiles_operator/spo-release-notes.adoc index f2a70e9588ab..b3ea2b1f5f04 100644 --- a/security/security_profiles_operator/spo-release-notes.adoc +++ b/security/security_profiles_operator/spo-release-notes.adoc @@ -123,7 +123,7 @@ SPO memory optimization is not enabled by default. * Previously, a Security Profiles Operator (SPO) SELinux policy did not inherit low-level policy definitions from the container template. If you selected another template, such as net_container, the policy would not work because it required low-level policy definitions that only existed in the container template. This issue occurred when the SPO SELinux policy attempted to translate SELinux policies from the SPO custom format to the Common Intermediate Language (CIL) format. With this update, the container template appends to any SELinux policies that require translation from SPO to CIL. Additionally, the SPO SELinux policy can inherit low-level policy definitions from any supported policy template. (link:https://issues.redhat.com/browse/OCPBUGS-12879[*OCPBUGS-12879*]) -[discrete] + [id="spo-0-7-1-known-issue"] === Known issue @@ -138,7 +138,7 @@ The following advisory is available for the Security Profiles Operator 0.5.2: This update addresses a CVE in an underlying dependency. 
-[discrete] + [id="spo-0-5-2-known-issue"] === Known issue @@ -151,7 +151,7 @@ The following advisory is available for the Security Profiles Operator 0.5.0: * link:https://access.redhat.com/errata/RHBA-2022:8762[RHBA-2022:8762 - OpenShift Security Profiles Operator bug fix update] -[discrete] + [id="spo-0-5-0-known-issue"] === Known issue diff --git a/security/security_profiles_operator/spo-seccomp.adoc b/security/security_profiles_operator/spo-seccomp.adoc index ae4373bd75d8..898bad10d8df 100644 --- a/security/security_profiles_operator/spo-seccomp.adoc +++ b/security/security_profiles_operator/spo-seccomp.adoc @@ -23,7 +23,7 @@ include::modules/spo-recording-profiles.adoc[leveloffset=+1] include::modules/spo-container-profile-instances.adoc[leveloffset=+2] -[discrete] + [role="_additional-resources"] [id="additional-resources_spo-seccomp"] == Additional resources diff --git a/security/security_profiles_operator/spo-selinux.adoc b/security/security_profiles_operator/spo-selinux.adoc index c017003205a5..e57d36830054 100644 --- a/security/security_profiles_operator/spo-selinux.adoc +++ b/security/security_profiles_operator/spo-selinux.adoc @@ -29,7 +29,7 @@ include::modules/spo-container-profile-instances.adoc[leveloffset=+2] include::modules/spo-selinux-runasany.adoc[leveloffset=+2] -[discrete] + [role="_additional-resources"] [id="additional-resources_spo-selinux"] == Additional resources diff --git a/support/gathering-cluster-data.adoc b/support/gathering-cluster-data.adoc index 95c633e2be00..60fb2bc624f8 100644 --- a/support/gathering-cluster-data.adoc +++ b/support/gathering-cluster-data.adoc @@ -92,9 +92,9 @@ endif::openshift-origin[] include::modules/about-toolbox.adoc[leveloffset=+1] // Installing packages to a toolbox container -[discrete] + include::modules/support-installing-packages-to-a-toolbox-container.adoc[leveloffset=+2] // Starting an alternative image with toolbox -[discrete] + 
include::modules/support-starting-an-alternative-image-with-toolbox.adoc[leveloffset=+2] diff --git a/support/troubleshooting/troubleshooting-operating-system-issues.adoc b/support/troubleshooting/troubleshooting-operating-system-issues.adoc index f5cf34a39988..7f647497b440 100644 --- a/support/troubleshooting/troubleshooting-operating-system-issues.adoc +++ b/support/troubleshooting/troubleshooting-operating-system-issues.adoc @@ -42,7 +42,7 @@ endif::[] It is recommended to perform vmcore analysis on a separate {op-system-base} system. ==== -[discrete] + [role="_additional-resources"] [id="additional-resources_investigating-kernel-crashes"] === Additional resources diff --git a/updating/updating_a_cluster/update-using-custom-machine-config-pools.adoc b/updating/updating_a_cluster/update-using-custom-machine-config-pools.adoc index 5d56acd7c998..94b96dd09ee3 100644 --- a/updating/updating_a_cluster/update-using-custom-machine-config-pools.adoc +++ b/updating/updating_a_cluster/update-using-custom-machine-config-pools.adoc @@ -33,7 +33,7 @@ The previous values are an example only. The time it takes to drain a node might vary depending on factors such as workloads. ==== -[discrete] + [id="defining-custom-mcps_{context}"] === Defining custom machine config pools @@ -45,7 +45,7 @@ In order to organize the worker node updates into separate stages, you can begin * *workerpool-C* with 30 nodes -[discrete] + [id="updating-canary-worker-pool_{context}"] === Updating the canary worker pool @@ -53,7 +53,7 @@ During your first maintenance window, you pause the MCPs for *workerpool-A*, *wo This updates components that run on top of {product-title} and the 10 nodes that are part of the unpaused *workerpool-canary* MCP. The other three MCPs are not updated because they were paused. 
-[discrete] + [id="determining-remaining-worker-pools_{context}"] === Determining whether to proceed with the remaining worker pool updates @@ -92,7 +92,7 @@ Draining and cordoning a node deschedules all pods on the node and marks the nod After the node is drained, the Machine Config Daemon applies a new machine configuration, which can include updating the operating system (OS). Updating the OS requires the host to reboot. -[discrete] + [id="using-custom-mcps_{context}"] === Using custom machine config pools @@ -112,7 +112,7 @@ The default setting for `maxUnavailable` is `1` for all the machine config pools To ensure the stability of the control plane, creating a custom MCP from the control plane nodes is not supported. The Machine Config Operator (MCO) ignores any custom MCP created for the control plane nodes. ==== -[discrete] + [id="custom-mcp-considerations_{context}"] === Considerations when using custom machine config pools diff --git a/web_console/dynamic-plugin/overview-dynamic-plugin.adoc b/web_console/dynamic-plugin/overview-dynamic-plugin.adoc index ad80184d37d5..a3d475064ec7 100644 --- a/web_console/dynamic-plugin/overview-dynamic-plugin.adoc +++ b/web_console/dynamic-plugin/overview-dynamic-plugin.adoc @@ -40,7 +40,7 @@ conster Header: React.FC = () => { * Avoid selectors that could affect markup outside of your plugins components, such as element selectors. These are not APIs and are subject to change. Using them might break your plugin. Avoid selectors like element selectors that could affect markup outside of your plugins components. 
-[discrete] + == PatternFly guidelines When creating your plugin, follow these guidelines for using PatternFly: diff --git a/web_console/web_terminal/installing-web-terminal.adoc b/web_console/web_terminal/installing-web-terminal.adoc index f8a954095a22..39d89bfec679 100644 --- a/web_console/web_terminal/installing-web-terminal.adoc +++ b/web_console/web_terminal/installing-web-terminal.adoc @@ -8,14 +8,14 @@ toc::[] You can install the web terminal by using the {web-terminal-op} listed in the {product-title} OperatorHub. When you install the {web-terminal-op}, the custom resource definitions (CRDs) that are required for the command line configuration, such as the `DevWorkspace` CRD, are automatically installed. The web console creates the required resources when you open the web terminal. -[discrete] + [id="prerequisites_installing-web-terminal"] == Prerequisites * You are logged into the {product-title} web console. * You have cluster administrator permissions. -[discrete] + [id="installing-web-terminal-procedure"] == Procedure diff --git a/windows_containers/creating_windows_machinesets/creating-windows-machineset-aws.adoc b/windows_containers/creating_windows_machinesets/creating-windows-machineset-aws.adoc index 246709d379e5..c926bd13734c 100644 --- a/windows_containers/creating_windows_machinesets/creating-windows-machineset-aws.adoc +++ b/windows_containers/creating_windows_machinesets/creating-windows-machineset-aws.adoc @@ -8,7 +8,7 @@ toc::[] You can create a Windows `MachineSet` object to serve a specific purpose in your {product-title} cluster on Amazon Web Services (AWS). For example, you might create infrastructure Windows machine sets and related machines so that you can move supporting Windows workloads to the new Windows machines. -[discrete] + == Prerequisites * You installed the Windows Machine Config Operator (WMCO) using Operator Lifecycle Manager (OLM). 
diff --git a/windows_containers/creating_windows_machinesets/creating-windows-machineset-azure.adoc b/windows_containers/creating_windows_machinesets/creating-windows-machineset-azure.adoc index 5d3703e6c5e8..fd7c890cbdc7 100644 --- a/windows_containers/creating_windows_machinesets/creating-windows-machineset-azure.adoc +++ b/windows_containers/creating_windows_machinesets/creating-windows-machineset-azure.adoc @@ -8,7 +8,7 @@ toc::[] You can create a Windows `MachineSet` object to serve a specific purpose in your {product-title} cluster on Microsoft Azure. For example, you might create infrastructure Windows machine sets and related machines so that you can move supporting Windows workloads to the new Windows machines. -[discrete] + == Prerequisites * You installed the Windows Machine Config Operator (WMCO) using Operator Lifecycle Manager (OLM). diff --git a/windows_containers/creating_windows_machinesets/creating-windows-machineset-gcp.adoc b/windows_containers/creating_windows_machinesets/creating-windows-machineset-gcp.adoc index 2e34751c39fd..35ed8735e092 100644 --- a/windows_containers/creating_windows_machinesets/creating-windows-machineset-gcp.adoc +++ b/windows_containers/creating_windows_machinesets/creating-windows-machineset-gcp.adoc @@ -8,7 +8,7 @@ toc::[] You can create a Windows `MachineSet` object to serve a specific purpose in your {product-title} cluster on Google Cloud Platform (GCP). For example, you might create infrastructure Windows machine sets and related machines so that you can move supporting Windows workloads to the new Windows machines. -[discrete] + == Prerequisites * You installed the Windows Machine Config Operator (WMCO) using Operator Lifecycle Manager (OLM). 
diff --git a/windows_containers/creating_windows_machinesets/creating-windows-machineset-nutanix.adoc b/windows_containers/creating_windows_machinesets/creating-windows-machineset-nutanix.adoc index d86b338692c1..56fe72a30b23 100644 --- a/windows_containers/creating_windows_machinesets/creating-windows-machineset-nutanix.adoc +++ b/windows_containers/creating_windows_machinesets/creating-windows-machineset-nutanix.adoc @@ -8,7 +8,7 @@ toc::[] You can create a Windows `MachineSet` object to serve a specific purpose in your {product-title} cluster on Nutanix. For example, you might create infrastructure Windows machine sets and related machines so that you can move supporting Windows workloads to the new Windows machines. -[discrete] + == Prerequisites * You installed the Windows Machine Config Operator (WMCO) using Operator Lifecycle Manager (OLM). diff --git a/windows_containers/creating_windows_machinesets/creating-windows-machineset-vsphere.adoc b/windows_containers/creating_windows_machinesets/creating-windows-machineset-vsphere.adoc index d0b0b6ce2305..add8ffb814cf 100644 --- a/windows_containers/creating_windows_machinesets/creating-windows-machineset-vsphere.adoc +++ b/windows_containers/creating_windows_machinesets/creating-windows-machineset-vsphere.adoc @@ -8,7 +8,7 @@ toc::[] You can create a Windows `MachineSet` object to serve a specific purpose in your {product-title} cluster on VMware vSphere. For example, you might create infrastructure Windows machine sets and related machines so that you can move supporting Windows workloads to the new Windows machines. -[discrete] + == Prerequisites * You installed the Windows Machine Config Operator (WMCO) using Operator Lifecycle Manager (OLM). 
diff --git a/windows_containers/disabling-windows-container-workloads.adoc b/windows_containers/disabling-windows-container-workloads.adoc index d1b3aeb4e19b..2e8a3193684c 100644 --- a/windows_containers/disabling-windows-container-workloads.adoc +++ b/windows_containers/disabling-windows-container-workloads.adoc @@ -12,7 +12,7 @@ include::modules/uninstalling-wmco.adoc[leveloffset=+1] include::modules/deleting-wmco-namespace.adoc[leveloffset=+1] -[discrete] + [role="_additional-resources"] == Additional resources diff --git a/windows_containers/enabling-windows-container-workloads.adoc b/windows_containers/enabling-windows-container-workloads.adoc index f76b8dfbf416..c5876da1b614 100644 --- a/windows_containers/enabling-windows-container-workloads.adoc +++ b/windows_containers/enabling-windows-container-workloads.adoc @@ -13,7 +13,7 @@ Before adding Windows workloads to your cluster, you must install the Windows Ma Dual NIC is not supported on WMCO-managed Windows instances. ==== -[discrete] + == Prerequisites * You have access to an {product-title} cluster using an account with `cluster-admin` permissions. diff --git a/windows_containers/scheduling-windows-workloads.adoc b/windows_containers/scheduling-windows-workloads.adoc index 7726e8516050..9c67af67e9fe 100644 --- a/windows_containers/scheduling-windows-workloads.adoc +++ b/windows_containers/scheduling-windows-workloads.adoc @@ -8,7 +8,7 @@ toc::[] You can schedule Windows workloads to Windows compute nodes. -[discrete] + == Prerequisites * You installed the Windows Machine Config Operator (WMCO) using Operator Lifecycle Manager (OLM). @@ -17,7 +17,7 @@ You can schedule Windows workloads to Windows compute nodes. include::modules/windows-pod-placement.adoc[leveloffset=+1] -[discrete] + [role="_additional-resources"] === Additional resources