From da17135d1abfff2a08403383741feea8bffed931 Mon Sep 17 00:00:00 2001
From: Andrea Hoffer
Date: Tue, 11 Nov 2025 10:49:39 -0500
Subject: [PATCH] Callout rewrite examples

---
 .../ai-adding-worker-nodes-to-cluster.adoc    | 45 ++++++-----
 ...de-workloads-using-taints-tolerations.adoc | 25 +++---
 ...e-certificates-replace-default-router.adoc | 25 +++---
 ...gcp-user-infra-shared-vpc-config-yaml.adoc | 81 ++++++++++---------
 modules/installation-initializing.adoc        | 25 +++---
 modules/lws-config.adoc                       | 79 +++++++++++-------
 modules/nw-metallb-configure-svc.adoc         | 18 ++---
 modules/update-upgrading-cli.adoc             |  8 +-
 modules/using-must-gather.adoc                | 10 ++-
 9 files changed, 183 insertions(+), 133 deletions(-)

diff --git a/modules/ai-adding-worker-nodes-to-cluster.adoc b/modules/ai-adding-worker-nodes-to-cluster.adoc
index 097423b49965..7eeac76d23ae 100644
--- a/modules/ai-adding-worker-nodes-to-cluster.adoc
+++ b/modules/ai-adding-worker-nodes-to-cluster.adoc
@@ -26,9 +26,10 @@ You can add worker nodes to clusters using the Assisted Installer REST API.
 +
 [source,terminal]
 ----
-$ export API_URL=<api_url> <1>
+$ export API_URL=<api_url>
 ----
-<1> Replace `<api_url>` with the Assisted Installer API URL, for example, `https://api.openshift.com`
++
+Replace `<api_url>` with the Assisted Installer API URL, for example, `https://api.openshift.com`.

 . Import the {sno} cluster by running the following commands:
 +
@@ -44,14 +45,17 @@ $ export OPENSHIFT_CLUSTER_ID=$(oc get clusterversion -o jsonpath='{.items[].spe
 [source,terminal]
 ----
 $ export CLUSTER_REQUEST=$(jq --null-input --arg openshift_cluster_id "$OPENSHIFT_CLUSTER_ID" '{
-  "api_vip_dnsname": "<api_vip>", <1>
+  "api_vip_dnsname": "<api_vip>",
   "openshift_cluster_id": $openshift_cluster_id,
-  "name": "<openshift_cluster_name>" <2>
+  "name": "<openshift_cluster_name>"
 }')
 ----
-<1> Replace `<api_vip>` with the hostname for the cluster's API server. This can be the DNS domain for the API server or the IP address of the single node which the worker node can reach. For example, `api.compute-1.example.com`.
-<2> Replace `<openshift_cluster_name>` with the plain text name for the cluster. The cluster name should match the cluster name that was set during the Day 1 cluster installation.
+
+where:
+
+`<api_vip>`:: Specifies the hostname for the cluster's API server. This can be the DNS domain for the API server or the IP address of the single node that the worker node can reach. For example, `api.compute-1.example.com`.
+`<openshift_cluster_name>`:: Specifies the plain text name for the cluster. The cluster name should match the cluster name that was set during the Day 1 cluster installation.
+
 .. Import the cluster and set the `$CLUSTER_ID` variable. Run the following command:
 +
 [source,terminal]
 ----
@@ -69,20 +73,23 @@ $ CLUSTER_ID=$(curl "$API_URL/api/assisted-install/v2/clusters/import" -H "Autho
 [source,terminal]
 ----
 export INFRA_ENV_REQUEST=$(jq --null-input \
-  --slurpfile pull_secret <path_to_pull_secret_file> \//<1>
-  --arg ssh_pub_key "$(cat <path_to_ssh_pub_key>)" \//<2>
+  --slurpfile pull_secret <path_to_pull_secret_file> \
+  --arg ssh_pub_key "$(cat <path_to_ssh_pub_key>)" \
   --arg cluster_id "$CLUSTER_ID" '{
-  "name": "<infraenv_name>", <3>
+  "name": "<infraenv_name>",
   "pull_secret": $pull_secret[0] | tojson,
   "cluster_id": $cluster_id,
   "ssh_authorized_key": $ssh_pub_key,
-  "image_type": "<iso_image_type>" <4>
+  "image_type": "<iso_image_type>"
 }')
 ----
-<1> Replace `<path_to_pull_secret_file>` with the path to the local file containing the downloaded pull secret from Red Hat OpenShift Cluster Manager at link:console.redhat.com/openshift/install/pull-secret[console.redhat.com].
-<2> Replace `<path_to_ssh_pub_key>` with the path to the public SSH key required to access the host. If you do not set this value, you cannot access the host while in discovery mode.
-<3> Replace `<infraenv_name>` with the plain text name for the `InfraEnv` resource.
-<4> Replace `<iso_image_type>` with the ISO image type, either `full-iso` or `minimal-iso`.
++
+where:
+
+`<path_to_pull_secret_file>`:: Specifies the path to the local file containing the downloaded pull secret from Red Hat OpenShift Cluster Manager at link:console.redhat.com/openshift/install/pull-secret[console.redhat.com].
+`<path_to_ssh_pub_key>`:: Specifies the path to the public SSH key required to access the host. If you do not set this value, you cannot access the host while in discovery mode.
+`<infraenv_name>`:: Specifies the plain text name for the `InfraEnv` resource.
+`<iso_image_type>`:: Specifies the ISO image type, either `full-iso` or `minimal-iso`.
+
 .. Post the `$INFRA_ENV_REQUEST` to the link:https://api.openshift.com/?urls.primaryName=assisted-service%20service#/installer/RegisterInfraEnv[/v2/infra-envs] API and set the `$INFRA_ENV_ID` variable:
 +
@@ -108,9 +115,10 @@ https://api.openshift.com/api/assisted-images/images/41b91e72-c33e-42ee-b80f-b5c
 +
 [source,terminal]
 ----
-$ curl -L -s '<iso_url>' --output rhcos-live-minimal.iso <1>
+$ curl -L -s '<iso_url>' --output rhcos-live-minimal.iso
 ----
-<1> Replace `<iso_url>` with the URL for the ISO from the previous step.
++
+Replace `<iso_url>` with the URL for the ISO from the previous step.

 . Boot the new worker host from the downloaded `rhcos-live-minimal.iso`.

@@ -131,9 +139,10 @@ $ curl -s "$API_URL/api/assisted-install/v2/clusters/$CLUSTER_ID" -H "Authorizat
 +
 [source,terminal]
 ----
-$ HOST_ID=<host_id> <1>
+$ HOST_ID=<host_id>
 ----
-<1> Replace `<host_id>` with the host ID from the previous step.
++
+Replace `<host_id>` with the host ID from the previous step.

 . Check that the host is ready to install by running the following command:
 +
diff --git a/modules/binding-infra-node-workloads-using-taints-tolerations.adoc b/modules/binding-infra-node-workloads-using-taints-tolerations.adoc
index 99beddac3f3e..c25fb04dd39a 100644
--- a/modules/binding-infra-node-workloads-using-taints-tolerations.adoc
+++ b/modules/binding-infra-node-workloads-using-taints-tolerations.adoc
@@ -76,7 +76,7 @@ spec:
 ----
 ====
 +
-These examples place a taint on `node1` that has the `node-role.kubernetes.io/infra` key and the `NoSchedule` taint effect. Nodes with the `NoSchedule` effect schedule only pods that tolerate the taint, but allow existing pods to remain scheduled on the node. 
+These examples place a taint on `node1` that has the `node-role.kubernetes.io/infra` key and the `NoSchedule` taint effect. Nodes with the `NoSchedule` effect schedule only pods that tolerate the taint, but allow existing pods to remain scheduled on the node.
 +
 If you added a `NoSchedule` taint to the infrastructure node, any pods that are controlled by a daemon set on that node are marked as `misscheduled`. You must either delete the pods or add a toleration to the pods as shown in the Red Hat Knowledgebase solution link:https://access.redhat.com/solutions/6592171[add toleration on `misscheduled` DNS pods]. Note that you cannot add a toleration to a daemon set object that is managed by an operator.
 +
@@ -98,15 +98,20 @@ metadata:
 spec:
 # ...
   tolerations:
-  - key: node-role.kubernetes.io/infra <1>
-    value: reserved <2>
-    effect: NoSchedule <3>
-    operator: Equal <4>
+  - key: node-role.kubernetes.io/infra
+    value: reserved
+    effect: NoSchedule
+    operator: Equal
 ----
-<1> Specify the key that you added to the node.
-<2> Specify the value of the key-value pair taint that you added to the node.
-<3> Specify the effect that you added to the node.
-<4> Specify the `Equal` Operator to require a taint with the key `node-role.kubernetes.io/infra` to be present on the node.
++
+where:
++
+--
+`spec.tolerations.key`:: Specifies the key that you added to the node.
+`spec.tolerations.value`:: Specifies the value of the key-value pair taint that you added to the node.
+`spec.tolerations.effect`:: Specifies the effect that you added to the node.
+`spec.tolerations.operator`:: Specifies the `Equal` operator to require a taint with the key `node-role.kubernetes.io/infra` to be present on the node.
+--
 +
 This toleration matches the taint created by the `oc adm taint` command. A pod with this toleration can be scheduled onto the infrastructure node.
 +
@@ -117,4 +122,4 @@ Moving pods for an Operator installed via OLM to an infrastructure node is not a

 . Schedule the pod to the infrastructure node by using a scheduler. See the documentation for "Controlling pod placement using the scheduler" for details.

-. Remove any workloads that you do not want, or that do not belong, on the new infrastructure node. See the list of workloads supported for use on infrastructure nodes in "{product-title} infrastructure components". 
+. Remove any workloads that you do not want, or that do not belong, on the new infrastructure node. See the list of workloads supported for use on infrastructure nodes in "{product-title} infrastructure components".
diff --git a/modules/customize-certificates-replace-default-router.adoc b/modules/customize-certificates-replace-default-router.adoc
index 5db5f2c7d513..7e33574320fe 100644
--- a/modules/customize-certificates-replace-default-router.adoc
+++ b/modules/customize-certificates-replace-default-router.adoc
@@ -24,10 +24,11 @@ You can replace the default ingress certificate for all applications under the `
 [source,terminal]
 ----
 $ oc create configmap custom-ca \
-  --from-file=ca-bundle.crt=</path/to/example-ca.crt> \//<1>
+  --from-file=ca-bundle.crt=</path/to/example-ca.crt> \
   -n openshift-config
 ----
-<1> `</path/to/example-ca.crt>` is the path to the root CA certificate file on your local file system. For example, `/etc/pki/ca-trust/source/anchors`.
++
+Replace `</path/to/example-ca.crt>` with the path to the root CA certificate file on your local file system. For example, `/etc/pki/ca-trust/source/anchors`.

 . Update the cluster-wide proxy configuration with the newly created config map:
 +
@@ -49,14 +50,17 @@ If you change any other parameter in the `openshift-config-user-ca-bundle.crt` f
 +
 [source,terminal]
 ----
-$ oc create secret tls <secret> \//<1>
-  --cert=</path/to/cert.crt> \//<2>
-  --key=</path/to/cert.key> \//<3>
+$ oc create secret tls <secret> \
+  --cert=</path/to/cert.crt> \
+  --key=</path/to/cert.key> \
   -n openshift-ingress
 ----
-<1> `<secret>` is the name of the secret that will contain the certificate chain and private key.
-<2> `</path/to/cert.crt>` is the path to the certificate chain on your local file system.
-<3> `</path/to/cert.key>` is the path to the private key associated with this certificate.
++
+where:
+
+`<secret>`:: Specifies the name of the secret that contains the certificate chain and private key.
+`</path/to/cert.crt>`:: Specifies the path to the certificate chain on your local file system.
+`</path/to/cert.key>`:: Specifies the path to the private key associated with this certificate.

 . Update the Ingress Controller configuration with the newly created secret:
 +
@@ -64,10 +68,11 @@
 ----
 $ oc patch ingresscontroller.operator default \
   --type=merge -p \
-  '{"spec":{"defaultCertificate": {"name": "<secret>"}}}' \//<1>
+  '{"spec":{"defaultCertificate": {"name": "<secret>"}}}' \
   -n openshift-ingress-operator
 ----
-<1> Replace `<secret>` with the name used for the secret in the previous step.
++
+Replace `<secret>` with the name used for the secret in the previous step.
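As a worked example, assuming a hypothetical secret name of `custom-ingress-cert` and local files `tls.crt` and `tls.key`, the two commands above fit together like this:

[source,terminal]
----
$ oc create secret tls custom-ingress-cert \
  --cert=tls.crt \
  --key=tls.key \
  -n openshift-ingress

$ oc patch ingresscontroller.operator default \
  --type=merge -p \
  '{"spec":{"defaultCertificate": {"name": "custom-ingress-cert"}}}' \
  -n openshift-ingress-operator
----

The name passed in the patch must match the name given to `oc create secret tls`, because the Ingress Controller looks up the certificate by that secret reference.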
 +
 [IMPORTANT]
 ====
diff --git a/modules/installation-gcp-user-infra-shared-vpc-config-yaml.adoc b/modules/installation-gcp-user-infra-shared-vpc-config-yaml.adoc
index b4da0a97c812..a18c0d4a0b8b 100644
--- a/modules/installation-gcp-user-infra-shared-vpc-config-yaml.adoc
+++ b/modules/installation-gcp-user-infra-shared-vpc-config-yaml.adoc
@@ -16,9 +16,9 @@ This sample YAML file is provided for reference only. You must obtain your `inst
 [source,yaml]
 ----
 apiVersion: v1
-baseDomain: example.com <1>
-controlPlane: <2>
-  hyperthreading: Enabled <3> <4>
+baseDomain: example.com
+controlPlane:
+  hyperthreading: Enabled
   name: master
   platform:
     gcp:
@@ -26,12 +26,12 @@ controlPlane: <2>
     zones:
     - us-central1-a
     - us-central1-c
-    tags: <5>
+    tags:
     - control-plane-tag1
     - control-plane-tag2
   replicas: 3
-compute: <2>
-- hyperthreading: Enabled <3>
+compute:
+- hyperthreading: Enabled
   name: worker
   platform:
     gcp:
@@ -39,7 +39,7 @@ compute: <2>
     zones:
     - us-central1-a
     - us-central1-c
-    tags: <5>
+    tags:
     - compute-tag1
     - compute-tag2
   replicas: 0
@@ -51,62 +51,65 @@ networking:
     hostPrefix: 23
   machineNetwork:
   - cidr: 10.0.0.0/16
-  networkType: OVNKubernetes <6>
+  networkType: OVNKubernetes
   serviceNetwork:
   - 172.30.0.0/16
 platform:
   gcp:
     defaultMachinePlatform:
-      tags: <5>
+      tags:
       - global-tag1
       - global-tag2
-    projectID: openshift-production <7>
-    region: us-central1 <8>
+    projectID: openshift-production
+    region: us-central1
 pullSecret: '{"auths": ...}'
 ifndef::openshift-origin[]
-fips: false <9>
-sshKey: ssh-ed25519 AAAA... <10>
-publish: Internal <11>
-endif::openshift-origin[]
-ifdef::openshift-origin[]
-sshKey: ssh-ed25519 AAAA... <9>
-publish: Internal <10>
+fips: false
 endif::openshift-origin[]
+sshKey: ssh-ed25519 AAAA...
+publish: Internal
 ----
-<1> Specify the public DNS on the host project.
-<2> If you do not provide these parameters and values, the installation program provides the default value.
-<3> The `controlPlane` section is a single mapping, but the compute section is a sequence of mappings. To meet the requirements of the different data structures, the first line of the `compute` section must begin with a hyphen, `-`, and the first line of the `controlPlane` section must not. Although both sections currently define a single machine pool, it is possible that future versions of {product-title} will support defining multiple compute pools during installation. Only one control plane pool is used.
-<4> Whether to enable or disable simultaneous multithreading, or `hyperthreading`. By default, simultaneous multithreading is enabled to increase the performance of your machines' cores. You can disable it by setting the parameter value to `Disabled`. If you disable simultaneous multithreading in some cluster machines, you must disable it in all cluster machines.
+
+where:
+
+`baseDomain`:: Specifies the public DNS on the host project.
+
+`controlPlane`:: Specifies the configuration for the machines that form the control plane. The `controlPlane` section is a single mapping and the first line of the `controlPlane` section must not begin with a hyphen (`-`). Only one control plane pool is used. If you do not provide parameters and values for this section, the installation program provides the default values.
+
+`controlPlane.hyperthreading`:: Specifies whether to enable or disable simultaneous multithreading, or `hyperthreading`. By default, simultaneous multithreading is enabled to increase the performance of your machines' cores. You can disable it by setting the parameter value to `Disabled`.
If you disable simultaneous multithreading in some cluster machines, you must disable it in all cluster machines. + [IMPORTANT] ==== If you disable simultaneous multithreading, ensure that your capacity planning accounts for the dramatically decreased machine performance. Use larger machine types, such as `n1-standard-8`, for your machines if you disable simultaneous multithreading. ==== -<5> Optional: A set of network tags to apply to the control plane or compute machine sets. The `platform.gcp.defaultMachinePlatform.tags` parameter applies to both control plane and compute machines. If the `compute.platform.gcp.tags` or `controlPlane.platform.gcp.tags` parameters are set, they override the `platform.gcp.defaultMachinePlatform.tags` parameter. -<6> The cluster network plugin to install. The default value `OVNKubernetes` is the only supported value. -<7> Specify the main project where the VM instances reside. -<8> Specify the region that your VPC network is in. + +`controlPlane.platform.gcp.tags`:: Specifies a set of network tags to apply to the control plane machine sets. If the `controlPlane.platform.gcp.tags` parameter is set, it overrides the `platform.gcp.defaultMachinePlatform.tags` parameter. This value is optional. + +`compute`:: Specifies the configuration for the machines that comprise the compute nodes. The `compute` section is a sequence of mappings and the first line of the `compute` section must begin with a hyphen (`-`). Although this section currently defines a single machine pool, it is possible that future versions of {product-title} will support defining multiple compute pools during installation. If you do not provide parameters and values for this section, the installation program provides the default values. + +`compute.platform.gcp.tags`:: Specifies a set of network tags to apply to the compute machine sets. If the `compute.platform.gcp.tags` parameter is set, it overrides the `platform.gcp.defaultMachinePlatform.tags` parameter. This value is optional. + +`networking.networkType`:: Specifies the cluster network plugin to install. The default value `OVNKubernetes` is the only supported value. + +`platform.gcp.defaultMachinePlatform.tags`:: Specifies a default set of network tags to apply to the control plane or compute machine sets. The `platform.gcp.defaultMachinePlatform.tags` parameter applies to both control plane and compute machines. If the `compute.platform.gcp.tags` or `controlPlane.platform.gcp.tags` parameters are set, they override the `platform.gcp.defaultMachinePlatform.tags` parameter. This value is optional. + +`platform.gcp.projectID`:: Specifies the main project where the VM instances reside. + +`platform.gcp.region`:: Specifies the region that your VPC network is in. + ifndef::openshift-origin[] -<9> Whether to enable or disable FIPS mode. By default, FIPS mode is not enabled. If FIPS mode is enabled, the {op-system-first} machines that {product-title} runs on bypass the default Kubernetes cryptography suite and use the cryptography modules that are provided with {op-system} instead. +`fips`:: Specifies whether to enable or disable FIPS mode. By default, FIPS mode is not enabled. If FIPS mode is enabled, the {op-system-first} machines that {product-title} runs on bypass the default Kubernetes cryptography suite and use the cryptography modules that are provided with {op-system} instead. + -- include::snippets/fips-snippet.adoc[] -- -<10> You can optionally provide the `sshKey` value that you use to access the machines in your cluster. 
-endif::openshift-origin[]
-ifdef::openshift-origin[]
-<9> You can optionally provide the `sshKey` value that you use to access the machines in your cluster.
 endif::openshift-origin[]
+
+`sshKey`:: Specifies the `sshKey` value that you use to access the machines in your cluster. This value is optional.
 +
 [NOTE]
 ====
 For production {product-title} clusters on which you want to perform installation debugging or disaster recovery, specify an SSH key that your `ssh-agent` process uses.
 ====
-ifndef::openshift-origin[]
-<11> How to publish the user-facing endpoints of your cluster. Set `publish` to `Internal` to deploy a private cluster, which cannot be accessed from the internet. The default value is `External`.
-To use a shared VPC in a cluster that uses infrastructure that you provision, you must set `publish` to `Internal`. The installation program will no longer be able to access the public DNS zone for the base domain in the host project.
-endif::openshift-origin[]
-ifdef::openshift-origin[]
-<10> How to publish the user-facing endpoints of your cluster. Set `publish` to `Internal` to deploy a private cluster, which cannot be accessed from the internet. The default value is `External`.
-To use a shared VPC in a cluster that uses infrastructure that you provision, you must set `publish` to `Internal`. The installation program will no longer be able to access the public DNS zone for the base domain in the host project.
-endif::openshift-origin[]
+
+`publish`:: Specifies how to publish the user-facing endpoints of your cluster. Set `publish` to `Internal` to deploy a private cluster, which cannot be accessed from the internet. The default value is `External`. To use a shared VPC in a cluster that uses infrastructure that you provision, you must set `publish` to `Internal`. The installation program will no longer be able to access the public DNS zone for the base domain in the host project.
diff --git a/modules/installation-initializing.adoc b/modules/installation-initializing.adoc
index 14aad08ae912..3204e357309a 100644
--- a/modules/installation-initializing.adoc
+++ b/modules/installation-initializing.adoc
@@ -231,10 +231,10 @@ endif::azure[]
 +
 [source,terminal]
 ----
-$ ./openshift-install create install-config --dir <installation_directory> <1>
+$ ./openshift-install create install-config --dir <installation_directory>
 ----
-<1> For `<installation_directory>`, specify the directory name to store the
-files that the installation program creates.
++
+For `<installation_directory>`, specify the directory name to store the files that the installation program creates.
 +
 When specifying the directory:
 ifndef::ibm-power-vs[]
@@ -499,15 +499,18 @@ ifdef::azure+restricted[]
 +
 [source,yaml]
 ----
-networkResourceGroupName: <vnet_resource_group> <1>
-virtualNetwork: <vnet> <2>
-controlPlaneSubnet: <control_plane_subnet> <3>
-computeSubnet: <compute_subnet> <4>
+networkResourceGroupName: <vnet_resource_group>
+virtualNetwork: <vnet>
+controlPlaneSubnet: <control_plane_subnet>
+computeSubnet: <compute_subnet>
 ----
-<1> Replace `<vnet_resource_group>` with the resource group name that contains the existing virtual network (VNet).
-<2> Replace `<vnet>` with the existing virtual network name.
-<3> Replace `<control_plane_subnet>` with the existing subnet name to deploy the control plane machines.
-<4> Replace `<compute_subnet>` with the existing subnet name to deploy compute machines.
++
+where:
+
+`<vnet_resource_group>`:: Specifies the resource group name that contains the existing virtual network (VNet).
+`<vnet>`:: Specifies the existing virtual network name.
+`<control_plane_subnet>`:: Specifies the existing subnet name to deploy the control plane machines.
+`<compute_subnet>`:: Specifies the existing subnet name to deploy compute machines.
 endif::azure+restricted[]
 ifdef::gcp+restricted[]
 ..
Define the network and subnets for the VPC to install the cluster in under the parent `platform.gcp` field: diff --git a/modules/lws-config.adoc b/modules/lws-config.adoc index f666d8470f35..58d15adcf7dc 100644 --- a/modules/lws-config.adoc +++ b/modules/lws-config.adoc @@ -29,20 +29,20 @@ apiVersion: leaderworkerset.x-k8s.io/v1 kind: LeaderWorkerSet metadata: generation: 1 - name: my-lws <1> - namespace: my-namespace <2> + name: my-lws + namespace: my-namespace spec: leaderWorkerTemplate: - leaderTemplate: <3> + leaderTemplate: metadata: {} spec: containers: - image: nginxinc/nginx-unprivileged:1.27 name: leader resources: {} - restartPolicy: RecreateGroupOnPodRestart <4> - size: 3 <5> - workerTemplate: <6> + restartPolicy: RecreateGroupOnPodRestart + size: 3 + workerTemplate: metadata: {} spec: containers: @@ -53,24 +53,45 @@ spec: protocol: TCP resources: {} networkConfig: - subdomainPolicy: Shared <7> - replicas: 2 <8> + subdomainPolicy: Shared + replicas: 2 rolloutStrategy: rollingUpdateConfiguration: - maxSurge: 1 <9> + maxSurge: 1 maxUnavailable: 1 type: RollingUpdate startupPolicy: LeaderCreated ---- -<1> Specify the name of the leader worker set resource. -<2> Specify the namespace for the leader worker set to run in. -<3> Specify the pod template for the leader pods. -<4> Specify the restart policy for when pod failures occur. Allowed values are `RecreateGroupOnPodRestart` to restart the whole group or `None` to not restart the group. -<5> Specify the number of pods to create for each group, including the leader pod. For example, a value of `3` creates 1 leader pod and 2 worker pods. The default value is `1`. -<6> Specify the pod template for the worker pods. -<7> Specify the policy to use when creating the headless service. Allowed values are `UniquePerReplica` or `Shared`. The default value is `Shared`. -<8> Specify the number of replicas, or leader-worker groups. The default value is `1`. -<9> Specify the maximum number of replicas that can be scheduled above the `replicas` value during rolling updates. The value can be specified as an integer or a percentage. ++ +where: + +`metadata.name`:: +Specifies the name of the leader worker set resource. + +`metadata.namespace`:: +Specifies the namespace for the leader worker set to run in. + +`spec.leaderWorkerTemplate.leaderTemplate`:: +Specifies the pod template for the leader pods. + +`spec.leaderWorkerTemplate.restartPolicy`:: +Specifies the restart policy for when pod failures occur. Allowed values are `RecreateGroupOnPodRestart` to restart the whole group or `None` to not restart the group. + +`spec.leaderWorkerTemplate.size`:: +Specifies the number of pods to create for each group, including the leader pod. For example, a value of `3` creates 1 leader pod and 2 worker pods. The default value is `1`. + +`spec.leaderWorkerTemplate.workerTemplate`:: +Specifies the pod template for the worker pods. + +`spec.networkConfig.subdomainPolicy`:: +Specifies the policy to use when creating the headless service. Allowed values are `UniquePerReplica` or `Shared`. The default value is `Shared`. + +`spec.replicas`:: +Specifies the number of replicas, or leader-worker groups. The default value is `1`. + +`spec.rolloutStrategy.rollingUpdateConfiguration.maxSurge`:: +Specifies the maximum number of replicas that can be scheduled above the `replicas` value during rolling updates. The value can be specified as an integer or a percentage. 
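As a usage sketch, assuming the manifest above is saved to a hypothetical file named `my-lws.yaml`, you create the resource with a standard `oc` command:

[source,terminal]
----
$ oc apply -f my-lws.yaml
----

With `replicas: 2` and `size: 3`, this produces two leader-worker groups of three pods each, which matches the six pods shown in the verification output later in this module.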
+
 +
 For more information on all available fields to configure, see link:https://lws.sigs.k8s.io/docs/reference/leaderworkerset.v1/[LeaderWorkerSet API] upstream documentation.
@@ -94,15 +115,16 @@ $ oc get pods -n my-namespace
 [source,terminal]
 ----
 NAME         READY   STATUS    RESTARTS   AGE
-my-lws-0     1/1     Running   0          4s <1>
+my-lws-0     1/1     Running   0          4s
 my-lws-0-1   1/1     Running   0          3s
 my-lws-0-2   1/1     Running   0          3s
-my-lws-1     1/1     Running   0          7s <2>
+my-lws-1     1/1     Running   0          7s
 my-lws-1-1   1/1     Running   0          6s
 my-lws-1-2   1/1     Running   0          6s
 ----
-<1> The leader pod for the first group.
-<2> The leader pod for the second group.
++
+** `my-lws-0` is the leader pod for the first group.
+** `my-lws-1` is the leader pod for the second group.

 . Review the stateful sets by running the following command:
 +
@@ -115,10 +137,11 @@ $ oc get statefulsets
 [source,terminal]
 ----
 NAME       READY   AGE
-my-lws     4/4     111s <1>
-my-lws-0   2/2     57s <2>
-my-lws-1   2/2     60s <3>
+my-lws     4/4     111s
+my-lws-0   2/2     57s
+my-lws-1   2/2     60s
 ----
-<1> The leader stateful set for all leader-worker groups.
-<2> The worker stateful set for the first group.
-<3> The worker stateful set for the second group.
++
+** `my-lws` is the leader stateful set for all leader-worker groups.
+** `my-lws-0` is the worker stateful set for the first group.
+** `my-lws-1` is the worker stateful set for the second group.
diff --git a/modules/nw-metallb-configure-svc.adoc b/modules/nw-metallb-configure-svc.adoc
index 8820d3d89b74..56d99750c27f 100644
--- a/modules/nw-metallb-configure-svc.adoc
+++ b/modules/nw-metallb-configure-svc.adoc
@@ -51,27 +51,27 @@ $ oc describe service <service_name>
 Name:                     <service_name>
 Namespace:                default
 Labels:                   <none>
-Annotations:              metallb.io/address-pool: doc-example <1>
+Annotations:              metallb.io/address-pool: doc-example
 Selector:                 app=service_name
-Type:                     LoadBalancer <2>
+Type:                     LoadBalancer
 IP Family Policy:         SingleStack
 IP Families:              IPv4
 IP:                       10.105.237.254
 IPs:                      10.105.237.254
-LoadBalancer Ingress:     192.168.100.5 <3>
+LoadBalancer Ingress:     192.168.100.5
 Port:                     80/TCP
 TargetPort:               8080/TCP
 NodePort:                 30550/TCP
 Endpoints:                10.244.0.50:8080
 Session Affinity:         None
 External Traffic Policy:  Cluster
-Events: <4>
+Events:
   Type    Reason        Age                From             Message
   ----    ------        ----               ----             -------
   Normal  nodeAssigned  32m (x2 over 32m)  metallb-speaker  announcing from node "<node_name>"
 ----
-<1> The annotation is present if you request an IP address from a specific pool.
-<2> The service type must indicate `LoadBalancer`.
-<3> The load-balancer ingress field indicates the external IP address if the service is assigned correctly.
-<4> The events field indicates the node name that is assigned to announce the external IP address.
-If you experience an error, the events field indicates the reason for the error.
++
+** The `Annotations` field shows the `metallb.io/address-pool` annotation if you request an IP address from a specific pool.
+** The `Type` field must indicate `LoadBalancer`.
+** The `LoadBalancer Ingress` field indicates the external IP address if the service is assigned correctly.
+** The `Events` field indicates the node name that is assigned to announce the external IP address. If you experience an error, the `Events` field indicates the reason for the error.
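For reference, here is a minimal sketch of a `LoadBalancer` service manifest that would produce output like the example above. The `metallb.io/address-pool` annotation, service type, and ports mirror the example output; the name and selector values are illustrative:

[source,yaml]
----
apiVersion: v1
kind: Service
metadata:
  name: <service_name>
  annotations:
    metallb.io/address-pool: doc-example
spec:
  selector:
    app: service_name
  ports:
  - port: 80
    targetPort: 8080
    protocol: TCP
  type: LoadBalancer
----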
diff --git a/modules/update-upgrading-cli.adoc b/modules/update-upgrading-cli.adoc
index 874409b9fbb3..4481a4f0f0dc 100644
--- a/modules/update-upgrading-cli.adoc
+++ b/modules/update-upgrading-cli.adoc
@@ -120,10 +120,10 @@ $ oc adm upgrade --to-latest=true
 +
 [source,terminal]
 ----
-$ oc adm upgrade --to=<version> <1>
+$ oc adm upgrade --to=<version>
 ----
-<1> `<version>` is the update version that you obtained from the output of the
-`oc adm upgrade recommend` command.
++
+Replace `<version>` with the update version that you obtained from the output of the `oc adm upgrade recommend` command.
 +
 [IMPORTANT]
 ====
@@ -212,4 +212,4 @@ ip-10-0-179-95.ec2.internal    Ready    worker   70m   v1.33.4
 ip-10-0-182-134.ec2.internal   Ready    worker   70m   v1.33.4
 ip-10-0-211-16.ec2.internal    Ready    master   82m   v1.33.4
 ip-10-0-250-100.ec2.internal   Ready    worker   69m   v1.33.4
-----
\ No newline at end of file
+----
diff --git a/modules/using-must-gather.adoc b/modules/using-must-gather.adoc
index ce764eaa8fde..fd75cd42e078 100644
--- a/modules/using-must-gather.adoc
+++ b/modules/using-must-gather.adoc
@@ -44,9 +44,10 @@ $ oc adm must-gather --image={must-gather-v1-5}
 +
 [source,terminal,subs="attributes+"]
 ----
-$ oc adm must-gather --image={must-gather-v1-5} -- /usr/bin/gather --request-timeout 1m # <1>
+$ oc adm must-gather --image={must-gather-v1-5} -- /usr/bin/gather --request-timeout 1m
 ----
-<1> In this example, the timeout is 1 minute.
++
+In this example, the timeout is 1 minute.
 * To use the insecure TLS connection flag with the `must-gather` tool, run the following command:
 +
 [source,terminal,subs="attributes+"]
 ----
 $ oc adm must-gather --image={must-gather-v1-5} -- /usr/bin/gather --skip-tls
 +
 [source,terminal,subs="attributes+"]
 ----
-$ oc adm must-gather --image={must-gather-v1-5} -- /usr/bin/gather --request-timeout 15s --skip-tls #<1>
+$ oc adm must-gather --image={must-gather-v1-5} -- /usr/bin/gather --request-timeout 15s --skip-tls
 ----
-<1> In this example, the timeout is 15 seconds. By default, the `--skip-tls` flag value is `false`. Set the value to `true` to allow insecure TLS connections.
++
+In this example, the timeout is 15 seconds. By default, the `--skip-tls` flag value is `false`. Set the value to `true` to allow insecure TLS connections.

 .Verification
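As a quick verification sketch, assuming the default behavior of `oc adm must-gather`, which writes the gathered data into a new `must-gather.local.<random>` directory under the current working directory, you can list that directory to confirm that data was collected:

[source,terminal]
----
$ ls -d must-gather.local.*
----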