diff --git a/modules/dedicated-logging-in-and-verifying-permissions.adoc b/modules/dedicated-logging-in-and-verifying-permissions.adoc index cc077569a2cd..2c36628240af 100644 --- a/modules/dedicated-logging-in-and-verifying-permissions.adoc +++ b/modules/dedicated-logging-in-and-verifying-permissions.adoc @@ -80,5 +80,9 @@ verbs and resources associated with the `dedicated-admins-cluster` and [source,terminal] ---- $ oc describe clusterrole.rbac dedicated-admins-cluster +---- + +[source,terminal] +---- $ oc describe clusterrole.rbac dedicated-admins-project ---- diff --git a/modules/developer-cli-odo-installing-odo-on-linux.adoc b/modules/developer-cli-odo-installing-odo-on-linux.adoc index 241f64294f19..c6fe0da95f1b 100644 --- a/modules/developer-cli-odo-installing-odo-on-linux.adoc +++ b/modules/developer-cli-odo-installing-odo-on-linux.adoc @@ -21,25 +21,33 @@ The `{odo-title}` CLI is available to download as a binary and as a tarball for .Procedure . Navigate to the link:https://developers.redhat.com/content-gateway/rest/mirror/pub/openshift-v4/clients/odo/latest/[content gateway] and download the appropriate file for your operating system and architecture. ++ ** If you download the binary, rename it to `odo`: + [source,terminal] ---- $ curl -L https://developers.redhat.com/content-gateway/rest/mirror/pub/openshift-v4/clients/odo/latest/odo-linux-amd64 -o odo ---- ++ ** If you download the tarball, extract the binary: + [source,terminal] ---- $ curl -L https://developers.redhat.com/content-gateway/rest/mirror/pub/openshift-v4/clients/odo/latest/odo-linux-amd64.tar.gz -o odo.tar.gz +---- ++ +[source,terminal] +---- $ tar xvzf odo.tar.gz ---- + . Change the permissions on the binary: + [source,terminal] ---- $ chmod +x ---- + . Place the `{odo-title}` binary in a directory that is on your `PATH`. + To check your `PATH`, execute the following command: @@ -48,6 +56,7 @@ To check your `PATH`, execute the following command: ---- $ echo $PATH ---- + . Verify that `{odo-title}` is now available on your system: + [source,terminal] diff --git a/modules/developer-cli-odo-installing-odo-on-macos.adoc b/modules/developer-cli-odo-installing-odo-on-macos.adoc index 246937212852..14675c8e4c30 100644 --- a/modules/developer-cli-odo-installing-odo-on-macos.adoc +++ b/modules/developer-cli-odo-installing-odo-on-macos.adoc @@ -18,25 +18,33 @@ The `{odo-title}` CLI for macOS is available to download as a binary and as a ta .Procedure . Navigate to the link:https://developers.redhat.com/content-gateway/rest/mirror/pub/openshift-v4/clients/odo/latest/[content gateway] and download the appropriate file: ++ ** If you download the binary, rename it to `odo`: + [source,terminal] ---- $ curl -L https://developers.redhat.com/content-gateway/rest/mirror/pub/openshift-v4/clients/odo/latest/odo-darwin-amd64 -o odo ---- ++ ** If you download the tarball, extract the binary: + [source,terminal] ---- $ curl -L https://developers.redhat.com/content-gateway/rest/mirror/pub/openshift-v4/clients/odo/latest/odo-darwin-amd64.tar.gz -o odo.tar.gz +---- ++ +[source,terminal] +---- $ tar xvzf odo.tar.gz ---- + . Change the permissions on the binary: + [source,terminal] ---- # chmod +x odo ---- + . Place the `{odo-title}` binary in a directory that is on your `PATH`. + To check your `PATH`, execute the following command: @@ -45,6 +53,7 @@ To check your `PATH`, execute the following command: ---- $ echo $PATH ---- + . 
Verify that `{odo-title}` is now available on your system: + [source,terminal] diff --git a/modules/developer-cli-odo-pushing-the-odo-init-image-to-an-internal-registry-directly.adoc b/modules/developer-cli-odo-pushing-the-odo-init-image-to-an-internal-registry-directly.adoc index b0c225db154e..de7a52c72a25 100644 --- a/modules/developer-cli-odo-pushing-the-odo-init-image-to-an-internal-registry-directly.adoc +++ b/modules/developer-cli-odo-pushing-the-odo-init-image-to-an-internal-registry-directly.adoc @@ -60,9 +60,17 @@ $ sudo cp ca.crt /etc/pki/ca-trust/source/anchors/externalroute.crt && sudo upd [source,terminal] ---- $ oc get route -n openshift-image-registry +---- + +.Example output +[source,terminal] +---- NAME HOST/PORT PATH SERVICES PORT TERMINATION WILDCARD default-route image-registry reencrypt None - +---- + +[source,terminal] +---- $ docker login -u kubeadmin -p $(oc whoami -t) ---- @@ -71,9 +79,15 @@ $ docker login -u kubeadmin -p $(oc whoami -t) [source,terminal] ---- $ docker pull registry.access.redhat.com/openshiftdo/odo-init-image-rhel7: - +---- + +[source,terminal] +---- $ docker tag registry.access.redhat.com/openshiftdo/odo-init-image-rhel7: /openshiftdo/odo-init-image-rhel7: - +---- + +[source,terminal] +---- $ docker push /openshiftdo/odo-init-image-rhel7: ---- @@ -84,7 +98,6 @@ $ docker push /openshiftdo/odo-init-image-rhel7: $ export ODO_BOOTSTRAPPER_IMAGE=/openshiftdo/odo-init-image-rhel7:1.0.1 ---- - [id="pushing-the-init-image-directly-on-macos_{context}"] == Pushing the init image directly on macOS @@ -106,7 +119,7 @@ $ oc get secret router-certs-default -n openshift-ingress -o yaml ---- + .Example output -[source,terminal] +[source,terminal,subs="attributes+"] ---- apiVersion: v1 data: @@ -137,9 +150,17 @@ $ sudo security add-trusted-cert -d -r trustRoot -k /Library/Keychains/System.ke [source,terminal] ---- $ oc get route -n openshift-image-registry +---- ++ +.Example output +[source,terminal] +---- NAME HOST/PORT PATH SERVICES PORT TERMINATION WILDCARD default-route image-registry reencrypt None - +---- ++ +[source,terminal] +---- $ docker login -u kubeadmin -p $(oc whoami -t) ---- @@ -148,9 +169,15 @@ $ docker login -u kubeadmin -p $(oc whoami -t) [source,terminal] ---- $ docker pull registry.access.redhat.com/openshiftdo/odo-init-image-rhel7: - +---- ++ +[source,terminal] +---- $ docker tag registry.access.redhat.com/openshiftdo/odo-init-image-rhel7: /openshiftdo/odo-init-image-rhel7: - +---- ++ +[source,terminal] +---- $ docker push /openshiftdo/odo-init-image-rhel7: ---- @@ -170,20 +197,20 @@ $ export ODO_BOOTSTRAPPER_IMAGE=/openshiftdo/odo-init-image-rhel7 . Enable the default route: + -[source,terminal] +[source,terminal,subs="attributes+"] ---- PS C:\> oc patch configs.imageregistry.operator.openshift.io cluster -p '{"spec":{"defaultRoute":true}}' --type='merge' -n openshift-image-registry ---- . Get a wildcard route CA: + -[source,terminal] +[source,terminal,subs="attributes+"] ---- PS C:\> oc get secret router-certs-default -n openshift-ingress -o yaml ---- + .Example output -[source,terminal] +[source,terminal,subs="attributes+"] ---- apiVersion: v1 data: @@ -197,43 +224,57 @@ type: kubernetes.io/tls . Use `base64` to decode the root certification authority (CA) content of your mirror registry: + -[source,terminal] +[source,terminal,subs="attributes+"] ---- PS C:\> echo | base64 --decode > ca.crt ----
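++ +Optionally, you can inspect the decoded certificate before trusting it; `certutil -dump` prints the certificate details, and the exact output depends on your certificate: ++ +[source,terminal] +---- +PS C:\> certutil -dump ca.crt +---- . 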
As an administrator, trust a CA in your client platform by executing the following command: + -[source,terminal] +[source,terminal,subs="attributes+"] ---- PS C:\WINDOWS\system32> certutil -addstore -f "ROOT" ca.crt ---- . Log in to the {product-registry}: + -[source,terminal] +[source,terminal,subs="attributes+"] ---- PS C:\> oc get route -n openshift-image-registry +---- ++ +.Example output +[source,terminal,subs="attributes+"] +---- NAME HOST/PORT PATH SERVICES PORT TERMINATION WILDCARD default-route image-registry reencrypt None - +---- ++ +[source,terminal,subs="attributes+"] +---- PS C:\> docker login -u kubeadmin -p $(oc whoami -t) ---- . Push the `odo` init image: + -[source,terminal] +[source,terminal,subs="attributes+"] ---- PS C:\> docker pull registry.access.redhat.com/openshiftdo/odo-init-image-rhel7: - +---- ++ +[source,terminal,subs="attributes+"] +---- PS C:\> docker tag registry.access.redhat.com/openshiftdo/odo-init-image-rhel7: /openshiftdo/odo-init-image-rhel7: - +---- ++ +[source,terminal,subs="attributes+"] +---- PS C:\> docker push /openshiftdo/odo-init-image-rhel7: ---- . Override the default `odo` init image path by setting the `ODO_BOOTSTRAPPER_IMAGE` environment variable: + -[source,terminal] +[source,terminal,subs="attributes+"] ---- PS C:\> $env:ODO_BOOTSTRAPPER_IMAGE="/openshiftdo/odo-init-image-rhel7:" ---- diff --git a/modules/developer-cli-odo-ref-link.adoc b/modules/developer-cli-odo-ref-link.adoc index 10d408bbfb86..a9afaaaa86c2 100644 --- a/modules/developer-cli-odo-ref-link.adoc +++ b/modules/developer-cli-odo-ref-link.adoc @@ -121,7 +121,7 @@ The correct path for the URL would be \http://8080-tcp.192.168.39.112.nip.io/api + [source,terminal] ---- -$ odo describe +$ odo describe ---- + .Example output @@ -183,12 +183,11 @@ Linked Services: + Some of these variables are used in the backend component's `src/main/resources/application.properties` file so that the Java Spring Boot application can connect to the PostgreSQL database service. -. Lastly, `odo` has created a directory called `kubernetes/` in your backend component's directory that contains the following files: +. Lastly, `odo` has created a directory called `kubernetes/` in your backend component's directory that contains the `odo-service-backend-postgrescluster-hippo.yaml` and `odo-service-hippo.yaml` files: + [source,terminal] ---- $ ls kubernetes -odo-service-backend-postgrescluster-hippo.yaml odo-service-hippo.yaml ---- + These files contain the information (YAML manifests) for two resources: @@ -215,12 +214,11 @@ $ odo unlink PostgresCluster/hippo To apply the changes, please use `odo push` ---- -To unlink them on the cluster, run `odo push`. Now if you inspect the `kubernetes/` directory, you see only one file: +To unlink them on the cluster, run `odo push`. 
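+ +For example, from the component directory: + +[source,terminal] +---- +$ odo push +---- +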
Now if you inspect the `kubernetes/` directory, you see only one file, such as `odo-service-hippo.yaml`: [source,terminal] ---- $ ls kubernetes -odo-service-hippo.yaml ---- Next, use the `--inlined` flag to create a link: @@ -338,6 +336,10 @@ Unlink the service from the component using: [source,terminal] ---- $ odo unlink PostgresCluster/hippo +---- + +[source,terminal] +---- $ odo push ---- @@ -350,6 +352,10 @@ By default, `odo` creates the manifest file under the `kubernetes/` directory, f [source,terminal] ---- $ odo link PostgresCluster/hippo --bind-as-files +---- + +[source,terminal] +---- $ odo push ---- @@ -357,7 +363,11 @@ $ odo push [source,terminal] ---- $ odo describe +---- +.Example output +[source,terminal] +---- Component Name: backend Type: spring Environment Variables: @@ -461,21 +471,19 @@ When you pass custom bindings while linking the backend component with the Postg [source,terminal] ---- $ odo link PostgresCluster/hippo --map pgVersion='{{ .hippo.spec.postgresVersion }}' --map pgImage='{{ .hippo.spec.image }}' --bind-as-files -$ odo push ---- -These custom bindings get mounted as files instead of being injected as environment variables. To validate that this worked, run the following command: - -.Example command [source,terminal] ---- -$ odo exec -- cat /bindings/backend-postgrescluster-hippo/pgVersion +$ odo push ---- -.Example output +These custom bindings get mounted as files instead of being injected as environment variables. To validate that this worked, run the following command; the expected output is `13`. + +.Example command [source,terminal] ---- -13 +$ odo exec -- cat /bindings/backend-postgrescluster-hippo/pgVersion ---- .Example command diff --git a/modules/developer-cli-odo-ref-service.adoc b/modules/developer-cli-odo-ref-service.adoc index af6daf5021ce..68160f26b11d 100644 --- a/modules/developer-cli-odo-ref-service.adoc +++ b/modules/developer-cli-odo-ref-service.adoc @@ -24,15 +24,27 @@ $ odo service create For example, to create an instance of a Redis service named `my-redis-service`, you can run the following command: -.Example output [source,terminal] ---- $ odo catalog list services +---- + +.Example output +[source,terminal] +---- Services available through Operators NAME CRDs redis-operator.v0.8.0 RedisCluster, Redis +---- +[source,terminal] +---- $ odo service create redis-operator.v0.8.0/Redis my-redis-service +---- + +.Example output +[source,terminal] +---- Successfully added service to the configuration; do 'odo push' to create service on the cluster ---- @@ -75,7 +87,6 @@ $ cat kubernetes/odo-service-my-redis-service.yaml storage: 1Gi ---- -.Example command [source,terminal] ---- $ cat devfile.yaml @@ -92,7 +103,6 @@ components: [...] ---- - Note that the name of the created instance is optional. If you do not provide a name, it will be the lowercase name of the service. 
For example, the following command creates an instance of a Redis service named `redis`: [source,terminal] @@ -107,9 +117,13 @@ By default, a new manifest is created in the `kubernetes/` directory, referenced [source,terminal] ---- $ odo service create redis-operator.v0.8.0/Redis my-redis-service --inlined -Successfully added service to the configuration; do 'odo push' to create service on the cluster ---- +.Example output +[source,terminal] +---- +Successfully added service to the configuration; do 'odo push' to create service on the cluster +---- .Example command [source,terminal] @@ -171,6 +185,11 @@ $ odo service create redis-operator.v0.8.0/Redis my-redis-service \ -p kubernetesConfig.image=quay.io/opstree/redis:v6.2.5 \ -p kubernetesConfig.serviceType=ClusterIP \ -p redisExporter.image=quay.io/opstree/redis-exporter:1.0 +---- + +.Example output +[source,terminal] +---- Successfully added service to the configuration; do 'odo push' to create service on the cluster ---- @@ -224,6 +243,11 @@ EOF [source,terminal] ---- $ odo service create --from-file my-redis.yaml +---- ++ +.Example output +[source,terminal] +---- Successfully added service to the configuration; do 'odo push' to create service on the cluster ---- @@ -236,10 +260,14 @@ To delete a service, run the command: $ odo service delete ---- -.Example output [source,terminal] ---- $ odo service list +---- + +.Example output +[source,terminal] +---- NAME MANAGED BY ODO STATE AGE Redis/my-redis-service Yes (api) Deleted locally 5m39s ---- @@ -247,6 +275,11 @@ Redis/my-redis-service Yes (api) Deleted locally 5m39s [source,terminal] ---- $ odo service delete Redis/my-redis-service +---- + +.Example output +[source,terminal] +---- ? Are you sure you want to delete Redis/my-redis-service Yes Service "Redis/my-redis-service" has been successfully deleted; do 'odo push' to delete service from the cluster ---- @@ -262,10 +295,14 @@ To list the services created for your component, run the command: $ odo service list ---- -.Example output [source,terminal] ---- $ odo service list +---- + +.Example output +[source,terminal] +---- NAME MANAGED BY ODO STATE AGE Redis/my-redis-service-1 Yes (api) Not pushed Redis/my-redis-service-2 Yes (api) Pushed 52s @@ -287,6 +324,11 @@ $ odo service describe [source,terminal] ---- $ odo service describe Redis/my-redis-service +---- + +.Example output +[source,terminal] +---- Version: redis.redis.opstreelabs.in/v1beta1 Kind: Redis Name: my-redis-service @@ -295,4 +337,4 @@ NAME VALUE kubernetesConfig.image quay.io/opstree/redis:v6.2.5 kubernetesConfig.serviceType ClusterIP redisExporter.image quay.io/opstree/redis-exporter:1.0 ----- \ No newline at end of file +---- diff --git a/modules/developer-cli-odo-ref-storage.adoc b/modules/developer-cli-odo-ref-storage.adoc index 537b35e6f4a9..d02de97b84f1 100644 --- a/modules/developer-cli-odo-ref-storage.adoc +++ b/modules/developer-cli-odo-ref-storage.adoc @@ -14,19 +14,30 @@ To add a storage volume to the cluster, run the command: $ odo storage create ---- -.Example output [source,terminal] ---- $ odo storage create store --path /data --size 1Gi +---- + +.Example output +[source,terminal] +---- ✓ Added storage store to nodejs-project-ufyy +---- +[source,terminal] +---- $ odo storage create tempdir --path /tmp --size 2Gi --ephemeral +---- + +.Example output +[source,terminal] +---- ✓ Added storage tempdir to nodejs-project-ufyy Please use `odo push` command to make the storage accessible to the component ---- - In the above example, the first storage volume has 
been mounted to the `/data` path and has a size of `1Gi`, and the second volume has been mounted to `/tmp` and is ephemeral. == Listing the storage volumes @@ -38,10 +49,14 @@ To check the storage volumes currently used by the component, run the command: $ odo storage list ---- -.Example output [source,terminal] ---- $ odo storage list +---- + +.Example output +[source,terminal] +---- The component 'nodejs-project-ufyy' has the following storage attached: NAME SIZE PATH STATE store 1Gi /data Not Pushed @@ -57,10 +72,14 @@ To delete a storage volume, run the command: $ odo storage delete ---- -.Example output [source,terminal] ---- $ odo storage delete store -f +---- + +.Example output +[source,terminal] +---- Deleted storage store from nodejs-project-ufyy Please use `odo push` command to delete the storage from the cluster @@ -98,10 +117,14 @@ In the example, there are two containers,`nodejs1` and `nodejs2`. To attach stor $ odo storage create --container ---- -.Example output [source,terminal] ---- $ odo storage create store --path /data --size 1Gi --container nodejs2 +---- + +.Example output +[source,terminal] +---- ✓ Added storage store to nodejs-testing-xnfg Please use `odo push` command to make the storage accessible to the component diff --git a/modules/developer-cli-odo-sample-applications-binary.adoc b/modules/developer-cli-odo-sample-applications-binary.adoc index b921553ac80f..5b9e5ffd03d5 100644 --- a/modules/developer-cli-odo-sample-applications-binary.adoc +++ b/modules/developer-cli-odo-sample-applications-binary.adoc @@ -15,28 +15,24 @@ Java can be used to deploy a binary artifact as follows: [source,terminal] ---- $ git clone https://github.com/spring-projects/spring-petclinic.git -$ cd spring-petclinic -$ mvn package -$ odo create java test3 --binary target/*.jar -$ odo push ---- +[source,terminal] +---- +$ cd spring-petclinic +---- -//Commenting out as it doesn't work for now. https://github.com/openshift/odo/issues/4623 -//// -[id="odo-sample-applications-binary-wildfly_{context}"] -== wildfly +[source,terminal] +---- +$ mvn package +---- -WildFly can be used to deploy a binary application as follows: +[source,terminal] +---- +$ odo create java test3 --binary target/*.jar +---- [source,terminal] ---- -$ git clone https://github.com/openshiftdemos/os-sample-java-web.git -$ cd os-sample-java-web -$ mvn package -$ cd .. 
-$ mkdir example && cd example -$ mv ../os-sample-java-web/target/ROOT.war example.war -$ odo create wildfly --binary example.war +$ odo push ---- -//// diff --git a/modules/dr-hosted-cluster-within-aws-region-delete.adoc b/modules/dr-hosted-cluster-within-aws-region-delete.adoc index 4dbbc08b428c..700fa792dacc 100644 --- a/modules/dr-hosted-cluster-within-aws-region-delete.adoc +++ b/modules/dr-hosted-cluster-within-aws-region-delete.adoc @@ -30,12 +30,22 @@ As a workaround, update the value of the `spec.persistentVolumeClaimRetentionPol + [source,terminal] ---- -# Just in case $ export KUBECONFIG=${MGMT_KUBECONFIG} - -# Scale down deployments +---- ++ +.Scale down deployment commands +[source,terminal] +---- $ oc scale deployment -n ${HC_CLUSTER_NS}-${HC_CLUSTER_NAME} --replicas=0 --all +---- ++ +[source,terminal] +---- $ oc scale statefulset.apps -n ${HC_CLUSTER_NS}-${HC_CLUSTER_NAME} --replicas=0 --all +---- ++ +[source,terminal] +---- $ sleep 15 ---- @@ -59,7 +69,10 @@ for m in $(oc get machines -n ${HC_CLUSTER_NS}-${HC_CLUSTER_NAME} -o name); do oc patch -n ${HC_CLUSTER_NS}-${HC_CLUSTER_NAME} ${m} --type=json --patch='[ { "op":"remove", "path": "/metadata/finalizers" }]' || true oc delete -n ${HC_CLUSTER_NS}-${HC_CLUSTER_NAME} ${m} || true done - +---- ++ +[source,terminal] +---- $ oc delete machineset -n ${HC_CLUSTER_NS}-${HC_CLUSTER_NAME} --all || true ---- @@ -67,9 +80,16 @@ $ oc delete machineset -n ${HC_CLUSTER_NS}-${HC_CLUSTER_NAME} --all || true + [source,terminal] ---- -# Cluster $ C_NAME=$(oc get cluster -n ${HC_CLUSTER_NS}-${HC_CLUSTER_NAME} -o name) +---- ++ +[source,terminal] +---- $ oc patch -n ${HC_CLUSTER_NS}-${HC_CLUSTER_NAME} ${C_NAME} --type=json --patch='[ { "op":"remove", "path": "/metadata/finalizers" }]' +---- ++ +[source,terminal] +---- $ oc delete cluster.cluster.x-k8s.io -n ${HC_CLUSTER_NS}-${HC_CLUSTER_NAME} --all ---- @@ -77,7 +97,6 @@ $ oc delete cluster.cluster.x-k8s.io -n ${HC_CLUSTER_NS}-${HC_CLUSTER_NAME} --al + [source,terminal] ---- -# AWS Machines for m in $(oc get awsmachine.infrastructure.cluster.x-k8s.io -n ${HC_CLUSTER_NS}-${HC_CLUSTER_NAME} -o name) do oc patch -n ${HC_CLUSTER_NS}-${HC_CLUSTER_NAME} ${m} --type=json --patch='[ { "op":"remove", "path": "/metadata/finalizers" }]' || true @@ -87,21 +106,37 @@ done . Delete the `HostedControlPlane` and `ControlPlane` HC namespace objects by entering these commands: + +.Delete HCP and ControlPlane HC NS commands [source,terminal] ---- -# Delete HCP and ControlPlane HC NS $ oc patch -n ${HC_CLUSTER_NS}-${HC_CLUSTER_NAME} hostedcontrolplane.hypershift.openshift.io ${HC_CLUSTER_NAME} --type=json --patch='[ { "op":"remove", "path": "/metadata/finalizers" }]' +---- ++ +[source,terminal] +---- $ oc delete hostedcontrolplane.hypershift.openshift.io -n ${HC_CLUSTER_NS}-${HC_CLUSTER_NAME} --all +---- ++ +[source,terminal] +---- $ oc delete ns ${HC_CLUSTER_NS}-${HC_CLUSTER_NAME} || true ---- . 
Delete the `HostedCluster` and HC namespace objects by entering these commands: + +.Delete HC and HC namespace commands [source,terminal] ---- -# Delete HC and HC Namespace $ oc -n ${HC_CLUSTER_NS} patch hostedclusters ${HC_CLUSTER_NAME} -p '{"metadata":{"finalizers":null}}' --type merge || true +---- ++ +[source,terminal] +---- $ oc delete hc -n ${HC_CLUSTER_NS} ${HC_CLUSTER_NAME} || true +---- ++ +[source,terminal] +---- $ oc delete ns ${HC_CLUSTER_NS} || true ---- @@ -109,19 +144,45 @@ $ oc delete ns ${HC_CLUSTER_NS} || true * To verify that everything works, enter these commands: + +.Validation commands [source,terminal] ---- -# Validations $ export KUBECONFIG=${MGMT2_KUBECONFIG} - +---- ++ +[source,terminal] +---- $ oc get hc -n ${HC_CLUSTER_NS} +---- ++ +[source,terminal] +---- $ oc get np -n ${HC_CLUSTER_NS} +---- ++ +[source,terminal] +---- $ oc get pod -n ${HC_CLUSTER_NS}-${HC_CLUSTER_NAME} +---- ++ +[source,terminal] +---- $ oc get machines -n ${HC_CLUSTER_NS}-${HC_CLUSTER_NAME} - -# Inside the HostedCluster +---- ++ +.Commands to run inside the HostedCluster +[source,terminal] +---- $ export KUBECONFIG=${HC_KUBECONFIG} +---- ++ +[source,terminal] +---- $ oc get clusterversion +---- ++ +[source,terminal] +---- $ oc get nodes ---- diff --git a/modules/dr-hosted-cluster-within-aws-region-restore.adoc b/modules/dr-hosted-cluster-within-aws-region-restore.adoc index da530326f5ec..c003cbf4f087 100644 --- a/modules/dr-hosted-cluster-within-aws-region-restore.adoc +++ b/modules/dr-hosted-cluster-within-aws-region-restore.adoc @@ -23,21 +23,35 @@ Ensure that the `kubeconfig` file of the destination management cluster is place + [source,terminal] ---- -# Just in case $ export KUBECONFIG=${MGMT2_KUBECONFIG} +---- ++ +[source,terminal] +---- $ BACKUP_DIR=${HC_CLUSTER_DIR}/backup - -# Namespace deletion in the destination Management cluster +---- ++ +.Namespace deletion in the destination Management cluster +[source,terminal] +---- $ oc delete ns ${HC_CLUSTER_NS} || true +---- ++ +[source,terminal] +---- $ oc delete ns ${HC_CLUSTER_NS}-${HC_CLUSTER_NAME} || true ---- . Re-create the deleted namespaces by entering these commands: + +.Namespace creation commands [source,terminal] ---- -# Namespace creation $ oc new-project ${HC_CLUSTER_NS} +---- ++ +[source,terminal] +---- $ oc new-project ${HC_CLUSTER_NS}-${HC_CLUSTER_NAME} ---- @@ -50,28 +64,54 @@ $ oc apply -f ${BACKUP_DIR}/namespaces/${HC_CLUSTER_NS}/secret-* . Restore the objects in the `HostedCluster` control plane namespace by entering these commands: + +.Restore secret command [source,terminal] ---- -# Secrets $ oc apply -f ${BACKUP_DIR}/namespaces/${HC_CLUSTER_NS}-${HC_CLUSTER_NAME}/secret-* - +---- ++ +.Cluster restore commands +[source,terminal] +---- $ oc apply -f ${BACKUP_DIR}/namespaces/${HC_CLUSTER_NS}-${HC_CLUSTER_NAME}/hcp-* +---- ++ +[source,terminal] +---- $ oc apply -f ${BACKUP_DIR}/namespaces/${HC_CLUSTER_NS}-${HC_CLUSTER_NAME}/cl-* ----
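++ +Optionally, verify that the restored control plane objects exist; the exact output depends on your cluster: ++ +[source,terminal] +---- +$ oc get hostedcontrolplane.hypershift.openshift.io -n ${HC_CLUSTER_NS}-${HC_CLUSTER_NAME} +---- . 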
If you are recovering the nodes and the node pool to reuse AWS instances, restore the objects in the HC control plane namespace by entering these commands: + +.Commands for AWS [source,terminal] ---- -# AWS $ oc apply -f ${BACKUP_DIR}/namespaces/${HC_CLUSTER_NS}-${HC_CLUSTER_NAME}/awscl-* +---- ++ +[source,terminal] +---- $ oc apply -f ${BACKUP_DIR}/namespaces/${HC_CLUSTER_NS}-${HC_CLUSTER_NAME}/awsmt-* +---- ++ +[source,terminal] +---- $ oc apply -f ${BACKUP_DIR}/namespaces/${HC_CLUSTER_NS}-${HC_CLUSTER_NAME}/awsm-* - -# Machines +---- ++ +.Commands for machines +[source,terminal] +---- $ oc apply -f ${BACKUP_DIR}/namespaces/${HC_CLUSTER_NS}-${HC_CLUSTER_NAME}/machinedeployment-* +---- ++ +[source,terminal] +---- $ oc apply -f ${BACKUP_DIR}/namespaces/${HC_CLUSTER_NS}-${HC_CLUSTER_NAME}/machineset-* +---- ++ +[source,terminal] +---- $ oc apply -f ${BACKUP_DIR}/namespaces/${HC_CLUSTER_NS}-${HC_CLUSTER_NAME}/machine-* ---- diff --git a/modules/microshift-olm-build-op-catalogs.adoc b/modules/microshift-olm-build-op-catalogs.adoc index bfa1f40aa104..fec4d4be9157 100644 --- a/modules/microshift-olm-build-op-catalogs.adoc +++ b/modules/microshift-olm-build-op-catalogs.adoc @@ -18,5 +18,5 @@ You can create catalogs for your custom Operators or filter catalogs of widely a [IMPORTANT] ==== -* When link:https://access.redhat.com/documentation/en-us/openshift_container_platform/{ocp-version}/html/operators/administrator-tasks#olm-creating-catalog-from-index_olm-restricted-networks[adding a catalog source to a cluster], set the `securityContextConfig` value to `restricted` in the `catalogSource.yaml` file. Ensure that your catalog can run with `restricted` permissions. +When link:https://access.redhat.com/documentation/en-us/openshift_container_platform/{ocp-version}/html/operators/administrator-tasks#olm-creating-catalog-from-index_olm-restricted-networks[adding a catalog source to a cluster], set the `securityContextConfig` value to `restricted` in the `catalogSource.yaml` file. Ensure that your catalog can run with `restricted` permissions. ==== \ No newline at end of file diff --git a/modules/microshift-olm-deploy-ops-global-ns.adoc b/modules/microshift-olm-deploy-ops-global-ns.adoc index 824cd366df44..03ff42773171 100644 --- a/modules/microshift-olm-deploy-ops-global-ns.adoc +++ b/modules/microshift-olm-deploy-ops-global-ns.adoc @@ -47,7 +47,7 @@ $ oc -n openshift-operator-lifecycle-manager get pod -l app=catalog-operator NAME READY STATUS RESTARTS AGE catalog-operator-5fc7f857b6-tj8cf 1/1 Running 0 2m33s ---- - ++ [NOTE] ==== The following steps assume you are using the global namespace, `openshift-marketplace`. The catalog must run in the same namespace as the Operator. The Operator must support the *AllNamespaces* mode. @@ -187,7 +187,8 @@ subscription.operators.coreos.com/my-cert-manager created . You can create a configuration file for the specific Operand you want to use and apply it now. .Verification -. Verify that your Operator is running by using the following command: + +* Verify that your Operator is running by using the following command: + [source,terminal] ---- diff --git a/modules/microshift-olm-deploy-ops-spec-ns.adoc b/modules/microshift-olm-deploy-ops-spec-ns.adoc index d65e55d8d7bb..3a615d43d3dc 100644 --- a/modules/microshift-olm-deploy-ops-spec-ns.adoc +++ b/modules/microshift-olm-deploy-ops-spec-ns.adoc @@ -58,9 +58,10 @@ kind: Namespace metadata: name: olm-microshift ---- -+ + . 
Apply the namespace configuration using the following command: + +[source,terminal,subs="+quotes"] ---- $ oc apply -f __ <1> ---- @@ -90,7 +91,7 @@ spec: <1> . Apply the Operator group configuration by running the following command: + -[source,terminal] +[source,terminal,subs="+quotes"] ---- $ oc apply -f __ <1> ---- @@ -129,7 +130,7 @@ spec: . Apply the `CatalogSource` configuration by running the following command: + -[source,terminal] +[source,terminal,subs="+quotes"] ---- $ oc apply -f __ <1> ---- @@ -216,7 +217,7 @@ spec: . Apply the Subscription CR configuration by running the following command: + -[source,terminal] +[source,terminal,subs="+quotes"] ---- $ oc apply -f __ ---- @@ -230,7 +231,8 @@ subscription.operators.coreos.com/my-gitlab-operator-kubernetes . You can create a configuration file for the specific Operand you want to use and apply it now. .Verification -. Verify that your Operator is running by using the following command: + +* Verify that your Operator is running by using the following command: + [source,terminal] ---- diff --git a/modules/microshift-rpm-ostree-https.adoc b/modules/microshift-rpm-ostree-https.adoc index 1ba0e3d987e2..ebfc97d28225 100644 --- a/modules/microshift-rpm-ostree-https.adoc +++ b/modules/microshift-rpm-ostree-https.adoc @@ -19,13 +19,14 @@ Environment="http_proxy=http://$PROXY_USER:$PROXY_PASSWORD@$PROXY_SERVER:$PROXY_ ---- . Next, reload the configuration settings and restart the service to apply your changes. - ++ .. Reload the configuration settings by running the following command: + [source,terminal] ---- $ sudo systemctl daemon-reload ---- ++ .. Restart the `rpm-ostreed` service by running the following command: + [source,terminal] diff --git a/modules/microshift-security-context-constraints-opting.adoc b/modules/microshift-security-context-constraints-opting.adoc index 6b323cd5a60f..c1200e8538dc 100644 --- a/modules/microshift-security-context-constraints-opting.adoc +++ b/modules/microshift-security-context-constraints-opting.adoc @@ -27,10 +27,8 @@ If an Operator is installed in a user-created `openshift-*` namespace, synchroni .Procedure -* To enable pod security admission label synchronization in a namespace, set the value of the `security.openshift.io/scc.podSecurityLabelSync` label to `true`. -+ -Run the following command: -+ +* To enable pod security admission label synchronization in a namespace, set the value of the `security.openshift.io/scc.podSecurityLabelSync` label to `true` by running the following command: + [source,terminal] ---- $ oc label namespace security.openshift.io/scc.podSecurityLabelSync=true