diff --git a/modules/oadp-using-ca-certificates-with-velero-command.adoc b/modules/oadp-using-ca-certificates-with-velero-command.adoc
index 7d5f4bff1f86..7a0d0fdd05e1 100644
--- a/modules/oadp-using-ca-certificates-with-velero-command.adoc
+++ b/modules/oadp-using-ca-certificates-with-velero-command.adoc
@@ -29,9 +29,12 @@ $ alias velero='oc -n openshift-adp exec deployment/velero -c velero -it -- ./ve
 . Check that the alias is working by running the following command:
 +
 [source,terminal]
-.Example
 ----
 $ velero version
+----
++
+[source,terminal]
+----
 Client:
 	Version: v1.12.1-OADP
 	Git commit: -
diff --git a/modules/op-using-tekton-chains-to-sign-and-verify-image-and-provenance.adoc b/modules/op-using-tekton-chains-to-sign-and-verify-image-and-provenance.adoc
index 020c350c9e2f..bec4d3b64c13 100644
--- a/modules/op-using-tekton-chains-to-sign-and-verify-image-and-provenance.adoc
+++ b/modules/op-using-tekton-chains-to-sign-and-verify-image-and-provenance.adoc
@@ -142,15 +142,18 @@ $ cosign verify-attestation --key cosign.pub $REGISTRY/kaniko-chains
 +
 [source,terminal]
 ----
-$ rekor-cli search --sha <sha256_digest> <1>
-
-<uuid_1> <2>
-<uuid_2> <3>
+$ rekor-cli search --sha <sha256_digest>
+----
+* `<sha256_digest>`: Substitute with the `sha256` digest of the image.
++
+[source,terminal]
+----
+<uuid_1>
+<uuid_2>
 ...
 ----
-<1> Substitute with the `sha256` digest of the image.
-<2> The first matching universally unique identifier (UUID).
-<3> The second matching UUID.
+* `<uuid_1>`: The first matching universally unique identifier (UUID).
+* `<uuid_2>`: The second matching UUID.
 +
 The search result displays UUIDs of the matching entries. One of those UUIDs holds the attestation.
 +
diff --git a/modules/ossm-cert-manage-verify-cert.adoc b/modules/ossm-cert-manage-verify-cert.adoc
index 6ed1cf68bdb7..20a9ee59cca2 100644
--- a/modules/ossm-cert-manage-verify-cert.adoc
+++ b/modules/ossm-cert-manage-verify-cert.adoc
@@ -80,12 +80,9 @@ $ diff -s /tmp/ca-cert.crt.txt /tmp/pod-cert-chain-ca.crt.txt
 You should see the following result:
 `Files /tmp/ca-cert.crt.txt and /tmp/pod-cert-chain-ca.crt.txt are identical.`
 
-. Verify the certificate chain from the root certificate to the workload certificate. Replace `<path>` with the path to your certificates.
+. Verify the certificate chain from the root certificate to the workload certificate. Replace `<path>` with the path to your certificates. After you run the command, the expected output shows `./proxy-cert-1.pem: OK`.
 +
 [source,terminal]
 ----
 $ openssl verify -CAfile <(cat <path>/ca-cert.pem <path>/root-cert.pem) ./proxy-cert-1.pem
 ----
-+
-You should see the following result:
-`./proxy-cert-1.pem: OK`
\ No newline at end of file
diff --git a/modules/preparing-aws-credentials-for-oadp.adoc b/modules/preparing-aws-credentials-for-oadp.adoc
index cab454eace0d..859ef05ed052 100644
--- a/modules/preparing-aws-credentials-for-oadp.adoc
+++ b/modules/preparing-aws-credentials-for-oadp.adoc
@@ -11,6 +11,7 @@ An {aws-full} account must be prepared and configured to accept an {oadp-first}
 installation.
 
 .Procedure
+
 . Create the following environment variables by running the following commands:
 +
 [IMPORTANT]
@@ -20,33 +21,73 @@ Change the cluster name to match your ROSA cluster, and ensure you are logged in
 +
 [source,terminal]
 ----
-$ export CLUSTER_NAME=my-cluster <1>
- export ROSA_CLUSTER_ID=$(rosa describe cluster -c ${CLUSTER_NAME} --output json | jq -r .id)
- export REGION=$(rosa describe cluster -c ${CLUSTER_NAME} --output json | jq -r .region.id)
- export OIDC_ENDPOINT=$(oc get authentication.config.openshift.io cluster -o jsonpath='{.spec.serviceAccountIssuer}' | sed 's|^https://||')
- export AWS_ACCOUNT_ID=$(aws sts get-caller-identity --query Account --output text)
- export CLUSTER_VERSION=$(rosa describe cluster -c ${CLUSTER_NAME} -o json | jq -r .version.raw_id | cut -f -2 -d '.')
- export ROLE_NAME="${CLUSTER_NAME}-openshift-oadp-aws-cloud-credentials"
- export SCRATCH="/tmp/${CLUSTER_NAME}/oadp"
- mkdir -p ${SCRATCH}
- echo "Cluster ID: ${ROSA_CLUSTER_ID}, Region: ${REGION}, OIDC Endpoint:
- ${OIDC_ENDPOINT}, AWS Account ID: ${AWS_ACCOUNT_ID}"
+$ export CLUSTER_NAME=my-cluster
+----
++
+--
+* `my-cluster`: Replace `my-cluster` with your ROSA cluster name.
+--
++
+[source,terminal]
+----
+$ export ROSA_CLUSTER_ID=$(rosa describe cluster -c ${CLUSTER_NAME} --output json | jq -r .id)
+----
++
+[source,terminal]
+----
+$ export REGION=$(rosa describe cluster -c ${CLUSTER_NAME} --output json | jq -r .region.id)
+----
++
+[source,terminal]
+----
+$ export OIDC_ENDPOINT=$(oc get authentication.config.openshift.io cluster -o jsonpath='{.spec.serviceAccountIssuer}' | sed 's|^https://||')
+----
++
+[source,terminal]
+----
+$ export AWS_ACCOUNT_ID=$(aws sts get-caller-identity --query Account --output text)
+----
++
+[source,terminal]
+----
+$ export CLUSTER_VERSION=$(rosa describe cluster -c ${CLUSTER_NAME} -o json | jq -r .version.raw_id | cut -f -2 -d '.')
+----
++
+[source,terminal]
+----
+$ export ROLE_NAME="${CLUSTER_NAME}-openshift-oadp-aws-cloud-credentials"
 ----
-
-<1> Replace `my-cluster` with your ROSA cluster name.
++
+[source,terminal]
+----
+$ export SCRATCH="/tmp/${CLUSTER_NAME}/oadp"
+----
++
+[source,terminal]
+----
+$ mkdir -p ${SCRATCH}
+----
++
+[source,terminal]
+----
+$ echo "Cluster ID: ${ROSA_CLUSTER_ID}, Region: ${REGION}, OIDC Endpoint:
+ ${OIDC_ENDPOINT}, AWS Account ID: ${AWS_ACCOUNT_ID}"
+----
 
 . On the {aws-short} account, create an IAM policy to allow access to {aws-short} S3:
-
++
 .. Check to see if the policy exists by running the following command:
 +
 [source,terminal]
 ----
-$ POLICY_ARN=$(aws iam list-policies --query "Policies[?PolicyName=='RosaOadpVer1'].{ARN:Arn}" --output text) <1>
+$ POLICY_ARN=$(aws iam list-policies --query "Policies[?PolicyName=='RosaOadpVer1'].{ARN:Arn}" --output text)
 ----
 +
-<1> Replace `RosaOadp` with your policy name.
-
-.. Enter the following command to create the policy JSON file and then create the policy in ROSA:
+--
+* `RosaOadpVer1`: Replace `RosaOadpVer1` with your policy name.
+--
++
+.. Enter the following command to create the policy JSON file and then create the policy:
 +
 [NOTE]
 ====
@@ -56,7 +97,7 @@ If the policy ARN is not found, the command creates the policy. If the policy AR
 [source,terminal]
 ----
 $ if [[ -z "${POLICY_ARN}" ]]; then
-cat << EOF > ${SCRATCH}/policy.json <1>
+cat << EOF > ${SCRATCH}/policy.json
 {
   "Version": "2012-10-17",
   "Statement": [
@@ -101,8 +142,10 @@ EOF
 fi
 ----
 +
-<1> `SCRATCH` is a name for a temporary directory created for the environment variables.
-
+--
+* `SCRATCH`: A name for a temporary directory created for the environment variables.
+--
++
 .. View the policy ARN by running the following command:
 +
 [source,terminal]
@@ -110,9 +153,8 @@ EOF
 $ echo ${POLICY_ARN}
 ----
-
 . Create an IAM role trust policy for the cluster:
-
++
 .. Create the trust policy file by running the following command:
 +
 [source,terminal]
 ----
@@ -137,7 +179,7 @@ $ cat << EOF > ${SCRATCH}/trust-policy.json
 }
 EOF
 ----
-
++
 .. Create the role by running the following command:
 +
 [source,terminal]
@@ -152,7 +194,7 @@ $ ROLE_ARN=$(aws iam create-role --role-name \
   Key=operator_name,Value=openshift-oadp \
   --query Role.Arn --output text)
 ----
-
++
 .. View the role ARN by running the following command:
 +
 [source,terminal]
diff --git a/modules/preparing-aws-sts-credentials-for-oadp.adoc b/modules/preparing-aws-sts-credentials-for-oadp.adoc
index be4d0c38d049..a9870e75e77c 100644
--- a/modules/preparing-aws-sts-credentials-for-oadp.adoc
+++ b/modules/preparing-aws-sts-credentials-for-oadp.adoc
@@ -10,6 +10,7 @@ An {aws-full} account must be prepared and configured to accept an {oadp-first}
 installation. Prepare the {aws-short} credentials by using the following procedure.
 
 .Procedure
+
 . Define the `cluster_name` environment variable by running the following command:
 +
 [source,terminal]
@@ -23,17 +24,33 @@ $ export CLUSTER_NAME=<cluster_name> <1>
 [source,terminal]
 ----
 $ export CLUSTER_VERSION=$(oc get clusterversion version -o jsonpath='{.status.desired.version}{"\n"}')
-
-export AWS_CLUSTER_ID=$(oc get clusterversion version -o jsonpath='{.spec.clusterID}{"\n"}')
-
-export OIDC_ENDPOINT=$(oc get authentication.config.openshift.io cluster -o jsonpath='{.spec.serviceAccountIssuer}' | sed 's|^https://||')
-
-export REGION=$(oc get infrastructures cluster -o jsonpath='{.status.platformStatus.aws.region}' --allow-missing-template-keys=false || echo us-east-2)
-
-export AWS_ACCOUNT_ID=$(aws sts get-caller-identity --query Account --output text)
-
-export ROLE_NAME="${CLUSTER_NAME}-openshift-oadp-aws-cloud-credentials"
 ----
++
+[source,terminal]
+----
+$ export AWS_CLUSTER_ID=$(oc get clusterversion version -o jsonpath='{.spec.clusterID}{"\n"}')
+----
++
+[source,terminal]
+----
+$ export OIDC_ENDPOINT=$(oc get authentication.config.openshift.io cluster -o jsonpath='{.spec.serviceAccountIssuer}' | sed 's|^https://||')
+----
++
+[source,terminal]
+----
+$ export REGION=$(oc get infrastructures cluster -o jsonpath='{.status.platformStatus.aws.region}' --allow-missing-template-keys=false || echo us-east-2)
+----
++
+[source,terminal]
+----
+$ export AWS_ACCOUNT_ID=$(aws sts get-caller-identity --query Account --output text)
+----
++
+[source,terminal]
+----
+$ export ROLE_NAME="${CLUSTER_NAME}-openshift-oadp-aws-cloud-credentials"
+----
+
 . Create a temporary directory to store all of the files by running the following command:
 +
 [source,terminal]
@@ -41,6 +58,7 @@ export ROLE_NAME="${CLUSTER_NAME}-openshift-oadp-aws-cloud-credentials"
 $ export SCRATCH="/tmp/${CLUSTER_NAME}/oadp"
 mkdir -p ${SCRATCH}
 ----
+
 . Display all of the gathered details by running the following command:
 +
 [source,terminal]
@@ -48,20 +66,25 @@ mkdir -p ${SCRATCH}
 $ echo "Cluster ID: ${AWS_CLUSTER_ID}, Region: ${REGION}, OIDC Endpoint:
 ${OIDC_ENDPOINT}, AWS Account ID: ${AWS_ACCOUNT_ID}"
 ----
-. On the {aws-short} account, create an IAM policy to allow access to {aws-short} S3:
+. On the {aws-short} account, create an IAM policy to allow access to {aws-short} S3:
++
 .. Check to see if the policy exists by running the following commands:
 +
 [source,terminal]
 ----
-$ export POLICY_NAME="OadpVer1" <1>
+$ export POLICY_NAME="OadpVer1"
 ----
-<1> The variable can be set to any value.
++
+--
+* `POLICY_NAME`: The variable can be set to any value.
+--
 +
 [source,terminal]
 ----
 $ POLICY_ARN=$(aws iam list-policies --query "Policies[?PolicyName=='$POLICY_NAME'].{ARN:Arn}" --output text)
 ----
++
 .. Enter the following command to create the policy JSON file and then create the policy:
 +
 [NOTE]
@@ -113,12 +136,11 @@ EOF
 POLICY_ARN=$(aws iam create-policy --policy-name $POLICY_NAME \
 --policy-document file:///${SCRATCH}/policy.json --query Policy.Arn \
 --tags Key=openshift_version,Value=${CLUSTER_VERSION} Key=operator_namespace,Value=openshift-adp Key=operator_name,Value=oadp \
---output text) <1>
+--output text)
 fi
 ----
+* `SCRATCH`: The name for a temporary directory created for storing the files.
 +
-<1> `SCRATCH` is a name for a temporary directory created for storing the files.
-
 .. View the policy ARN by running the following command:
 +
 [source,terminal]
@@ -127,7 +149,7 @@ $ echo ${POLICY_ARN}
 ----
 
 . Create an IAM role trust policy for the cluster:
-
++
 .. Create the trust policy file by running the following command:
 +
 [source,terminal]
@@ -152,7 +174,7 @@ $ cat << EOF > ${SCRATCH}/trust-policy.json
 }
 EOF
 ----
-
++
 .. Create an IAM role trust policy for the cluster by running the following command:
 +
 [source,terminal]
@@ -162,7 +184,7 @@ $ ROLE_ARN=$(aws iam create-role --role-name \
 --assume-role-policy-document file://${SCRATCH}/trust-policy.json \
 --tags Key=cluster_id,Value=${AWS_CLUSTER_ID} Key=openshift_version,Value=${CLUSTER_VERSION} Key=operator_namespace,Value=openshift-adp Key=operator_name,Value=oadp --query Role.Arn --output text)
 ----
-
++
 .. View the role ARN by running the following command:
 +
 [source,terminal]
diff --git a/modules/querying-cluster-node-journal-logs.adoc b/modules/querying-cluster-node-journal-logs.adoc
index e09bcacada84..83b34cb73b7c 100644
--- a/modules/querying-cluster-node-journal-logs.adoc
+++ b/modules/querying-cluster-node-journal-logs.adoc
@@ -35,19 +35,23 @@ ifdef::openshift-rosa-hcp[]
 * Query `kubelet` `journald` unit logs from {product-title} cluster nodes. The following example queries worker nodes only:
 endif::openshift-rosa-hcp[]
 +
+ifndef::openshift-rosa-hcp[]
 [source,terminal]
 ----
-ifndef::openshift-rosa-hcp[]
-$ oc adm node-logs --role=master -u kubelet <1>
+$ oc adm node-logs --role=master -u kubelet
+----
 endif::openshift-rosa-hcp[]
 ifdef::openshift-rosa-hcp[]
-$ oc adm node-logs --role=worker -u kubelet <1>
-endif::openshift-rosa-hcp[]
+[source,terminal]
+----
+$ oc adm node-logs --role=worker -u kubelet
 ----
-<1> Replace `kubelet` as appropriate to query other unit logs.
+endif::openshift-rosa-hcp[]
+* `kubelet`: Replace as appropriate to query other unit logs.
 
 ifndef::openshift-rosa-hcp[]
 . Collect logs from specific subdirectories under `/var/log/` on cluster nodes.
++
 .. Retrieve a list of logs contained within a `/var/log/` subdirectory. The following example lists files in `/var/log/openshift-apiserver/` on all control plane nodes:
 +
 [source,terminal]
diff --git a/modules/rdma-creating-persistent-naming-rules.adoc b/modules/rdma-creating-persistent-naming-rules.adoc
index f62b4a3e5488..d4038734c90a 100644
--- a/modules/rdma-creating-persistent-naming-rules.adoc
+++ b/modules/rdma-creating-persistent-naming-rules.adoc
@@ -28,8 +28,15 @@ EOF
 [source,terminal]
 ----
 $ PERSIST=`cat 70-persistent-net.rules| base64 -w 0`
-
+----
++
+[source,terminal]
+----
 $ echo $PERSIST
+----
++
+[source,terminal]
+----
 U1VCU1lTVEVNPT0ibmV0IixBQ1RJT049PSJhZGQiLEFUVFJ7YWRkcmVzc309PSJiODozZjpkMjozYjo1MToyOCIsQVRUUnt0eXBlfT09IjEiLE5BTUU9ImliczJmMCIKU1VCU1lTVEVNPT0ibmV0IixBQ1RJT049PSJhZGQiLEFUVFJ7YWRkcmVzc309PSJiODozZjpkMjozYjo1MToyOSIsQVRUUnt0eXBlfT09IjEiLE5BTUU9ImVuczhmMG5wMCIKU1VCU1lTVEVNPT0ibmV0IixBQ1RJT049PSJhZGQiLEFUVFJ7YWRkcmVzc309PSJiODozZjpkMjpmMDozNjpkMCIsQVRUUnt0eXBlfT09IjEiLE5BTUU9ImliczJmMCIKU1VCU1lTVEVNPT0ibmV0IixBQ1RJT049PSJhZGQiLEFUVFJ7YWRkcmVzc309PSJiODozZjpkMjpmMDozNjpkMSIsQVRUUnt0eXBlfT09IjEiLE5BTUU9ImVuczhmMG5wMCIK
 ----
 
@@ -61,18 +68,12 @@ spec:
       path: /etc/udev/rules.d/70-persistent-net.rules
 ----
 
-. Create the machine configuration on the cluster by running the following command:
+. Create the machine configuration on the cluster by running the following command. The expected output shows `machineconfig.machineconfiguration.openshift.io/99-machine-config-udev-network created`.
 +
 [source,terminal]
 ----
 $ oc create -f 99-machine-config-udev-network.yaml
 ----
-+
-.Example output
-[source,terminal]
-----
-machineconfig.machineconfiguration.openshift.io/99-machine-config-udev-network created
-----
 
 . Use the `get mcp` command to view the machine configuration status:
 +
diff --git a/modules/rhcos-enabling-multipath.adoc b/modules/rhcos-enabling-multipath.adoc
index b1cd2bee2cea..9e00e26b7216 100644
--- a/modules/rhcos-enabling-multipath.adoc
+++ b/modules/rhcos-enabling-multipath.adoc
@@ -45,53 +45,66 @@ The following procedure enables multipath at installation time and appends kerne
 ----
 $ mpathconf --enable && systemctl start multipathd.service
 ----
-** Optional: If booting the PXE or ISO, you can instead enable multipath by adding `rd.multipath=default` from the kernel command line.
++
+.. Optional: If booting the PXE or ISO, you can instead enable multipath by adding `rd.multipath=default` to the kernel command line.
 
 . Append the kernel arguments by invoking the `coreos-installer` program:
 +
 * If there is only one multipath device connected to the machine, it should be available at path `/dev/mapper/mpatha`. For example:
 +
+ifndef::restricted[]
 [source,terminal]
 ----
-ifndef::restricted[]
-$ coreos-installer install /dev/mapper/mpatha \// <1>
+$ coreos-installer install /dev/mapper/mpatha \
 --ignition-url=http://host/worker.ign \
 --append-karg rd.multipath=default \
 --append-karg root=/dev/disk/by-label/dm-mpath-root \
 --append-karg rw
+----
 endif::[]
 ifdef::restricted[]
-$ coreos-installer install /dev/mapper/mpatha \// <1>
+[source,terminal]
+----
+$ coreos-installer install /dev/mapper/mpatha \
 --ignition-url=http://host/worker.ign \
 --append-karg rd.multipath=default \
 --append-karg root=/dev/disk/by-label/dm-mpath-root \
 --append-karg rw \
 --offline
-endif::[]
 ----
-<1> Indicates the path of the single multipathed device.
+endif::[]
++
+--
+* `/dev/mapper/mpatha`: Indicates the path of the single multipathed device.
+--
+
 * If there are multiple multipath devices connected to the machine, or to be more explicit, instead of using `/dev/mapper/mpatha`, it is recommended to use the World Wide Name (WWN) symlink available in `/dev/disk/by-id`. For example:
 +
+ifndef::restricted[]
 [source,terminal]
 ----
-ifndef::restricted[]
-$ coreos-installer install /dev/disk/by-id/wwn-<wwn_ID> \// <1>
+$ coreos-installer install /dev/disk/by-id/wwn-<wwn_ID> \
 --ignition-url=http://host/worker.ign \
 --append-karg rd.multipath=default \
 --append-karg root=/dev/disk/by-label/dm-mpath-root \
 --append-karg rw
+----
 endif::[]
 ifdef::restricted[]
-$ coreos-installer install /dev/disk/by-id/wwn-<wwn_ID> \// <1>
+[source,terminal]
+----
+$ coreos-installer install /dev/disk/by-id/wwn-<wwn_ID> \
 --ignition-url=http://host/worker.ign \
 --append-karg rd.multipath=default \
 --append-karg root=/dev/disk/by-label/dm-mpath-root \
 --append-karg rw \
 --offline
-endif::[]
 ----
-<1> Indicates the WWN ID of the target multipathed device. For example, `0xx194e957fcedb4841`.
+endif::[]
++
+--
+* `<wwn_ID>`: Indicates the WWN ID of the target multipathed device. For example, `0xx194e957fcedb4841`.
+--
 +
 This symlink can also be used as the `coreos.inst.install_dev` kernel argument when using special `coreos.inst.*` arguments to direct the live installer. For more information, see "Installing {op-system} and starting the {product-title} bootstrap process".
diff --git a/modules/rosa-creating-node-tuning.adoc b/modules/rosa-creating-node-tuning.adoc
index 8f7bfb3890a6..efc1125b80af 100644
--- a/modules/rosa-creating-node-tuning.adoc
+++ b/modules/rosa-creating-node-tuning.adoc
@@ -25,10 +25,13 @@ $ rosa create tuning-config -c <cluster_id> --name <tuning_config_name> --spec-path <path_to_spec_file>
 +
 You must supply the path to the `spec.json` file or the command returns an error.
 +
-.Example output
 [source,terminal]
 ----
-$ I: Tuning config 'sample-tuning' has been created on cluster 'cluster-example'.
-$ I: To view all tuning configs, run 'rosa list tuning-configs -c cluster-example'
+I: Tuning config 'sample-tuning' has been created on cluster 'cluster-example'.
+----
++
+[source,terminal]
+----
+I: To view all tuning configs, run 'rosa list tuning-configs -c cluster-example'
 ----
 
diff --git a/modules/rosa-quickstart-instructions.adoc b/modules/rosa-quickstart-instructions.adoc
index c2905c737377..5eb8bb83d95c 100644
--- a/modules/rosa-quickstart-instructions.adoc
+++ b/modules/rosa-quickstart-instructions.adoc
@@ -12,16 +12,28 @@ If you have already created your first cluster and users, this list can serve as
 ----
 ## Configures your AWS account and ensures everything is setup correctly
 $ rosa init
+----
 
+[source,terminal]
+----
 ## Starts the cluster creation process (~30-40minutes)
 $ rosa create cluster --cluster-name=<cluster-name>
+----
 
+[source,terminal]
+----
 ## Connect your IDP to your cluster
 $ rosa create idp --cluster=<cluster-name> --interactive
+----
 
+[source,terminal]
+----
 ## Promotes a user from your IDP to dedicated-admin level
 $ rosa grant user dedicated-admin --user=<idp_user_name> --cluster=<cluster-name>
+----
 
+[source,terminal]
+----
 ## Checks if your install is ready (look for State: Ready),
 ## and provides your Console URL to login to the web console.
 $ rosa describe cluster --cluster=<cluster-name>
diff --git a/modules/rosa-troubleshooting-awsinsufficientpermission-failure-deployment.adoc b/modules/rosa-troubleshooting-awsinsufficientpermission-failure-deployment.adoc
index 9297216bd62f..875a6c815d03 100644
--- a/modules/rosa-troubleshooting-awsinsufficientpermission-failure-deployment.adoc
+++ b/modules/rosa-troubleshooting-awsinsufficientpermission-failure-deployment.adoc
@@ -23,14 +23,26 @@ Ensure that the prerequisites are met by reviewing _Detailed requirements for de
 include::snippets/rosa-sts.adoc[]
 
 . If needed, you can re-create the permissions and policies by using the `-f` flag:
 +
-.Example output
 [source,terminal]
 ----
 $ rosa create ocm-role -f
+----
++
+[source,terminal]
+----
 $ rosa create user-role -f
+----
++
+[source,terminal]
+----
 $ rosa create account-roles -f
+----
++
+[source,terminal]
+----
 $ rosa create operator-roles -c ${CLUSTER} -f
 ----
+
 . Validate all the prerequisites and attempt cluster reinstallation.
 
diff --git a/modules/update-release-images.adoc b/modules/update-release-images.adoc
index 8e159b0d1f47..aa6edf8349f0 100644
--- a/modules/update-release-images.adoc
+++ b/modules/update-release-images.adoc
@@ -2,7 +2,7 @@
 //
 // * updating/understanding_updates/how-updates-work.adoc
 
-:_mod-docs-content-type: CONCEPT
+:_mod-docs-content-type: REFERENCE
 [id="update-release-images_{context}"]
 = Release images
 
@@ -16,13 +16,19 @@ You can inspect the content of a specific release image by running the following
 $ oc adm release extract <release_image>
 ----
 
-.Example output
 [source,terminal]
 ----
 $ oc adm release extract quay.io/openshift-release-dev/ocp-release:4.12.6-x86_64
 Extracted release payload from digest sha256:800d1e39d145664975a3bb7cbc6e674fbf78e3c45b5dde9ff2c5a11a8690c87b created at 2023-03-01T12:46:29Z
+----
 
+[source,terminal]
+----
 $ ls
+----
+
+[source,terminal]
+----
 0000_03_authorization-openshift_01_rolebindingrestriction.crd.yaml
 0000_03_config-operator_01_proxy.crd.yaml
 0000_03_marketplace-operator_01_operatorhub.crd.yaml
diff --git a/modules/virt-configuring-vm-with-node-exporter-service.adoc b/modules/virt-configuring-vm-with-node-exporter-service.adoc
index c722c7f74c43..956e246e5e74 100644
--- a/modules/virt-configuring-vm-with-node-exporter-service.adoc
+++ b/modules/virt-configuring-vm-with-node-exporter-service.adoc
@@ -56,10 +56,15 @@ WantedBy=multi-user.target
 [source,terminal]
 ----
 $ sudo systemctl enable node_exporter.service
+----
++
+[source,terminal]
+----
 $ sudo systemctl start node_exporter.service
 ----
 
 .Verification
+
 * Verify that the node-exporter agent is reporting metrics from the virtual machine.
 +
 [source,terminal]