From 63cf5a8f2fef029a9b76c42d1669f3968505327c Mon Sep 17 00:00:00 2001 From: Gabriel McGoldrick Date: Thu, 4 May 2023 12:08:19 +0100 Subject: [PATCH] SRVCOM-2431 handle conditionals for distros --- _attributes/common-attributes.adoc | 5 +- _topic_maps/_topic_map.yml | 15 ++- about/about-serverless.adoc | 4 +- about/serverless-release-notes.adoc | 19 +-- about/serverless-support.adoc | 9 +- .../serverless-functions-getting-started.adoc | 10 +- functions/serverless-functions-setup.adoc | 37 ++---- install/configuring-serverless-functions.adoc | 36 +----- install/install-serverless-operator.adoc | 12 +- install/installing-kn.adoc | 6 +- install/preparing-serverless-install.adoc | 48 ++++---- install/serverless-kafka-admin.adoc | 15 +-- integrations/gpu-resources.adoc | 6 +- .../serverless-autoscaling-developer.adoc | 12 +- .../config-applications/pvcs-for-serving.adoc | 6 +- .../serverless-config-tls.adoc | 7 +- .../routing-overview.adoc | 7 +- .../using-http2-gRPC.adoc | 2 +- .../serverless-applications.adoc | 5 +- modules/distr-tracing-product-overview.adoc | 30 +++++ ...rving-controller-custom-certs-secrets.adoc | 8 +- ...ng-operators-from-a-cluster-using-cli.adoc | 59 ---------- ...tors-from-a-cluster-using-web-console.adoc | 36 ------ modules/olm-refresh-subs.adoc | 109 ------------------ modules/serverless-admin-init-containers.adoc | 8 +- modules/serverless-channel-default.adoc | 2 +- modules/serverless-cluster-sizing-req.adoc | 4 +- .../serverless-config-replicas-eventing.adoc | 8 +- modules/serverless-config-replicas-kafka.adoc | 14 +-- .../serverless-config-replicas-serving.adoc | 8 +- .../serverless-create-kafka-channel-yaml.adoc | 2 +- ...erverless-creating-a-kafka-event-sink.adoc | 10 +- ...ess-creating-broker-admin-web-console.adoc | 8 +- .../serverless-creating-broker-labeling.adoc | 4 +- ...ss-creating-channel-admin-web-console.adoc | 10 +- ...eating-event-source-admin-web-console.adoc | 8 +- ...eating-subscription-admin-web-console.adoc | 8 +- 
.../serverless-creating-subscriptions-kn.adoc | 2 +- ...ss-creating-trigger-admin-web-console.adoc | 8 +- modules/serverless-deleting-crds.adoc | 8 +- ...erverless-deprecated-removed-features.adoc | 12 +- .../serverless-domain-mapping-odc-admin.adoc | 11 +- modules/serverless-enable-scale-to-zero.adoc | 8 +- ...ss-event-delivery-component-behaviors.adoc | 2 +- modules/serverless-gpu-resources-kn.adoc | 4 +- modules/serverless-install-cli.adoc | 11 +- ...rverless-install-eventing-web-console.adoc | 8 +- modules/serverless-install-eventing-yaml.adoc | 8 +- modules/serverless-install-kafka-odc.adoc | 19 +-- ...erverless-install-serving-web-console.adoc | 8 +- modules/serverless-install-serving-yaml.adoc | 8 +- modules/serverless-install-web-console.adoc | 13 +-- ...talling-cli-linux-rpm-package-manager.adoc | 2 - modules/serverless-installing-cli-linux.adoc | 2 - modules/serverless-jaeger-config.adoc | 10 +- .../serverless-kafka-broker-configmap.adoc | 2 +- ...less-kafka-broker-sasl-default-config.adoc | 2 +- ...rless-kafka-broker-tls-default-config.adoc | 4 +- ...verless-kafka-broker-with-kafka-topic.adoc | 2 +- modules/serverless-kafka-broker.adoc | 4 +- modules/serverless-kafka-developer.adoc | 8 +- modules/serverless-kafka-event-delivery.adoc | 4 +- modules/serverless-kafka-sasl-channels.adoc | 2 +- modules/serverless-kafka-sasl-source.adoc | 2 +- ...serverless-kafka-sink-security-config.adoc | 4 +- modules/serverless-kafka-sink.adoc | 4 +- modules/serverless-kafka-source-kn.adoc | 2 +- modules/serverless-kafka-source-odc.adoc | 4 +- modules/serverless-kafka-source-yaml.adoc | 2 +- modules/serverless-kafka-tls-channels.adoc | 4 +- modules/serverless-list-source-types-kn.adoc | 6 +- ...verless-ossm-enabling-serving-metrics.adoc | 10 +- modules/serverless-ossm-external-certs.adoc | 10 +- ...rless-ossm-secret-filtering-net-istio.adoc | 10 +- ...ess-ossm-secret-filtering-net-kourier.adoc | 10 +- .../serverless-ossm-setup-with-kourier.adoc | 10 +- 
modules/serverless-ossm-setup.adoc | 10 +- modules/serverless-ossm-v1x-jwt.adoc | 4 +- modules/serverless-ossm-v2x-jwt.adoc | 4 +- modules/serverless-rn-1-24-0.adoc | 4 +- modules/serverless-rn-1-28-0.adoc | 2 +- ...serverless-scale-to-zero-grace-period.adoc | 8 +- modules/serverless-tech-preview-features.adoc | 10 +- ...verless-uninstalling-knative-eventing.adoc | 8 +- ...rverless-uninstalling-knative-serving.adoc | 8 +- modules/support-knowledgebase-about.adoc | 12 ++ modules/support-knowledgebase-search.adoc | 32 +++++ modules/support-submitting-a-case.adoc | 86 ++++++++++++++ .../serverless-admin-metrics.adoc | 22 +--- .../cluster-logging-serverless.adoc | 2 + .../serverless-developer-metrics.adoc | 21 +--- .../serverless-tracing-open-telemetry.adoc | 3 - observability/tracing/serverless-tracing.adoc | 11 +- removing/removing-serverless-operator.adoc | 10 +- 94 files changed, 382 insertions(+), 722 deletions(-) create mode 100644 modules/distr-tracing-product-overview.adoc delete mode 100644 modules/olm-deleting-operators-from-a-cluster-using-cli.adoc delete mode 100644 modules/olm-deleting-operators-from-a-cluster-using-web-console.adoc delete mode 100644 modules/olm-refresh-subs.adoc create mode 100644 modules/support-knowledgebase-about.adoc create mode 100644 modules/support-knowledgebase-search.adoc create mode 100644 modules/support-submitting-a-case.adoc diff --git a/_attributes/common-attributes.adoc b/_attributes/common-attributes.adoc index 736ec00ffbac..949fc8c017e1 100644 --- a/_attributes/common-attributes.adoc +++ b/_attributes/common-attributes.adoc @@ -156,4 +156,7 @@ :3no-caps: Three-node OpenShift -:ocp-product-title: OpenShift Container Platform \ No newline at end of file +:ocp-product-title: OpenShift Container Platform +:dedicated-product-title: OpenShift Dedicated +:aro-product-title: Azure Red Hat OpenShift +:rosa-product-title: Red Hat OpenShift Service on AWS diff --git a/_topic_maps/_topic_map.yml b/_topic_maps/_topic_map.yml index 
f4a56d1b06c7..923e22e41da7 100644 --- a/_topic_maps/_topic_map.yml +++ b/_topic_maps/_topic_map.yml @@ -31,7 +31,7 @@ Topics: File: installing-knative-serving - Name: Installing Knative Eventing File: installing-knative-eventing -- Name: Configuring Knative Kafka +- Name: Configuring Knative for Apache Kafka File: serverless-kafka-admin - Name: Configuring Serverless Functions File: configuring-serverless-functions @@ -165,7 +165,7 @@ Topics: File: serverless-apiserversource - Name: Creating a ping source File: serverless-pingsource - - Name: Kafka source + - Name: Source for Apache Kafka File: serverless-kafka-developer-source - Name: Custom event sources File: serverless-custom-event-sources @@ -176,7 +176,9 @@ Topics: Topics: - Name: Event sinks overview File: serverless-event-sinks - - Name: Kafka sink + - Name: Creating event sinks + File: serverless-creating-sinks + - Name: Sink for Apache Kafka File: serverless-kafka-developer-sink - Name: Brokers Dir: brokers @@ -191,7 +193,7 @@ Topics: File: serverless-broker-backing-channel-default - Name: Configuring the default broker class File: serverless-global-config-broker-class-default - - Name: Kafka broker + - Name: Knative broker for Apache Kafka File: kafka-broker - Name: Managing brokers File: serverless-using-brokers-managing-brokers @@ -227,7 +229,7 @@ Topics: File: connecting-channels-sinks - Name: Default channel implementation File: serverless-channel-default - - Name: Security configuration for Knative Kafka channels + - Name: Security configuration for channels File: serverless-kafka-admin-security-channels - Name: Subscriptions Dir: subscriptions @@ -367,11 +369,8 @@ Topics: File: serverless-tracing - Name: Using Red Hat OpenShift distributed tracing File: serverless-tracing-open-telemetry - Distros: openshift-enterprise - Name: Using Jaeger distributed tracing File: serverless-tracing-jaeger - - --- # Integrations Name: Integrations diff --git a/about/about-serverless.adoc 
b/about/about-serverless.adoc index 0ace7dce102e..96cab5e6b81a 100644 --- a/about/about-serverless.adoc +++ b/about/about-serverless.adoc @@ -23,6 +23,6 @@ For additional information about the {ServerlessProductName} life cycle and supp [role="_additional-resources"] == Additional resources -* link:https://docs.openshift.com/container-platform/latest/operators/understanding/crds/crd-extending-api-with-crds.adoc#crd-extending-api-with-crds[Extending the Kubernetes API with custom resource definitions] -* link:https://docs.openshift.com/container-platform/latest/operators/understanding/crds/crd-managing-resources-from-crds.adoc#crd-managing-resources-from-crds[Managing resources from custom resource definitions] +* link:https://docs.openshift.com/container-platform/latest/operators/understanding/crds/crd-extending-api-with-crds.html#crd-extending-api-with-crds[Extending the Kubernetes API with custom resource definitions] +* link:https://docs.openshift.com/container-platform/latest/operators/understanding/crds/crd-managing-resources-from-crds.html#crd-managing-resources-from-crds[Managing resources from custom resource definitions] * link:https://www.redhat.com/en/topics/cloud-native-apps/what-is-serverless[What is serverless?] 
diff --git a/about/serverless-release-notes.adoc b/about/serverless-release-notes.adoc index 0bc7e887dd35..6bd1a36cde1c 100644 --- a/about/serverless-release-notes.adoc +++ b/about/serverless-release-notes.adoc @@ -43,30 +43,23 @@ include::modules/serverless-rn-1-26-0.adoc[leveloffset=+1] // OCP + OSD + ROSA include::modules/serverless-rn-1-25-0.adoc[leveloffset=+1] // 1.25.0 additional resources, OCP docs -ifdef::openshift-enterprise[] + [role="_additional-resources"] -.Additional resources -* xref:../serverless/knative-serving/config-applications/serverless-config-tls.adoc#serverless-config-tls[Configuring TLS authentication] -endif::[] +.Additional resources for {ocp-product-title} +* xref:../knative-serving/config-applications/serverless-config-tls.adoc#serverless-config-tls[Configuring TLS authentication] include::modules/serverless-rn-1-24-0.adoc[leveloffset=+1] include::modules/serverless-rn-1-23-0.adoc[leveloffset=+1] // 1.23.0 additional resources, OCP docs -ifdef::openshift-enterprise[] + [role="_additional-resources"] -.Additional resources -* link:https://docs.openshift.com/container-platform/latest/openshift_images/using_images/using-s21-images.adoc#using-s21-images[Source-to-Image] -endif::[] +.Additional resources for {ocp-product-title} +* link:https://docs.openshift.com/container-platform/latest/openshift_images/using_images/using-s21-images.html#using-s21-images[Source-to-Image] -// OSD + OCP -ifdef::openshift-enterprise,openshift-dedicated[] include::modules/serverless-rn-1-22-0.adoc[leveloffset=+1] include::modules/serverless-rn-1-21-0.adoc[leveloffset=+1] include::modules/serverless-rn-1-20-0.adoc[leveloffset=+1] -endif::[] -ifdef::openshift-enterprise[] include::modules/serverless-rn-1-19-0.adoc[leveloffset=+1] include::modules/serverless-rn-1-18-0.adoc[leveloffset=+1] -endif::[] diff --git a/about/serverless-support.adoc b/about/serverless-support.adoc index 7cd1a5e5a7f4..56dfc404b5f9 100644 --- a/about/serverless-support.adoc +++ 
b/about/serverless-support.adoc @@ -12,13 +12,18 @@ If you have a suggestion for improving this guide or have found an error, you ca // TODO: Update once https://issues.redhat.com/browse/OSDOCS-3730 is done to update this to Jira // Generic help topics -ifdef::openshift-enterprise,openshift-dedicated[] +[NOTE] +==== +The following sections on the Red Hat Knowledgebase and on submitting support cases apply to these distributions: + +* {ocp-product-title} +* {dedicated-product-title} +==== include::modules/support-knowledgebase-about.adoc[leveloffset=+1] include::modules/support-knowledgebase-search.adoc[leveloffset=+1] include::modules/support-submitting-a-case.adoc[leveloffset=+1] -endif::openshift-enterprise,openshift-dedicated[] [id="serverless-support-gather-info"] == Gathering diagnostic information for support diff --git a/functions/serverless-functions-getting-started.adoc b/functions/serverless-functions-getting-started.adoc index a5b07fce0e49..bbe243c83f88 100644 --- a/functions/serverless-functions-getting-started.adoc +++ b/functions/serverless-functions-getting-started.adoc @@ -20,16 +20,16 @@ include::modules/serverless-deploy-func-kn.adoc[leveloffset=+1] include::modules/serverless-kn-func-invoke.adoc[leveloffset=+1] include::modules/serverless-kn-func-delete.adoc[leveloffset=+1] -ifdef::openshift-enterprise[] + [id="additional-resources_serverless-functions-getting-started"] [role="_additional-resources"] -== Additional resources -* link:https://docs.openshift.com/container-platform/latest/registry/securing-exposing-registry.adoc#securing-exposing-registry[Exposing a default registry manually] +== Additional resources for {ocp-product-title} +* link:https://docs.openshift.com/container-platform/latest/registry/securing-exposing-registry.html#securing-exposing-registry[Exposing a default registry manually] * link:https://plugins.jetbrains.com/plugin/16476-knative\--serverless-functions-by-red-hat[Marketplace page for the Intellij Knative plugin] * 
link:https://marketplace.visualstudio.com/items?itemName=redhat.vscode-knative&utm_source=VSCode.pro&utm_campaign=AhmadAwais[Marketplace page for the Visual Studio Code Knative plugin] -* link:https://docs.openshift.com/container-platform/latest/applications/creating_applications/odc-creating-applications-using-developer-perspective.adoc#odc-creating-applications-using-the-developer-perspective[Creating applications using the Developer perspective] +* link:https://docs.openshift.com/container-platform/latest/applications/creating_applications/odc-creating-applications-using-developer-perspective.html#odc-creating-applications-using-the-developer-perspective[Creating applications using the Developer perspective] // This Additional resource applies only to OCP, but not to OSD nor ROSA. -endif::[] + [id="next-steps_serverless-functions-getting-started"] == Next steps diff --git a/functions/serverless-functions-setup.adoc b/functions/serverless-functions-setup.adoc index 3a00ef43a54c..3d773872b98d 100644 --- a/functions/serverless-functions-setup.adoc +++ b/functions/serverless-functions-setup.adoc @@ -20,14 +20,8 @@ To enable the use of {FunctionsProductName} on your cluster, you must complete t Functions are deployed as a Knative service. If you want to use event-driven architecture with your functions, you must also install Knative Eventing. ==== -ifdef::openshift-enterprise[] -* You have the link:https://docs.openshift.com/container-platform/latest/cli_reference/openshift_cli/getting-started-cli.adoc#cli-getting-started[`oc` CLI] installed. -endif::[] -// need to wait til CLI docs are added to OSD and ROSA for this link to work -// TODO: remove these conditionals once this is available -ifdef::openshift-dedicated,openshift-rosa[] -* You have the `oc` CLI installed. -endif::[] + +* You have the link:https://docs.openshift.com/container-platform/latest/cli_reference/openshift_cli/getting-started-cli.html#cli-getting-started[`oc` CLI] installed. 
* You have the xref:../install/installing-kn.adoc#installing-kn[Knative (`kn`) CLI] installed. Installing the Knative CLI enables the use of `kn func` commands which you can use to create and manage functions. @@ -35,23 +29,10 @@ endif::[] * You have access to an available image registry, such as the OpenShift Container Registry. -ifdef::openshift-enterprise[] -* If you are using link:https://quay.io/[Quay.io] as the image registry, you must ensure that either the repository is not private, or that you have followed the {ocp-product-title} documentation on link:https://docs.openshift.com/container-platform/latest/openshift_images/managing_images/using-image-pull-secrets.adoc#images-allow-pods-to-reference-images-from-secure-registries_using-image-pull-secrets[Allowing pods to reference images from other secured registries]. -endif::[] -// need to wait til images docs are added to OSD and ROSA for this link to work -// TODO: remove these conditionals once this is available -ifdef::openshift-dedicated,openshift-rosa[] -* If you are using link:https://quay.io/[Quay.io] as the image registry, you must ensure that either the repository is not private, or that you have allowed pods on your cluster to reference images from other secured registries. -endif::[] - -ifdef::openshift-enterprise[] -* If you are using the OpenShift Container Registry, a cluster administrator must link:https://docs.openshift.com/container-platform/latest/registry/securing-exposing-registry.adoc#securing-exposing-registry[expose the registry]. -endif::[] -// need to wait til registry docs are added to OSD and ROSA for this link to work -// TODO: remove these conditionals once this is available -ifdef::openshift-dedicated,openshift-rosa[] -* If you are using the OpenShift Container Registry, a cluster or dedicated administrator must expose the registry. 
-endif::[] +* If you are using link:https://quay.io/[Quay.io] as the image registry, you must ensure that either the repository is not private, or that you have followed the {ocp-product-title} documentation on link:https://docs.openshift.com/container-platform/latest/openshift_images/managing_images/using-image-pull-secrets.html#images-allow-pods-to-reference-images-from-secure-registries_using-image-pull-secrets[Allowing pods to reference images from other secured registries]. + + +* If you are using the OpenShift Container Registry, a cluster administrator must link:https://docs.openshift.com/container-platform/latest/registry/securing-exposing-registry.html#securing-exposing-registry[expose the registry]. include::modules/serverless-functions-podman.adoc[leveloffset=+1] include::modules/serverless-functions-podman-macos.adoc[leveloffset=+1] @@ -59,10 +40,6 @@ include::modules/serverless-functions-podman-macos.adoc[leveloffset=+1] [id="next-steps_serverless-functions-setup"] == Next steps -ifdef::openshift-enterprise[] -* For more information about Docker Container Engine or Podman, see link:https://docs.openshift.com/container-platform/latest/architecture/understanding-development.adoc#container-build-tool-options[Container build tool options]. -endif::[] -// need to wait til build tool docs are added to OSD and ROSA for this link to work -// TODO: remove these conditionals once this is available +* For more information about Docker Container Engine or Podman, see link:https://docs.openshift.com/container-platform/latest/architecture/understanding-development.html#container-build-tool-options[Container build tool options]. * See xref:../functions/serverless-functions-getting-started.adoc#serverless-functions-getting-started[Getting started with functions]. 
diff --git a/install/configuring-serverless-functions.adoc b/install/configuring-serverless-functions.adoc index cf1b33614452..dc62cbb59973 100644 --- a/install/configuring-serverless-functions.adoc +++ b/install/configuring-serverless-functions.adoc @@ -20,14 +20,7 @@ To enable the use of {FunctionsProductName} on your cluster, you must complete t Functions are deployed as a Knative service. If you want to use event-driven architecture with your functions, you must also install Knative Eventing. ==== -ifdef::openshift-enterprise[] -* You have the link:https://docs.openshift.com/container-platform/latest/cli_reference/openshift_cli/getting-started-cli.adoc#cli-getting-started[`oc` CLI] installed. -endif::[] -// need to wait til CLI docs are added to OSD and ROSA for this link to work -// TODO: remove these conditionals once this is available -ifdef::openshift-dedicated,openshift-rosa[] -* You have the `oc` CLI installed. -endif::[] +* You have the link:https://docs.openshift.com/container-platform/latest/cli_reference/openshift_cli/getting-started-cli.html#cli-getting-started[`oc` CLI] installed. * You have the xref:../install/installing-kn.adoc#installing-kn[Knative (`kn`) CLI] installed. Installing the Knative CLI enables the use of `kn func` commands which you can use to create and manage functions. @@ -35,23 +28,10 @@ endif::[] * You have access to an available image registry, such as the OpenShift Container Registry. -ifdef::openshift-enterprise[] -* If you are using link:https://quay.io/[Quay.io] as the image registry, you must ensure that either the repository is not private, or that you have followed the {ocp-product-title} documentation on link:https://docs.openshift.com/container-platform/latest/openshift_images/managing_images/using-image-pull-secrets.adoc#images-allow-pods-to-reference-images-from-secure-registries_using-image-pull-secrets[Allowing pods to reference images from other secured registries]. 
-endif::[] -// need to wait til images docs are added to OSD and ROSA for this link to work -// TODO: remove these conditionals once this is available -ifdef::openshift-dedicated,openshift-rosa[] -* If you are using link:https://quay.io/[Quay.io] as the image registry, you must ensure that either the repository is not private, or that you have allowed pods on your cluster to reference images from other secured registries. -endif::[] - -ifdef::openshift-enterprise[] -* If you are using the OpenShift Container Registry, a cluster administrator must link:https://docs.openshift.com/container-platform/latest/registry/securing-exposing-registry.adoc#securing-exposing-registry[expose the registry]. -endif::[] -// need to wait til registry docs are added to OSD and ROSA for this link to work -// TODO: remove these conditionals once this is available -ifdef::openshift-dedicated,openshift-rosa[] -* If you are using the OpenShift Container Registry, a cluster or dedicated administrator must expose the registry. -endif::[] +* If you are using link:https://quay.io/[Quay.io] as the image registry, you must ensure that either the repository is not private, or that you have followed the {ocp-product-title} documentation on link:https://docs.openshift.com/container-platform/latest/openshift_images/managing_images/using-image-pull-secrets.html#images-allow-pods-to-reference-images-from-secure-registries_using-image-pull-secrets[Allowing pods to reference images from other secured registries]. + + +* If you are using the OpenShift Container Registry, a cluster administrator must link:https://docs.openshift.com/container-platform/latest/registry/securing-exposing-registry.html#securing-exposing-registry[expose the registry]. 
include::modules/serverless-functions-podman.adoc[leveloffset=+1] include::modules/serverless-functions-podman-macos.adoc[leveloffset=+1] @@ -59,10 +39,6 @@ include::modules/serverless-functions-podman-macos.adoc[leveloffset=+1] [id="next-steps_configuring-serverless-functions"] == Next steps -ifdef::openshift-enterprise[] -* For more information about Docker Container Engine or Podman, see link:https://docs.openshift.com/container-platform/latest/architecture/understanding-development.adoc#container-build-tool-options[Container build tool options]. -endif::[] -// need to wait til build tool docs are added to OSD and ROSA for this link to work -// TODO: remove these conditionals once this is available +* For more information about Docker Container Engine or Podman, see link:https://docs.openshift.com/container-platform/latest/architecture/understanding-development.html#container-build-tool-options[Container build tool options]. * See xref:../functions/serverless-functions-getting-started.adoc#serverless-functions-getting-started[Getting started with functions]. diff --git a/install/install-serverless-operator.adoc b/install/install-serverless-operator.adoc index 18b37dbf8f1f..6610a6dea57d 100644 --- a/install/install-serverless-operator.adoc +++ b/install/install-serverless-operator.adoc @@ -33,16 +33,12 @@ Knative has multiple config maps that are named with the prefix `config-`. All K The `spec.config` in the Knative custom resources have one `` entry for each config map, named `config-`, with a value which is be used for the config map `data`. 
- - -ifdef::openshift-enterprise[] [id="additional-resources_knative-serving-CR-config"] [role="_additional-resources"] -== Additional resources -* link:https://docs.openshift.com/container-platform/latest/operators/understanding/crds/crd-managing-resources-from-crds.adoc[Managing resources from custom resource definitions] -* link:https://docs.openshift.com/container-platform/latest/storage/understanding-persistent-storage.adoc#understanding-persistent-storage[Understanding persistent storage] -* link:https://docs.openshift.com/container-platform/latest/networking/configuring-a-custom-pki.adoc[Configuring a custom PKI] -endif::[] +== Additional resources for {ocp-product-title} +* link:https://docs.openshift.com/container-platform/latest/operators/understanding/crds/crd-managing-resources-from-crds.html[Managing resources from custom resource definitions] +* link:https://docs.openshift.com/container-platform/latest/storage/understanding-persistent-storage.html#understanding-persistent-storage[Understanding persistent storage] +* link:https://docs.openshift.com/container-platform/latest/networking/configuring-a-custom-pki.html[Configuring a custom PKI] [id="next-steps_install-serverless-operator"] == Next steps diff --git a/install/installing-kn.adoc b/install/installing-kn.adoc index a47d4d040c96..9df98aa9f62b 100644 --- a/install/installing-kn.adoc +++ b/install/installing-kn.adoc @@ -7,11 +7,7 @@ toc::[] The Knative (`kn`) CLI does not have its own login mechanism. To log in to the cluster, you must install the OpenShift CLI (`oc`) and use the `oc login` command. Installation options for the CLIs may vary depending on your operating system. 
-ifdef::openshift-enterprise[] -For more information on installing the OpenShift CLI (`oc`) for your operating system and logging in with `oc`, see the link:https://docs.openshift.com/container-platform/latest/cli_reference/openshift_cli/getting-started-cli.adoc#cli-getting-started[OpenShift CLI getting started] documentation. -endif::[] -// need to wait til CLI docs are added to OSD and ROSA for this link to work -// TODO: remove this conditional once this is available +For more information on installing the OpenShift CLI (`oc`) for your operating system and logging in with `oc`, see the link:https://docs.openshift.com/container-platform/latest/cli_reference/openshift_cli/getting-started-cli.html#cli-getting-started[OpenShift CLI getting started] documentation. {ServerlessProductName} cannot be installed using the Knative (`kn`) CLI. A cluster administrator must install the {ServerlessOperatorName} and set up the Knative components, as described in the xref:../install/install-serverless-operator.adoc#install-serverless-operator[Installing the {ServerlessOperatorName}] documentation. diff --git a/install/preparing-serverless-install.adoc b/install/preparing-serverless-install.adoc index 2b9d21be82be..9b9e9b208b9b 100644 --- a/install/preparing-serverless-install.adoc +++ b/install/preparing-serverless-install.adoc @@ -9,58 +9,62 @@ toc::[] Read the following information about supported configurations and prerequisites before you install {ServerlessProductName}. // OCP specific docs -ifdef::openshift-enterprise[] -[id="install-serverless-operator-before-you-begin"] +For {ocp-product-title}: * {ServerlessProductName} is supported for installation in a restricted network environment. * {ServerlessProductName} currently cannot be used in a multi-tenant configuration on a single cluster. 
-endif::[] + [id="about-serverless-supported-configs"] == Supported configurations The set of supported features, configurations, and integrations for {ServerlessProductName}, current and past versions, are available at the link:https://access.redhat.com/articles/4912821[Supported Configurations page]. -ifdef::openshift-enterprise[] + [id="about-serverless-scalability-performance"] -== Scalability and performance +== Scalability and performance on {ocp-product-title} {ServerlessProductName} has been tested with a configuration of 3 main nodes and 3 worker nodes, each of which has 64 CPUs, 457 GB of memory, and 394 GB of storage each. -The maximum number of Knative services that can be created using this configuration is 3,000. This corresponds to the link:https://docs.openshift.com/container-platform/latest/scalability_and_performance/planning-your-environment-according-to-object-maximums.adoc#cluster-maximums-major-releases_object-limits[{ocp-product-title} Kubernetes services limit of 10,000], since 1 Knative service creates 3 Kubernetes services. +The maximum number of Knative services that can be created using this configuration is 3,000. This corresponds to the link:https://docs.openshift.com/container-platform/latest/scalability_and_performance/planning-your-environment-according-to-object-maximums.html#cluster-maximums-major-releases_object-limits[{ocp-product-title} Kubernetes services limit of 10,000], since 1 Knative service creates 3 Kubernetes services. The average scale from zero response time was approximately 3.4 seconds, with a maximum response time of 8 seconds, and a 99.9th percentile of 4.5 seconds for a simple Quarkus application. These times might vary depending on the application and the runtime of the application. 
-endif::[] + // OCP specific docs -ifdef::openshift-enterprise[] + [id="install-serverless-operator-before-you-begin"] +// OCP, OSD, and ROSA docs +[NOTE] +==== +The following section on defining cluster size requirements applies to these distributions: + +* {ocp-product-title} +* {dedicated-product-title} +* {rosa-product-title} +==== + include::modules/serverless-cluster-sizing-req.adoc[leveloffset=+1] + [id="install-serverless-operator-scaling-with-machinesets"] -== Scaling your cluster using compute machine sets +== Scaling your cluster using compute machine sets on {ocp-product-title} -You can use the {ocp-product-title} `MachineSet` API to manually scale your cluster up to the desired size. The minimum requirements usually mean that you must scale up one of the default compute machine sets by two additional machines. See link:https://docs.openshift.com/container-platform/latest/machine_management/manually-scaling-machineset.adoc#manually-scaling-machineset[Manually scaling a compute machine set]. +You can use the {ocp-product-title} `MachineSet` API to manually scale your cluster up to the desired size. The minimum requirements usually mean that you must scale up one of the default compute machine sets by two additional machines. See link:https://docs.openshift.com/container-platform/latest/machine_management/manually-scaling-machineset.html#manually-scaling-machineset[Manually scaling a compute machine set]. include::modules/serverless-cluster-sizing-req-additional.adoc[leveloffset=+2] // TODO: Add OSD specific docs for auto scaling compute machine sets? These docs aren't available for OSD so we need to look into what's required to doc here. 
// QE thread related: https://coreos.slack.com/archives/CD87JDUB0/p1643986092796179 -endif::[] - -// OSD and ROSA docs -ifdef::openshift-dedicated,openshift-rosa[] -include::modules/serverless-cluster-sizing-req.adoc[leveloffset=+1] -endif::[] [id="additional-resources_preparing-serverless-install"] [role="_additional-resources"] -== Additional resources -ifdef::openshift-enterprise[] -* link:https://docs.openshift.com/container-platform/latest/operators/admin/olm-restricted-networks.adoc#olm-restricted-networks[Using Operator Lifecycle Manager on restricted networks] -* link:https://docs.openshift.com/container-platform/latest/operators/understanding/olm-understanding-operatorhub.adoc#olm-operatorhub-overview[Understanding OperatorHub] -* link:https://docs.openshift.com/container-platform/latest/installing/cluster-capabilities.adoc#cluster-capabilities[Cluster capabilities] -endif::[] +== Additional resources in {ocp-product-title} documentation + +* link:https://docs.openshift.com/container-platform/latest/operators/admin/olm-restricted-networks.html#olm-restricted-networks[Using Operator Lifecycle Manager on restricted networks] +* link:https://docs.openshift.com/container-platform/latest/operators/understanding/olm-understanding-operatorhub.html#olm-operatorhub-overview[Understanding OperatorHub] +* link:https://docs.openshift.com/container-platform/latest/installing/cluster-capabilities.html#cluster-capabilities[Cluster capabilities] + diff --git a/install/serverless-kafka-admin.adoc b/install/serverless-kafka-admin.adoc index 092beae46bf2..6745fb99d869 100644 --- a/install/serverless-kafka-admin.adoc +++ b/install/serverless-kafka-admin.adoc @@ -8,20 +8,15 @@ toc::[] The Knative broker implementation for Apache Kafka provides integration options for you to use supported versions of the Apache Kafka message streaming platform with {ServerlessProductName}. Kafka provides options for event source, channel, broker, and event sink capabilities. 
-// OCP -ifdef::openshift-enterprise[] -In addition to the Knative Eventing components that are provided as part of a core {ServerlessProductName} installation, cluster administrators can install the `KnativeKafka` custom resource (CR). +In addition to the Knative Eventing components that are provided as part of a core {ServerlessProductName} installation, the `KnativeKafka` custom resource (CR) can be installed by: + +* Cluster administrators, for {ocp-product-title} +* Cluster or dedicated administrators, for {rosa-product-title} or for {dedicated-product-title}. [NOTE] ==== -Knative broker for Apache Kafka is not currently supported for {ibmzProductName} and {ibmpowerProductName}. +Knative broker for Apache Kafka is not currently supported on {ocp-product-title} for {ibmzProductName} and {ibmpowerProductName}. ==== -endif::[] - -// OSD and ROSA -ifdef::openshift-dedicated,openshift-rosa[] -In addition to the Knative Eventing components that are provided as part of a core {ServerlessProductName} installation, cluster or dedicated administrators can install the `KnativeKafka` custom resource (CR). 
-endif::[] The `KnativeKafka` CR provides users with additional options, such as: diff --git a/integrations/gpu-resources.adoc b/integrations/gpu-resources.adoc index e52fb1edbd73..c5793c46aaa0 100644 --- a/integrations/gpu-resources.adoc +++ b/integrations/gpu-resources.adoc @@ -11,9 +11,7 @@ See link:https://docs.nvidia.com/datacenter/cloud-native/gpu-operator/openshift/ include::modules/serverless-gpu-resources-kn.adoc[leveloffset=+1] -ifdef::openshift-enterprise[] [id="additional-requirements_gpu-resources"] [role="_additional-resources"] -== Additional resources -* link:https://docs.openshift.com/container-platform/latest/applications/quotas/quotas-setting-per-project.adoc#quotas-setting-per-project[Setting resource quotas for extended resources] -endif::[] +== Additional resources for {ocp-product-title} +* link:https://docs.openshift.com/container-platform/latest/applications/quotas/quotas-setting-per-project.html#quotas-setting-per-project[Setting resource quotas for extended resources] diff --git a/knative-serving/autoscaling/serverless-autoscaling-developer.adoc b/knative-serving/autoscaling/serverless-autoscaling-developer.adoc index 7f52f50aece2..2d30550b8dcf 100644 --- a/knative-serving/autoscaling/serverless-autoscaling-developer.adoc +++ b/knative-serving/autoscaling/serverless-autoscaling-developer.adoc @@ -1,20 +1,14 @@ :_content-type: ASSEMBLY +include::_attributes/common-attributes.adoc[] [id="serverless-autoscaling-developer"] = Autoscaling -include::_attributes/common-attributes.adoc[] :context: serverless-autoscaling-developer toc::[] Knative Serving provides automatic scaling, or _autoscaling_, for applications to match incoming demand. For example, if an application is receiving no traffic, and scale-to-zero is enabled, Knative Serving scales the application down to zero replicas. If scale-to-zero is disabled, the application is scaled down to the minimum number of replicas configured for applications on the cluster. 
Replicas can also be scaled up to meet demand if traffic to the application increases. -ifdef::openshift-enterprise[] -Autoscaling settings for Knative services can be global settings that are configured by cluster administrators, or per-revision settings that are configured for individual services. -endif::[] - -ifdef::openshift-dedicated,openshift-rosa[] -Autoscaling settings for Knative services can be global settings that are configured by cluster or dedicated administrators, or per-revision settings that are configured for individual services. -endif::[] +Autoscaling settings for Knative services can be global settings that are configured by cluster administrators (or dedicated administrators for {rosa-product-title} and {dedicated-product-title}), or per-revision settings that are configured for individual services. You can modify per-revision settings for your services by using the {ocp-product-title} web console, by modifying the YAML file for your service, or by using the Knative (`kn`) CLI. @@ -22,5 +16,3 @@ You can modify per-revision settings for your services by using the {ocp-product ==== Any limits or targets that you set for a service are measured against a single instance of your application. For example, setting the `target` annotation to `50` configures the autoscaler to scale the application so that each revision handles 50 requests at a time.
==== - - diff --git a/knative-serving/config-applications/pvcs-for-serving.adoc b/knative-serving/config-applications/pvcs-for-serving.adoc index 16fdb8ad1bc1..b6db888baee6 100644 --- a/knative-serving/config-applications/pvcs-for-serving.adoc +++ b/knative-serving/config-applications/pvcs-for-serving.adoc @@ -10,9 +10,7 @@ To achieve this, you can configure persistent volume claims (PVCs) for your Knat // Enabling PVC for Serving include::modules/serverless-enabling-pvc-support.adoc[leveloffset=+1] -ifdef::openshift-enterprise[] [id="additional-resources_pvcs-for-serving"] [role="_additional-resources"] -== Additional resources -* link:https://docs.openshift.com/container-platform/latest/storage/understanding-persistent-storage.adoc#understanding-persistent-storage[Understanding persistent storage] -endif::[] \ No newline at end of file +== Additional resources for {ocp-product-title} +* link:https://docs.openshift.com/container-platform/latest/storage/understanding-persistent-storage.html#understanding-persistent-storage[Understanding persistent storage] diff --git a/knative-serving/config-applications/serverless-config-tls.adoc b/knative-serving/config-applications/serverless-config-tls.adoc index 4e0c45611fdc..fcc47707ecb9 100644 --- a/knative-serving/config-applications/serverless-config-tls.adoc +++ b/knative-serving/config-applications/serverless-config-tls.adoc @@ -13,9 +13,8 @@ TLS is the only supported method of traffic encryption for Knative Kafka. Red Ha [NOTE] ==== If you want to enable internal TLS with a {SMProductName} integration, you must enable {SMProductShortName} with mTLS instead of the internal encryption explained in the following procedure. -ifndef::openshift-dedicated[] -{nbsp}See the documentation for xref:../../integrations/serverless-ossm-setup.adoc#serverless-ossm-enabling-serving-metrics_serverless-ossm-setup[Enabling Knative Serving metrics when using Service Mesh with mTLS]. 
-endif::[] + +For {ocp-product-title} and {rosa-product-title}, see the documentation for xref:../../integrations/serverless-ossm-setup.adoc#serverless-ossm-enabling-serving-metrics_serverless-ossm-setup[Enabling Knative Serving metrics when using Service Mesh with mTLS]. ==== include::modules/serverless-enabling-tls-internal-traffic.adoc[leveloffset=+1] @@ -24,6 +23,4 @@ include::modules/serverless-enabling-tls-internal-traffic.adoc[leveloffset=+1] .Additional resources * xref:../../eventing/brokers/kafka-broker.adoc#serverless-kafka-broker-tls-default-config_kafka-broker[Configuring TLS authentication for the Knative broker for Apache Kafka] * xref:../../eventing/channels/serverless-kafka-admin-security-channels.adoc#serverless-kafka-tls-channels_serverless-kafka-admin-security-channels[Configuring TLS authentication for channels for Apache Kafka] -ifndef::openshift-dedicated[] * xref:../../integrations/serverless-ossm-setup.adoc#serverless-ossm-enabling-serving-metrics_serverless-ossm-setup[Enabling Knative Serving metrics when using Service Mesh with mTLS] -endif::[] diff --git a/knative-serving/external-ingress-routing/routing-overview.adoc b/knative-serving/external-ingress-routing/routing-overview.adoc index f7d19f340318..30a65c27635c 100644 --- a/knative-serving/external-ingress-routing/routing-overview.adoc +++ b/knative-serving/external-ingress-routing/routing-overview.adoc @@ -10,10 +10,7 @@ You can disable Operator control of {ocp-product-title} routing so that you can Knative routes can also be used alongside the {ocp-product-title} route to provide additional fine-grained routing capabilities, such as traffic splitting. 
- -ifdef::openshift-enterprise[] [id="additional-resources_serverless-configuring-routes"] [role="_additional-resources"] -== Additional resources -* link:https://docs.openshift.com/container-platform/latest/networking/routes/route-configuration.adoc#nw-route-specific-annotations_route-configuration[Route-specific annotations] -endif::[] +== Additional resources for {ocp-product-title} +* link:https://docs.openshift.com/container-platform/latest/networking/routes/route-configuration.html#nw-route-specific-annotations_route-configuration[Route-specific annotations] diff --git a/knative-serving/external-ingress-routing/using-http2-gRPC.adoc b/knative-serving/external-ingress-routing/using-http2-gRPC.adoc index 2eac51c7bed3..a5b51f95564d 100644 --- a/knative-serving/external-ingress-routing/using-http2-gRPC.adoc +++ b/knative-serving/external-ingress-routing/using-http2-gRPC.adoc @@ -10,6 +10,6 @@ include::modules/interacting-serverless-apps-http2-gRPC.adoc[leveloffset=+1] [role="_additional-resources"] .Additional resources -* link:https://docs.openshift.com/container-platform/latest/networking/ingress-operator.adoc#nw-http2-haproxy_configuring-ingress[Enabling HTTP/2 Ingress connectivity] +* link:https://docs.openshift.com/container-platform/latest/networking/ingress-operator.html#nw-http2-haproxy_configuring-ingress[Enabling HTTP/2 Ingress connectivity] include::modules/interacting-serverless-apps-http2-gRPC-up-to-4-9.adoc[leveloffset=+1] diff --git a/knative-serving/getting-started/serverless-applications.adoc b/knative-serving/getting-started/serverless-applications.adoc index fcc690ce4ddc..562eb215988a 100644 --- a/knative-serving/getting-started/serverless-applications.adoc +++ b/knative-serving/getting-started/serverless-applications.adoc @@ -12,9 +12,8 @@ You can create a serverless application by using one of the following methods: * Create a Knative service from the {ocp-product-title} web console. 
+ -ifdef::openshift-enterprise[] -See link:https://docs.openshift.com/container-platform/latest/applications/creating_applications/odc-creating-applications-using-developer-perspective.adoc#odc-creating-applications-using-developer-perspective[Creating applications using the Developer perspective] for more information. -endif::[] +For {ocp-product-title}, see link:https://docs.openshift.com/container-platform/latest/applications/creating_applications/odc-creating-applications-using-developer-perspective.html#odc-creating-applications-using-developer-perspective[Creating applications using the Developer perspective] for more information. + * Create a Knative service by using the Knative (`kn`) CLI. * Create and apply a Knative `Service` object as a YAML file, by using the `oc` CLI. diff --git a/modules/distr-tracing-product-overview.adoc b/modules/distr-tracing-product-overview.adoc new file mode 100644 index 000000000000..633db1758204 --- /dev/null +++ b/modules/distr-tracing-product-overview.adoc @@ -0,0 +1,30 @@ +//// +This module included in the following assemblies: +-service_mesh/v2x/ossm-architecture.adoc +- distributed-tracing-release-notes.adoc +-distr_tracing_arch/distr-tracing-architecture.adoc +-serverless/serverless-tracing.adoc +//// + +:_content-type: CONCEPT +[id="distr-tracing-product-overview_{context}"] += Distributed tracing overview + +As a service owner, you can use distributed tracing to instrument your services to gather insights into your service architecture. +You can use {DTShortName} for monitoring, network profiling, and troubleshooting the interaction between components in modern, cloud-native, microservices-based applications. 
+ +With {DTShortName} you can perform the following functions: + +* Monitor distributed transactions + +* Optimize performance and latency + +* Perform root cause analysis + +{DTProductName} consists of two main components: + +* *{JaegerName}* - This component is based on the open source link:https://www.jaegertracing.io/[Jaeger project]. + +* *{OTELNAME}* - This component is based on the open source link:https://opentelemetry.io/[OpenTelemetry project]. + +Both of these components are based on the vendor-neutral link:https://opentracing.io/[OpenTracing] APIs and instrumentation. diff --git a/modules/knative-serving-controller-custom-certs-secrets.adoc b/modules/knative-serving-controller-custom-certs-secrets.adoc index 42c2c53ec45e..0c059deae8b1 100644 --- a/modules/knative-serving-controller-custom-certs-secrets.adoc +++ b/modules/knative-serving-controller-custom-certs-secrets.adoc @@ -10,13 +10,7 @@ If the `controller-custom-certs` spec uses the `Secret` type, the secret is moun .Prerequisites -ifdef::openshift-enterprise[] -* You have cluster administrator permissions on {ocp-product-title}. -endif::[] - -ifdef::openshift-dedicated,openshift-rosa[] -* You have cluster or dedicated administrator permissions on {ocp-product-title}. -endif::[] +* You have cluster administrator permissions on {ocp-product-title}, or you have cluster or dedicated administrator permissions on {rosa-product-title} or {dedicated-product-title}. * You have installed the {ServerlessOperatorName} and Knative Serving on your cluster. 
diff --git a/modules/olm-deleting-operators-from-a-cluster-using-cli.adoc b/modules/olm-deleting-operators-from-a-cluster-using-cli.adoc deleted file mode 100644 index 3a7d3396d631..000000000000 --- a/modules/olm-deleting-operators-from-a-cluster-using-cli.adoc +++ /dev/null @@ -1,59 +0,0 @@ -// Module included in the following assemblies: -// -// * operators/admin/olm-deleting-operators-from-a-cluster.adoc -// * serverless/install/removing-openshift-serverless.adoc - -:_content-type: PROCEDURE -[id="olm-deleting-operator-from-a-cluster-using-cli_{context}"] -= Deleting Operators from a cluster using the CLI - -Cluster administrators can delete installed Operators from a selected namespace by using the CLI. - -.Prerequisites - -- Access to an {ocp-product-title} cluster using an account with -ifdef::openshift-enterprise,openshift-webscale,openshift-origin[] -`cluster-admin` permissions. -endif::[] -- `oc` command installed on workstation. - -.Procedure - -. Check the current version of the subscribed Operator (for example, `jaeger`) in the `currentCSV` field: -+ -[source,terminal] ----- -$ oc get subscription jaeger -n openshift-operators -o yaml | grep currentCSV ----- -+ -.Example output -[source,terminal] ----- - currentCSV: jaeger-operator.v1.8.2 ----- - -. Delete the subscription (for example, `jaeger`): -+ -[source,terminal] ----- -$ oc delete subscription jaeger -n openshift-operators ----- -+ -.Example output -[source,terminal] ----- -subscription.operators.coreos.com "jaeger" deleted ----- - -. 
Delete the CSV for the Operator in the target namespace using the `currentCSV` value from the previous step: -+ -[source,terminal] ----- -$ oc delete clusterserviceversion jaeger-operator.v1.8.2 -n openshift-operators ----- -+ -.Example output -[source,terminal] ----- -clusterserviceversion.operators.coreos.com "jaeger-operator.v1.8.2" deleted ----- diff --git a/modules/olm-deleting-operators-from-a-cluster-using-web-console.adoc b/modules/olm-deleting-operators-from-a-cluster-using-web-console.adoc deleted file mode 100644 index 53d8f0b20e9e..000000000000 --- a/modules/olm-deleting-operators-from-a-cluster-using-web-console.adoc +++ /dev/null @@ -1,36 +0,0 @@ -// Module included in the following assemblies: -// -// * operators/admin/olm-deleting-operators-from-a-cluster.adoc -// * backup_and_restore/application_backup_and_restore/installing/uninstalling-oadp.adoc -// * serverless/install/removing-openshift-serverless.adoc -// * virt/install/uninstalling-virt.adoc - -:_content-type: PROCEDURE -[id="olm-deleting-operators-from-a-cluster-using-web-console_{context}"] -= Deleting Operators from a cluster using the web console - -Cluster administrators can delete installed Operators from a selected namespace by using the web console. - -.Prerequisites - -- You have access to an {ocp-product-title} cluster web console using an account with -ifdef::openshift-enterprise,openshift-webscale,openshift-origin[] -`cluster-admin` permissions. -endif::[] - -.Procedure - -. Navigate to the *Operators* → *Installed Operators* page. - -. Scroll or enter a keyword into the *Filter by name* field to find the Operator that you want to remove. Then, click on it. - -. On the right side of the *Operator Details* page, select *Uninstall Operator* from the *Actions* list. -+ -An *Uninstall Operator?* dialog box is displayed. - -. Select *Uninstall* to remove the Operator, Operator deployments, and pods. Following this action, the Operator stops running and no longer receives updates. 
-+ -[NOTE] -==== -This action does not remove resources managed by the Operator, including custom resource definitions (CRDs) and custom resources (CRs). Dashboards and navigation items enabled by the web console and off-cluster resources that continue to run might need manual clean up. To remove these after uninstalling the Operator, you might need to manually delete the Operator CRDs. -==== diff --git a/modules/olm-refresh-subs.adoc b/modules/olm-refresh-subs.adoc deleted file mode 100644 index 7c9aaeda6d1d..000000000000 --- a/modules/olm-refresh-subs.adoc +++ /dev/null @@ -1,109 +0,0 @@ -// Module included in the following assemblies: -// -// * support/troubleshooting/troubleshooting-operator-issues.adoc -// * serverless/install/removing-openshift-serverless.adoc - -:_content-type: PROCEDURE -[id="olm-refresh-subs_{context}"] -= Refreshing failing subscriptions - -In Operator Lifecycle Manager (OLM), if you subscribe to an Operator that references images that are not accessible on your network, you can find jobs in the `openshift-marketplace` namespace that are failing with the following errors: - -.Example output -[source,terminal] ----- -ImagePullBackOff for -Back-off pulling image "example.com/openshift4/ose-elasticsearch-operator-bundle@sha256:6d2587129c846ec28d384540322b40b05833e7e00b25cca584e004af9a1d292e" ----- - -.Example output -[source,terminal] ----- -rpc error: code = Unknown desc = error pinging docker registry example.com: Get "https://example.com/v2/": dial tcp: lookup example.com on 10.0.0.1:53: no such host ----- - -As a result, the subscription is stuck in this failing state and the Operator is unable to install or upgrade. - -You can refresh a failing subscription by deleting the subscription, cluster service version (CSV), and other related objects. After recreating the subscription, OLM then reinstalls the correct version of the Operator. 
- -.Prerequisites - -* You have a failing subscription that is unable to pull an inaccessible bundle image. -* You have confirmed that the correct bundle image is accessible. - -.Procedure - -. Get the names of the `Subscription` and `ClusterServiceVersion` objects from the namespace where the Operator is installed: -+ -[source,terminal] ----- -$ oc get sub,csv -n ----- -+ -.Example output -[source,terminal] ----- -NAME PACKAGE SOURCE CHANNEL -subscription.operators.coreos.com/elasticsearch-operator elasticsearch-operator redhat-operators 5.0 - -NAME DISPLAY VERSION REPLACES PHASE -clusterserviceversion.operators.coreos.com/elasticsearch-operator.5.0.0-65 OpenShift Elasticsearch Operator 5.0.0-65 Succeeded ----- - -. Delete the subscription: -+ -[source,terminal] ----- -$ oc delete subscription -n ----- - -. Delete the cluster service version: -+ -[source,terminal] ----- -$ oc delete csv -n ----- - -. Get the names of any failing jobs and related config maps in the `openshift-marketplace` namespace: -+ -[source,terminal] ----- -$ oc get job,configmap -n openshift-marketplace ----- -+ -.Example output -[source,terminal] ----- -NAME COMPLETIONS DURATION AGE -job.batch/1de9443b6324e629ddf31fed0a853a121275806170e34c926d69e53a7fcbccb 1/1 26s 9m30s - -NAME DATA AGE -configmap/1de9443b6324e629ddf31fed0a853a121275806170e34c926d69e53a7fcbccb 3 9m30s ----- - -. Delete the job: -+ -[source,terminal] ----- -$ oc delete job -n openshift-marketplace ----- -+ -This ensures pods that try to pull the inaccessible image are not recreated. - -. Delete the config map: -+ -[source,terminal] ----- -$ oc delete configmap -n openshift-marketplace ----- - -. Reinstall the Operator using OperatorHub in the web console. 
- -.Verification - -* Check that the Operator has been reinstalled successfully: -+ -[source,terminal] ----- -$ oc get sub,csv,installplan -n ----- diff --git a/modules/serverless-admin-init-containers.adoc b/modules/serverless-admin-init-containers.adoc index 6b0ca088dcea..0d8982b9754a 100644 --- a/modules/serverless-admin-init-containers.adoc +++ b/modules/serverless-admin-init-containers.adoc @@ -10,13 +10,7 @@ * You have installed {ServerlessOperatorName} and Knative Serving on your cluster. -ifdef::openshift-enterprise[] -* You have cluster administrator permissions. -endif::[] - -ifdef::openshift-dedicated,openshift-rosa[] -* You have cluster or dedicated administrator permissions. -endif::[] +* You have cluster administrator permissions on {ocp-product-title}, or you have cluster or dedicated administrator permissions on {rosa-product-title} or {dedicated-product-title}. .Procedure diff --git a/modules/serverless-channel-default.adoc b/modules/serverless-channel-default.adoc index 72d660b4e8b1..7ed7c40a036a 100644 --- a/modules/serverless-channel-default.adoc +++ b/modules/serverless-channel-default.adoc @@ -10,7 +10,7 @@ * You have administrator permissions on {ocp-product-title}. * You have installed the {ServerlessOperatorName} and Knative Eventing on your cluster. -* If you want to use Kafka channels as the default channel implementation, you must also install the `KnativeKafka` CR on your cluster. +* If you want to use Knative channels for Apache Kafka as the default channel implementation, you must also install the `KnativeKafka` CR on your cluster. 
.Procedure diff --git a/modules/serverless-cluster-sizing-req.adoc b/modules/serverless-cluster-sizing-req.adoc index c36ec9b91156..ed1e363507db 100644 --- a/modules/serverless-cluster-sizing-req.adoc +++ b/modules/serverless-cluster-sizing-req.adoc @@ -6,11 +6,11 @@ [id="serverless-cluster-sizing-req_{context}"] = Defining cluster size requirements -To install and use {ServerlessProductName}, the {ocp-product-title} cluster must be sized correctly. +To install and use {ServerlessProductName}, the cluster must be sized correctly. [NOTE] ==== -The following requirements relate only to the pool of worker machines of the {ocp-product-title} cluster. Control plane nodes are not used for general scheduling and are omitted from the requirements. +The following requirements relate only to the pool of worker machines of the cluster. Control plane nodes are not used for general scheduling and are omitted from the requirements. ==== The minimum requirement to use {ServerlessProductName} is a cluster with 10 CPUs and 40GB memory. diff --git a/modules/serverless-config-replicas-eventing.adoc b/modules/serverless-config-replicas-eventing.adoc index 361ddf9b6f22..dffe05040e07 100644 --- a/modules/serverless-config-replicas-eventing.adoc +++ b/modules/serverless-config-replicas-eventing.adoc @@ -15,13 +15,7 @@ For Knative Eventing, the `mt-broker-filter` and `mt-broker-ingress` deployments .Prerequisites -ifdef::openshift-enterprise[] -* You have access to an {ocp-product-title} account with cluster administrator access. -endif::[] - -ifdef::openshift-dedicated,openshift-rosa[] -* You have access to an {ocp-product-title} account with cluster administrator or dedicated administrator access. -endif::[] +* You have cluster administrator permissions on {ocp-product-title}, or you have cluster or dedicated administrator permissions on {rosa-product-title} or {dedicated-product-title}. * The {ServerlessOperatorName} and Knative Eventing are installed on your cluster. 
diff --git a/modules/serverless-config-replicas-kafka.adoc b/modules/serverless-config-replicas-kafka.adoc index b7ec87a5967c..067f43dcbe8c 100644 --- a/modules/serverless-config-replicas-kafka.adoc +++ b/modules/serverless-config-replicas-kafka.adoc @@ -4,21 +4,15 @@ :_content-type: PROCEDURE [id="serverless-config-replicas-kafka_{context}"] -= Configuring high availability replicas for Knative Kafka += Configuring high availability replicas for the Knative broker implementation for Apache Kafka -High availability (HA) is available by default for the Knative Kafka `kafka-controller` and `kafka-webhook-eventing` components, which are configured to have two each replicas by default. You can change the number of replicas for these components by modifying the `spec.high-availability.replicas` value in the `KnativeKafka` custom resource (CR). +High availability (HA) is available by default for the Knative broker implementation for Apache Kafka components `kafka-controller` and `kafka-webhook-eventing`, which are configured to have two replicas each by default. You can change the number of replicas for these components by modifying the `spec.high-availability.replicas` value in the `KnativeKafka` custom resource (CR). .Prerequisites -ifdef::openshift-enterprise[] -* You have access to an {ocp-product-title} account with cluster administrator access. -endif::[] +* You have cluster administrator permissions on {ocp-product-title}, or you have cluster or dedicated administrator permissions on {rosa-product-title} or {dedicated-product-title}. -ifdef::openshift-dedicated,openshift-rosa[] -* You have access to an {ocp-product-title} account with cluster administrator or dedicated administrator access. -endif::[] - -* The {ServerlessOperatorName} and Knative Kafka are installed on your cluster. +* The {ServerlessOperatorName} and Knative broker for Apache Kafka are installed on your cluster.
.Procedure diff --git a/modules/serverless-config-replicas-serving.adoc b/modules/serverless-config-replicas-serving.adoc index 5f3c956b3397..9e795ec218c5 100644 --- a/modules/serverless-config-replicas-serving.adoc +++ b/modules/serverless-config-replicas-serving.adoc @@ -11,13 +11,7 @@ To specify three minimum replicas for the eligible deployment resources, set the .Prerequisites -ifdef::openshift-enterprise[] -* You have access to an {ocp-product-title} account with cluster administrator access. -endif::[] - -ifdef::openshift-dedicated,openshift-rosa[] -* You have access to an {ocp-product-title} account with cluster administrator or dedicated administrator access. -endif::[] +* You have cluster administrator permissions on {ocp-product-title}, or you have cluster or dedicated administrator permissions on {rosa-product-title} or {dedicated-product-title}. * The {ServerlessOperatorName} and Knative Serving are installed on your cluster. diff --git a/modules/serverless-create-kafka-channel-yaml.adoc b/modules/serverless-create-kafka-channel-yaml.adoc index 3978f34b92c2..e0e77463cbbf 100644 --- a/modules/serverless-create-kafka-channel-yaml.adoc +++ b/modules/serverless-create-kafka-channel-yaml.adoc @@ -5,7 +5,7 @@ :_content-type: PROCEDURE [id="serverless-create-kafka-channel-yaml_{context}"] -= Creating a Kafka channel by using YAML += Creating a channel for Apache Kafka by using YAML Creating Knative resources by using YAML files uses a declarative API, which enables you to describe channels declaratively and in a reproducible manner. You can create a Knative Eventing channel that is backed by Kafka topics by creating a Kafka channel. To create a Kafka channel by using YAML, you must create a YAML file that defines a `KafkaChannel` object, then apply it by using the `oc apply` command. 
diff --git a/modules/serverless-creating-a-kafka-event-sink.adoc b/modules/serverless-creating-a-kafka-event-sink.adoc index 4d1a66862fde..aaf9f0af041c 100644 --- a/modules/serverless-creating-a-kafka-event-sink.adoc +++ b/modules/serverless-creating-a-kafka-event-sink.adoc @@ -1,20 +1,23 @@ // Module included in the following assemblies: // -// * serverless/develop/serverless-event-sinks.adoc +// * serverless/eventing/event-sinks/serverless-kafka-developer-sink.adoc :_content-type: PROCEDURE [id="serverless-creating-a-kafka-event-sink_{context}"] += Creating an event sink for Apache Kafka by using the {ocp-product-title} web console + +You can create a Kafka sink that sends events to a Kafka topic by using the *Developer* perspective in the {ocp-product-title} web console. By default, a Kafka sink uses the binary content mode, which is more efficient than the structured mode. -= Creating a Kafka event sink As a developer, you can create an event sink to receive events from a particular source and send them to a Kafka topic. .Prerequisites -* You have installed the Red Hat OpenShift Serverless operator, with Knative Serving, Knative Eventing, and Knative Kafka APIs, from the Operator Hub. +* You have installed the {ServerlessOperatorName}, with Knative Serving, Knative Eventing, and Knative broker for Apache Kafka APIs, from the OperatorHub. * You have created a Kafka topic in your Kafka environment. .Procedure + . In the *Developer* perspective, navigate to the *+Add* view. . Click *Event Sink* in the *Eventing catalog*. . Search for `KafkaSink` in the catalog items and click it. @@ -28,5 +31,6 @@ image::create-event-sink.png[] . Click *Create*. .Verification + . In the *Developer* perspective, navigate to the *Topology* view. . Click the created event sink to view its details in the right panel. 
diff --git a/modules/serverless-creating-broker-admin-web-console.adoc b/modules/serverless-creating-broker-admin-web-console.adoc index 2439bbad3c31..de3e3f9bdb53 100644 --- a/modules/serverless-creating-broker-admin-web-console.adoc +++ b/modules/serverless-creating-broker-admin-web-console.adoc @@ -14,13 +14,7 @@ include::snippets/serverless-brokers-intro.adoc[] * You have logged in to the web console and are in the *Administrator* perspective. -ifdef::openshift-enterprise[] -* You have cluster administrator permissions for {ocp-product-title}. -endif::[] - -ifdef::openshift-dedicated,openshift-rosa[] -* You have cluster or dedicated administrator permissions for {ocp-product-title}. -endif::[] +* You have cluster administrator permissions on {ocp-product-title}, or you have cluster or dedicated administrator permissions on {rosa-product-title} or {dedicated-product-title}. .Procedure diff --git a/modules/serverless-creating-broker-labeling.adoc b/modules/serverless-creating-broker-labeling.adoc index 0b30ecca167b..aa912160121b 100644 --- a/modules/serverless-creating-broker-labeling.adoc +++ b/modules/serverless-creating-broker-labeling.adoc @@ -19,9 +19,7 @@ Brokers created using this method are not removed if you remove the label. You m * Install the OpenShift CLI (`oc`). * You have created a project or have access to a project with the appropriate roles and permissions to create applications and other workloads in {ocp-product-title}. -ifdef::openshift-dedicated,openshift-rosa[] -* You have cluster or dedicated administrator permissions. -endif::[] +* You have cluster or dedicated administrator permissions if you are using {rosa-product-title} or {dedicated-product-title}. 
.Procedure diff --git a/modules/serverless-creating-channel-admin-web-console.adoc b/modules/serverless-creating-channel-admin-web-console.adoc index e340897128c2..24cd74982a0b 100644 --- a/modules/serverless-creating-channel-admin-web-console.adoc +++ b/modules/serverless-creating-channel-admin-web-console.adoc @@ -14,13 +14,7 @@ After Knative Eventing is installed on your cluster, you can create a channel by * You have logged in to the web console and are in the *Administrator* perspective. -ifdef::openshift-enterprise[] -* You have cluster administrator permissions for {ocp-product-title}. -endif::[] - -ifdef::openshift-dedicated,openshift-rosa[] -* You have cluster or dedicated administrator permissions for {ocp-product-title}. -endif::[] +* You have cluster administrator permissions on {ocp-product-title}, or you have cluster or dedicated administrator permissions on {rosa-product-title} or {dedicated-product-title}. .Procedure @@ -30,6 +24,6 @@ endif::[] + [NOTE] ==== -Currently only `InMemoryChannel` channel objects are supported by default. Kafka channels are available if you have installed Knative Kafka on {ServerlessProductName}. +Currently only `InMemoryChannel` channel objects are supported by default. Knative channels for Apache Kafka are available if you have installed the Knative broker implementation for Apache Kafka on {ServerlessProductName}. ==== . Click *Create*. diff --git a/modules/serverless-creating-event-source-admin-web-console.adoc b/modules/serverless-creating-event-source-admin-web-console.adoc index ca17d4b72c08..31d60f31c78f 100644 --- a/modules/serverless-creating-event-source-admin-web-console.adoc +++ b/modules/serverless-creating-event-source-admin-web-console.adoc @@ -14,13 +14,7 @@ A Knative _event source_ can be any Kubernetes object that generates or imports * You have logged in to the web console and are in the *Administrator* perspective. 
-ifdef::openshift-enterprise[] -* You have cluster administrator permissions for {ocp-product-title}. -endif::[] - -ifdef::openshift-dedicated,openshift-rosa[] -* You have cluster or dedicated administrator permissions for {ocp-product-title}. -endif::[] +* You have cluster administrator permissions on {ocp-product-title}, or you have cluster or dedicated administrator permissions on {rosa-product-title} or {dedicated-product-title}. .Procedure diff --git a/modules/serverless-creating-subscription-admin-web-console.adoc b/modules/serverless-creating-subscription-admin-web-console.adoc index 37826ac9a46e..aec825e9c826 100644 --- a/modules/serverless-creating-subscription-admin-web-console.adoc +++ b/modules/serverless-creating-subscription-admin-web-console.adoc @@ -14,13 +14,7 @@ After you have created a channel and an event sink, also known as a _subscriber_ * You have logged in to the web console and are in the *Administrator* perspective. -ifdef::openshift-enterprise[] -* You have cluster administrator permissions for {ocp-product-title}. -endif::[] - -ifdef::openshift-dedicated,openshift-rosa[] -* You have cluster or dedicated administrator permissions for {ocp-product-title}. -endif::[] +* You have cluster administrator permissions on {ocp-product-title}, or you have cluster or dedicated administrator permissions on {rosa-product-title} or {dedicated-product-title}. * You have created a Knative channel. diff --git a/modules/serverless-creating-subscriptions-kn.adoc b/modules/serverless-creating-subscriptions-kn.adoc index f4b7480e117d..a5093d7b2e51 100644 --- a/modules/serverless-creating-subscriptions-kn.adoc +++ b/modules/serverless-creating-subscriptions-kn.adoc @@ -25,7 +25,7 @@ $ kn subscription create \ --sink : \ <2> --sink-dead-letter : <3> ---- -<1> `--channel` specifies the source for cloud events that should be processed. You must provide the channel name. 
If you are not using the default `InMemoryChannel` channel that is backed by the `Channel` custom resource, you must prefix the channel name with the `` for the specified channel type. For example, this will be `messaging.knative.dev:v1beta1:KafkaChannel` for a Kafka backed channel. +<1> `--channel` specifies the source for cloud events that should be processed. You must provide the channel name. If you are not using the default `InMemoryChannel` channel that is backed by the `Channel` custom resource, you must prefix the channel name with the `` for the specified channel type. For example, this will be `messaging.knative.dev:v1beta1:KafkaChannel` for an Apache Kafka backed channel. <2> `--sink` specifies the target destination to which the event should be delivered. By default, the `` is interpreted as a Knative service of this name, in the same namespace as the subscription. You can specify the type of the sink by using one of the following prefixes: `ksvc`:: A Knative service. `channel`:: A channel that should be used as destination. Only default channel types can be referenced here. diff --git a/modules/serverless-creating-trigger-admin-web-console.adoc b/modules/serverless-creating-trigger-admin-web-console.adoc index 801a815fb300..a6930ae18643 100644 --- a/modules/serverless-creating-trigger-admin-web-console.adoc +++ b/modules/serverless-creating-trigger-admin-web-console.adoc @@ -16,13 +16,7 @@ Using the {ocp-product-title} web console provides a streamlined and intuitive u * You have logged in to the web console and are in the *Administrator* perspective. -ifdef::openshift-enterprise[] -* You have cluster administrator permissions for {ocp-product-title}. -endif::[] - -ifdef::openshift-dedicated,openshift-rosa[] -* You have cluster or dedicated administrator permissions for {ocp-product-title}. 
-endif::[] +* You have cluster administrator permissions on {ocp-product-title}, or you have cluster or dedicated administrator permissions on {rosa-product-title} or {dedicated-product-title}. * You have created a Knative broker. diff --git a/modules/serverless-deleting-crds.adoc b/modules/serverless-deleting-crds.adoc index 7ca45bddb5bf..04e6203faa3b 100644 --- a/modules/serverless-deleting-crds.adoc +++ b/modules/serverless-deleting-crds.adoc @@ -12,13 +12,7 @@ Delete the Operator and API CRDs using the following procedure. * Install the OpenShift CLI (`oc`). -ifdef::openshift-enterprise[] -* You have access to an {ocp-product-title} account with cluster administrator access. -endif::[] - -ifdef::openshift-dedicated,openshift-rosa[] -* You have access to an {ocp-product-title} account with cluster administrator or dedicated administrator access. -endif::[] +* You have cluster administrator permissions on {ocp-product-title}, or you have cluster or dedicated administrator permissions on {rosa-product-title} or {dedicated-product-title}. * You have uninstalled Knative Serving and removed the {ServerlessOperatorName}. 
diff --git a/modules/serverless-deprecated-removed-features.adoc b/modules/serverless-deprecated-removed-features.adoc index 8de8c0aebb3e..e5277a316ec1 100644 --- a/modules/serverless-deprecated-removed-features.adoc +++ b/modules/serverless-deprecated-removed-features.adoc @@ -11,8 +11,8 @@ Some features that were Generally Available (GA) or a Technology Preview (TP) in For the most recent list of major functionality deprecated and removed within {ServerlessProductName}, refer to the following table: // OCP + OSD table -ifdef::openshift-enterprise,openshift-dedicated[] -.Deprecated and removed features tracker + +.Deprecated and removed features tracker for {ocp-product-title} and {dedicated-product-title} [cols="3,1,1,1,1,1",options="header"] |==== |Feature |1.20|1.21|1.22 to 1.26|1.27|1.28 @@ -46,11 +46,11 @@ ifdef::openshift-enterprise,openshift-dedicated[] |Deprecated |==== -endif::[] + // ROSA table -ifdef::openshift-rosa[] -.Deprecated and removed features tracker + +.Deprecated and removed features tracker for {rosa-product-title} [cols="3,1,1,1",options="header"] |==== |Feature |1.23 to 1.26|1.27|1.28 @@ -71,4 +71,4 @@ ifdef::openshift-rosa[] |Deprecated |==== -endif::[] + diff --git a/modules/serverless-domain-mapping-odc-admin.adoc b/modules/serverless-domain-mapping-odc-admin.adoc index 7043f5171f7a..d7cdcd1ce3e2 100644 --- a/modules/serverless-domain-mapping-odc-admin.adoc +++ b/modules/serverless-domain-mapping-odc-admin.adoc @@ -8,13 +8,8 @@ include::snippets/serverless-domain-mapping.adoc[] -ifdef::openshift-enterprise[] -If you have cluster administrator permissions, you can create a `DomainMapping` custom resource (CR) by using the *Administrator* perspective in the {ocp-product-title} web console. -endif::[] -ifdef::openshift-dedicated,openshift-rosa[] -If you have cluster or dedicated administrator permissions, you can create a `DomainMapping` custom resource (CR) by using the *Administrator* perspective in the {ocp-product-title} web console. 
-endif::[] +If you have cluster administrator permissions on {ocp-product-title} (or cluster or dedicated administrator permissions on {dedicated-product-title} or {rosa-product-title}), you can create a `DomainMapping` custom resource (CR) by using the *Administrator* perspective in the web console. .Prerequisites @@ -22,12 +17,12 @@ endif::[] * You are in the *Administrator* perspective. * You have installed the {ServerlessOperatorName}. * You have installed Knative Serving. -* You have created a project or have access to a project with the appropriate roles and permissions to create applications and other workloads in {ocp-product-title}. +* You have created a project or have access to a project with the appropriate roles and permissions to create applications and other workloads. * You have created a Knative service and control a custom domain that you want to map to that service. + [NOTE] ==== -Your custom domain must point to the IP address of the {ocp-product-title} cluster. +Your custom domain must point to the IP address of the cluster. ==== .Procedure diff --git a/modules/serverless-enable-scale-to-zero.adoc b/modules/serverless-enable-scale-to-zero.adoc index 2c6b749ccce3..d4888d6a28a7 100644 --- a/modules/serverless-enable-scale-to-zero.adoc +++ b/modules/serverless-enable-scale-to-zero.adoc @@ -12,13 +12,7 @@ You can use the `enable-scale-to-zero` spec to enable or disable scale-to-zero g * You have installed {ServerlessOperatorName} and Knative Serving on your cluster. -ifdef::openshift-enterprise[] -* You have cluster administrator permissions. -endif::[] - -ifdef::openshift-dedicated,openshift-rosa[] -* You have cluster or dedicated administrator permissions. -endif::[] +* You have cluster administrator permissions on {ocp-product-title}, or you have cluster or dedicated administrator permissions on {rosa-product-title} or {dedicated-product-title}. * You are using the default Knative Pod Autoscaler. 
The scale to zero feature is not available if you are using the Kubernetes Horizontal Pod Autoscaler. diff --git a/modules/serverless-event-delivery-component-behaviors.adoc b/modules/serverless-event-delivery-component-behaviors.adoc index 7e43802e7fe9..d6fcacd07d77 100644 --- a/modules/serverless-event-delivery-component-behaviors.adoc +++ b/modules/serverless-event-delivery-component-behaviors.adoc @@ -9,7 +9,7 @@ Different channel and broker types have their own behavior patterns that are followed for event delivery. [id="serverless-event-delivery-component-behaviors-kafka_{context}"] -== Knative Kafka channels and brokers +== Knative channels and brokers for Apache Kafka If an event is successfully delivered to a Kafka channel or broker receiver, the receiver responds with a `202` status code, which means that the event has been safely stored inside a Kafka topic and is not lost. diff --git a/modules/serverless-gpu-resources-kn.adoc b/modules/serverless-gpu-resources-kn.adoc index 8777a0cc885c..8a6c409506be 100644 --- a/modules/serverless-gpu-resources-kn.adoc +++ b/modules/serverless-gpu-resources-kn.adoc @@ -15,13 +15,11 @@ After GPU resources are enabled for your {ocp-product-title} cluster, you can sp * GPU resources are enabled for your {ocp-product-title} cluster. * You have created a project or have access to a project with the appropriate roles and permissions to create applications and other workloads in {ocp-product-title}. -ifndef::openshift-rosa[] [NOTE] ==== -Using NVIDIA GPU resources is not supported for {ibmzProductName} and {ibmpowerProductName}. +Using NVIDIA GPU resources is not supported for {ibmzProductName} and {ibmpowerProductName} on {ocp-product-title} or {dedicated-product-title}. ==== -endif::openshift-rosa[] .Procedure . 
Create a Knative service and set the GPU resource requirement limit to `1` by using the `--limit nvidia.com/gpu=1` flag: diff --git a/modules/serverless-install-cli.adoc b/modules/serverless-install-cli.adoc index ee8a33741c0a..41a5780c5835 100644 --- a/modules/serverless-install-cli.adoc +++ b/modules/serverless-install-cli.adoc @@ -10,16 +10,11 @@ You can install the {ServerlessOperatorName} from the OperatorHub by using the C .Prerequisites -ifdef::openshift-enterprise[] -* You have access to an {ocp-product-title} account with cluster administrator access. -* Your cluster has the Marketplace capability enabled or the Red Hat Operator catalog source configured manually. -endif::[] +* You have cluster administrator permissions on {ocp-product-title}, or you have cluster or dedicated administrator permissions on {rosa-product-title} or {dedicated-product-title}. +* For {ocp-product-title}, your cluster has the Marketplace capability enabled or the Red Hat Operator catalog source configured manually. -ifdef::openshift-dedicated,openshift-rosa[] -* You have access to an {ocp-product-title} account with cluster or dedicated administrator access. -endif::[] -* You have logged in to the {ocp-product-title} cluster. +* You have logged in to the cluster. .Procedure . Create a YAML file containing `Namespace`, `OperatorGroup`, and `Subscription` objects to subscribe a namespace to the {ServerlessOperatorName}. 
For example, create the file `serverless-subscription.yaml` with the following content: diff --git a/modules/serverless-install-eventing-web-console.adoc b/modules/serverless-install-eventing-web-console.adoc index a6b65a77a4f6..2bb95130ee92 100644 --- a/modules/serverless-install-eventing-web-console.adoc +++ b/modules/serverless-install-eventing-web-console.adoc @@ -10,13 +10,7 @@ After you install the {ServerlessOperatorName}, install Knative Eventing by usin .Prerequisites -ifdef::openshift-enterprise[] -* You have access to an {ocp-product-title} account with cluster administrator access. -endif::[] - -ifdef::openshift-dedicated,openshift-rosa[] -* You have access to an {ocp-product-title} account with cluster administrator or dedicated administrator access. -endif::[] +* You have cluster administrator permissions on {ocp-product-title}, or you have cluster or dedicated administrator permissions on {rosa-product-title} or {dedicated-product-title}. * You have logged in to the {ocp-product-title} web console. * You have installed the {ServerlessOperatorName}. diff --git a/modules/serverless-install-eventing-yaml.adoc b/modules/serverless-install-eventing-yaml.adoc index f95de8ce7283..02ac2866cad3 100644 --- a/modules/serverless-install-eventing-yaml.adoc +++ b/modules/serverless-install-eventing-yaml.adoc @@ -10,13 +10,7 @@ After you install the {ServerlessOperatorName}, you can install Knative Eventing .Prerequisites -ifdef::openshift-enterprise[] -* You have access to an {ocp-product-title} account with cluster administrator access. -endif::[] - -ifdef::openshift-dedicated,openshift-rosa[] -* You have access to an {ocp-product-title} account with cluster administrator or dedicated administrator access. -endif::[] +* You have cluster administrator permissions on {ocp-product-title}, or you have cluster or dedicated administrator permissions on {rosa-product-title} or {dedicated-product-title}. * You have installed the {ServerlessOperatorName}. 
* Install the OpenShift CLI (`oc`). diff --git a/modules/serverless-install-kafka-odc.adoc b/modules/serverless-install-kafka-odc.adoc index b257bd5609a3..722dc1763e8d 100644 --- a/modules/serverless-install-kafka-odc.adoc +++ b/modules/serverless-install-kafka-odc.adoc @@ -4,9 +4,9 @@ :_content-type: PROCEDURE [id="serverless-install-kafka-odc_{context}"] -= Installing Knative Kafka += Installing Knative broker for Apache Kafka -Knative Kafka provides integration options for you to use supported versions of the Apache Kafka message streaming platform with {ServerlessProductName}. Knative Kafka functionality is available in an {ServerlessProductName} installation if you have installed the `KnativeKafka` custom resource. +The Knative broker implementation for Apache Kafka provides integration options for you to use supported versions of the Apache Kafka message streaming platform with {ServerlessProductName}. Knative broker for Apache Kafka functionality is available in an {ServerlessProductName} installation if you have installed the `KnativeKafka` custom resource. .Prerequisites @@ -14,18 +14,9 @@ Knative Kafka provides integration options for you to use supported versions of * You have access to a Red Hat AMQ Streams cluster. * Install the OpenShift CLI (`oc`) if you want to use the verification steps. -// OCP -ifdef::openshift-enterprise[] -* You have cluster administrator permissions on {ocp-product-title}. -endif::[] - -// OSD and ROSA -ifdef::openshift-dedicated,openshift-rosa[] -* You have cluster or dedicated administrator permissions on {ocp-product-title}. -endif::[] +* You have cluster administrator permissions on {ocp-product-title}, or you have cluster or dedicated administrator permissions on {rosa-product-title} or {dedicated-product-title}. * You are logged in to the {ocp-product-title} web console. - .Procedure . In the *Administrator* perspective, navigate to *Operators* -> *Installed Operators*. 
@@ -67,7 +58,7 @@ spec: <1> Enables developers to use the `KafkaChannel` channel type in the cluster. <2> A comma-separated list of bootstrap servers from your AMQ Streams cluster. <3> Enables developers to use the `KafkaSource` event source type in the cluster. -<4> Enables developers to use the Knative Kafka broker implementation in the cluster. +<4> Enables developers to use the Knative broker implementation for Apache Kafka in the cluster. <5> A comma-separated list of bootstrap servers from your Red Hat AMQ Streams cluster. <6> Defines the number of partitions of the Kafka topics, backed by the `Broker` objects. The default is `10`. <7> Defines the replication factor of the Kafka topics, backed by the `Broker` objects. The default is `3`. @@ -94,7 +85,7 @@ image::knative-kafka-overview.png[Kafka Knative Overview page showing Conditions + If the conditions have a status of *Unknown* or *False*, wait a few moments to refresh the page. -. Check that the Knative Kafka resources have been created: +. Check that the Knative broker for Apache Kafka resources have been created: + [source,terminal] ---- diff --git a/modules/serverless-install-serving-web-console.adoc b/modules/serverless-install-serving-web-console.adoc index 8e2976926c81..bc31206c5480 100644 --- a/modules/serverless-install-serving-web-console.adoc +++ b/modules/serverless-install-serving-web-console.adoc @@ -10,13 +10,7 @@ After you install the {ServerlessOperatorName}, install Knative Serving by using .Prerequisites -ifdef::openshift-enterprise[] -* You have access to an {ocp-product-title} account with cluster administrator access. -endif::[] - -ifdef::openshift-dedicated,openshift-rosa[] -* You have access to an {ocp-product-title} account with cluster administrator or dedicated administrator access. -endif::[] +* You have cluster administrator permissions on {ocp-product-title}, or you have cluster or dedicated administrator permissions on {rosa-product-title} or {dedicated-product-title}. 
* You have logged in to the {ocp-product-title} web console. * You have installed the {ServerlessOperatorName}. diff --git a/modules/serverless-install-serving-yaml.adoc b/modules/serverless-install-serving-yaml.adoc index 5ebe07c398eb..e33e0973c073 100644 --- a/modules/serverless-install-serving-yaml.adoc +++ b/modules/serverless-install-serving-yaml.adoc @@ -10,13 +10,7 @@ After you install the {ServerlessOperatorName}, you can install Knative Serving .Prerequisites -ifdef::openshift-enterprise[] -* You have access to an {ocp-product-title} account with cluster administrator access. -endif::[] - -ifdef::openshift-dedicated,openshift-rosa[] -* You have access to an {ocp-product-title} account with cluster administrator or dedicated administrator access. -endif::[] +* You have cluster administrator permissions on {ocp-product-title}, or you have cluster or dedicated administrator permissions on {rosa-product-title} or {dedicated-product-title}. * You have installed the {ServerlessOperatorName}. * Install the OpenShift CLI (`oc`). diff --git a/modules/serverless-install-web-console.adoc b/modules/serverless-install-web-console.adoc index c7d1d85098ff..4ff3e0022d0e 100644 --- a/modules/serverless-install-web-console.adoc +++ b/modules/serverless-install-web-console.adoc @@ -10,20 +10,15 @@ You can install the {ServerlessOperatorName} from the OperatorHub by using the { .Prerequisites -ifdef::openshift-enterprise[] -* You have access to an {ocp-product-title} account with cluster administrator access. -* Your cluster has the Marketplace capability enabled or the Red Hat Operator catalog source configured manually. -endif::[] +* You have cluster administrator permissions on {ocp-product-title}, or you have cluster or dedicated administrator permissions on {rosa-product-title} or {dedicated-product-title}. +* For {ocp-product-title}, your cluster has the Marketplace capability enabled or the Red Hat Operator catalog source configured manually. 
-ifdef::openshift-dedicated,openshift-rosa[] -* You have access to an {ocp-product-title} account with cluster or dedicated administrator access. -endif::[] -* You have logged in to the {ocp-product-title} web console. +* You have logged in to the web console. .Procedure -. In the {ocp-product-title} web console, navigate to the *Operators* -> *OperatorHub* page. +. In the web console, navigate to the *Operators* -> *OperatorHub* page. . Scroll, or type the keyword *Serverless* into the *Filter by keyword* box to find the {ServerlessOperatorName}. diff --git a/modules/serverless-installing-cli-linux-rpm-package-manager.adoc b/modules/serverless-installing-cli-linux-rpm-package-manager.adoc index 7130e5918e1d..544251a1ff07 100644 --- a/modules/serverless-installing-cli-linux-rpm-package-manager.adoc +++ b/modules/serverless-installing-cli-linux-rpm-package-manager.adoc @@ -45,7 +45,6 @@ For {op-system-base-full}, you can install the Knative (`kn`) CLI as an RPM by u ---- # subscription-manager repos --enable="openshift-serverless-1-for-rhel-8-x86_64-rpms" ---- -ifndef::openshift-rosa[] + * Linux on {ibmzProductName} and {linuxoneProductName} (s390x) + @@ -60,7 +59,6 @@ ifndef::openshift-rosa[] ---- # subscription-manager repos --enable="openshift-serverless-1-for-rhel-8-ppc64le-rpms" ---- -endif::openshift-rosa[] . 
Install the Knative (`kn`) CLI as an RPM by using a package manager: + diff --git a/modules/serverless-installing-cli-linux.adoc b/modules/serverless-installing-cli-linux.adoc index c80247dede6e..0c99043f5fd4 100644 --- a/modules/serverless-installing-cli-linux.adoc +++ b/modules/serverless-installing-cli-linux.adoc @@ -28,12 +28,10 @@ $ kn: No such file or directory + -- * link:https://mirror.openshift.com/pub/openshift-v4/clients/serverless/latest/kn-linux-amd64.tar.gz[Linux (x86_64, amd64)] -ifndef::openshift-rosa[] * link:https://mirror.openshift.com/pub/openshift-v4/clients/serverless/latest/kn-linux-s390x.tar.gz[Linux on {ibmzProductName} and {linuxoneProductName} (s390x)] * link:https://mirror.openshift.com/pub/openshift-v4/clients/serverless/latest/kn-linux-ppc64le.tar.gz[Linux on {ibmpowerProductName} (ppc64le)] -endif::openshift-rosa[] -- + You can also download any version of `kn` by navigating to that version's corresponding directory in the link:https://mirror.openshift.com/pub/openshift-v4/clients/serverless/[Serverless client download mirror]. diff --git a/modules/serverless-jaeger-config.adoc b/modules/serverless-jaeger-config.adoc index 0098f15b4983..219981e66f78 100644 --- a/modules/serverless-jaeger-config.adoc +++ b/modules/serverless-jaeger-config.adoc @@ -10,18 +10,12 @@ To enable distributed tracing using Jaeger, you must install and configure Jaege .Prerequisites -ifdef::openshift-enterprise[] -* You have access to an {ocp-product-title} account with cluster administrator access. -endif::[] - -ifdef::openshift-dedicated,openshift-rosa[] -* You have access to an {ocp-product-title} account with cluster or dedicated administrator access. -endif::[] +* You have cluster administrator permissions on {ocp-product-title}, or you have cluster or dedicated administrator permissions on {rosa-product-title} or {dedicated-product-title}. * You have installed the {ServerlessOperatorName}, Knative Serving, and Knative Eventing. 
* You have installed the {JaegerName} Operator. * You have installed the OpenShift CLI (`oc`). -* You have created a project or have access to a project with the appropriate roles and permissions to create applications and other workloads in {ocp-product-title}. +* You have created a project or have access to a project with the appropriate roles and permissions to create applications and other workloads. .Procedure diff --git a/modules/serverless-kafka-broker-configmap.adoc b/modules/serverless-kafka-broker-configmap.adoc index 0b7248ea15fa..3367f93a222a 100644 --- a/modules/serverless-kafka-broker-configmap.adoc +++ b/modules/serverless-kafka-broker-configmap.adoc @@ -4,7 +4,7 @@ :_content-type: PROCEDURE [id="serverless-kafka-broker-configmap_{context}"] -= Configuring Kafka broker settings += Configuring Apache Kafka broker settings You can configure the replication factor, bootstrap servers, and the number of topic partitions for a Kafka broker, by creating a config map and referencing this config map in the Kafka `Broker` object. diff --git a/modules/serverless-kafka-broker-sasl-default-config.adoc b/modules/serverless-kafka-broker-sasl-default-config.adoc index 9359ee3e575a..5ca18d6e4fe9 100644 --- a/modules/serverless-kafka-broker-sasl-default-config.adoc +++ b/modules/serverless-kafka-broker-sasl-default-config.adoc @@ -4,7 +4,7 @@ :_content-type: PROCEDURE [id="serverless-kafka-broker-sasl-default-config_{context}"] -= Configuring SASL authentication for Kafka brokers += Configuring SASL authentication for Apache Kafka brokers _Simple Authentication and Security Layer_ (SASL) is used by Apache Kafka for authentication. If you use SASL authentication on your cluster, users must provide credentials to Knative for communicating with the Kafka cluster; otherwise events cannot be produced or consumed. 
diff --git a/modules/serverless-kafka-broker-tls-default-config.adoc b/modules/serverless-kafka-broker-tls-default-config.adoc index 0f4b0f34a646..85ed1b176fa5 100644 --- a/modules/serverless-kafka-broker-tls-default-config.adoc +++ b/modules/serverless-kafka-broker-tls-default-config.adoc @@ -5,9 +5,9 @@ :_content-type: PROCEDURE [id="serverless-kafka-broker-tls-default-config_{context}"] -= Configuring TLS authentication for Kafka brokers += Configuring TLS authentication for Apache Kafka brokers -_Transport Layer Security_ (TLS) is used by Apache Kafka clients and servers to encrypt traffic between Knative and Kafka, as well as for authentication. TLS is the only supported method of traffic encryption for Knative Kafka. +_Transport Layer Security_ (TLS) is used by Apache Kafka clients and servers to encrypt traffic between Knative and Kafka, as well as for authentication. TLS is the only supported method of traffic encryption for the Knative broker implementation for Apache Kafka. .Prerequisites diff --git a/modules/serverless-kafka-broker-with-kafka-topic.adoc b/modules/serverless-kafka-broker-with-kafka-topic.adoc index a20593abc780..82bed64d7b3d 100644 --- a/modules/serverless-kafka-broker-with-kafka-topic.adoc +++ b/modules/serverless-kafka-broker-with-kafka-topic.adoc @@ -4,7 +4,7 @@ :_content-type: PROCEDURE [id="serverless-kafka-broker-with-kafka-topic_{context}"] -= Creating a Kafka broker that uses an externally managed Kafka topic += Creating an Apache Kafka broker that uses an externally managed Kafka topic If you want to use a Kafka broker without allowing it to create its own internal topic, you can use an externally managed Kafka topic instead. To do this, you must create a Kafka `Broker` object that uses the `kafka.eventing.knative.dev/external.topic` annotation. 
diff --git a/modules/serverless-kafka-broker.adoc b/modules/serverless-kafka-broker.adoc index d34ae524022a..f6c756e2c679 100644 --- a/modules/serverless-kafka-broker.adoc +++ b/modules/serverless-kafka-broker.adoc @@ -4,7 +4,7 @@ :_content-type: PROCEDURE [id="serverless-kafka-broker_{context}"] -= Creating a Kafka broker by using YAML += Creating an Apache Kafka broker by using YAML Creating Knative resources by using YAML files uses a declarative API, which enables you to describe applications declaratively and in a reproducible manner. To create a Kafka broker by using YAML, you must create a YAML file that defines a `Broker` object, then apply it by using the `oc apply` command. @@ -36,7 +36,7 @@ spec: namespace: knative-eventing ---- <1> The broker class. If not specified, brokers use the default class as configured by cluster administrators. To use the Kafka broker, this value must be `Kafka`. -<2> The default config map for Knative Kafka brokers. This config map is created when the Kafka broker functionality is enabled on the cluster by a cluster administrator. +<2> The default config map for Knative brokers for Apache Kafka. This config map is created when the Kafka broker functionality is enabled on the cluster by a cluster administrator. . Apply the Kafka-based broker YAML file: + diff --git a/modules/serverless-kafka-developer.adoc b/modules/serverless-kafka-developer.adoc index 009340622b66..b57284995e87 100644 --- a/modules/serverless-kafka-developer.adoc +++ b/modules/serverless-kafka-developer.adoc @@ -4,21 +4,21 @@ :_content-type: CONCEPT [id="serverless-kafka-developer_{context}"] -= Using Knative Kafka += Using the Knative broker for Apache Kafka -Knative Kafka provides integration options for you to use supported versions of the Apache Kafka message streaming platform with {ServerlessProductName}. Kafka provides options for event source, channel, broker, and event sink capabilities. 
+The Knative broker implementation for Apache Kafka provides integration options for you to use supported versions of the Apache Kafka message streaming platform with {ServerlessProductName}. Kafka provides options for event source, channel, broker, and event sink capabilities. // OCP ifdef::openshift-enterprise[] [NOTE] ==== -Knative Kafka is not currently supported for {ibmzProductName} and {ibmpowerProductName}. +The Knative broker implementation for Apache Kafka is not currently supported for {ibmzProductName} and {ibmpowerProductName} on {ocp-product-title}. ==== endif::[] -Knative Kafka provides additional options, such as: +Knative broker for Apache Kafka provides additional options, such as: * Kafka source * Kafka channel diff --git a/modules/serverless-kafka-event-delivery.adoc b/modules/serverless-kafka-event-delivery.adoc index d7d0852e7bc4..6a62692364a2 100644 --- a/modules/serverless-kafka-event-delivery.adoc +++ b/modules/serverless-kafka-event-delivery.adoc @@ -4,8 +4,8 @@ :_content-type: CONCEPT [id="serverless-kafka-delivery-retries_{context}"] -= Kafka event delivery and retries += Apache Kafka event delivery and retries -Using Kafka components in an event-driven architecture provides "at least once" event delivery. This means that operations are retried until a return code value is received. This makes applications more resilient to lost events; however, it might result in duplicate events being sent. +Using Apache Kafka components in an event-driven architecture provides "at least once" event delivery. This means that operations are retried until a return code value is received. This makes applications more resilient to lost events; however, it might result in duplicate events being sent. For the Kafka event source, there is a fixed number of retries for event delivery by default. For Kafka channels, retries are only performed if they are configured in the Kafka channel `Delivery` spec. 
diff --git a/modules/serverless-kafka-sasl-channels.adoc b/modules/serverless-kafka-sasl-channels.adoc index ebd6ff426436..4063592a67e8 100644 --- a/modules/serverless-kafka-sasl-channels.adoc +++ b/modules/serverless-kafka-sasl-channels.adoc @@ -4,7 +4,7 @@ :_content-type: PROCEDURE [id="serverless-kafka-sasl-channels_{context}"] -= Configuring SASL authentication for Kafka channels += Configuring SASL authentication for Knative channels for Apache Kafka _Simple Authentication and Security Layer_ (SASL) is used by Apache Kafka for authentication. If you use SASL authentication on your cluster, users must provide credentials to Knative for communicating with the Kafka cluster; otherwise events cannot be produced or consumed. diff --git a/modules/serverless-kafka-sasl-source.adoc b/modules/serverless-kafka-sasl-source.adoc index 0e088bbe1c16..7462b6217426 100644 --- a/modules/serverless-kafka-sasl-source.adoc +++ b/modules/serverless-kafka-sasl-source.adoc @@ -4,7 +4,7 @@ :_content-type: PROCEDURE [id="serverless-kafka-sasl-source_{context}"] -= Configuring SASL authentication for Kafka sources += Configuring SASL authentication for Apache Kafka sources _Simple Authentication and Security Layer_ (SASL) is used by Apache Kafka for authentication. If you use SASL authentication on your cluster, users must provide credentials to Knative for communicating with the Kafka cluster; otherwise events cannot be produced or consumed. 
diff --git a/modules/serverless-kafka-sink-security-config.adoc b/modules/serverless-kafka-sink-security-config.adoc index 6e7d14ac51b4..6b05d793dfd8 100644 --- a/modules/serverless-kafka-sink-security-config.adoc +++ b/modules/serverless-kafka-sink-security-config.adoc @@ -4,9 +4,9 @@ :_content-type: PROCEDURE [id="serverless-kafka-sink-security-config_{context}"] -= Configuring security for Kafka sinks += Configuring security for Apache Kafka sinks -_Transport Layer Security_ (TLS) is used by Apache Kafka clients and servers to encrypt traffic between Knative and Kafka, as well as for authentication. TLS is the only supported method of traffic encryption for Knative Kafka. +_Transport Layer Security_ (TLS) is used by Apache Kafka clients and servers to encrypt traffic between Knative and Kafka, as well as for authentication. TLS is the only supported method of traffic encryption for the Knative broker implementation for Apache Kafka. _Simple Authentication and Security Layer_ (SASL) is used by Apache Kafka for authentication. If you use SASL authentication on your cluster, users must provide credentials to Knative for communicating with the Kafka cluster; otherwise events cannot be produced or consumed. diff --git a/modules/serverless-kafka-sink.adoc b/modules/serverless-kafka-sink.adoc index bdef61c8abaa..0e35ee6b3638 100644 --- a/modules/serverless-kafka-sink.adoc +++ b/modules/serverless-kafka-sink.adoc @@ -4,9 +4,9 @@ :_content-type: PROCEDURE [id="serverless-kafka-sink_{context}"] -= Using a Kafka sink += Creating an Apache Kafka sink by using YAML -You can create an event sink called a Kafka sink that sends events to a Kafka topic. Creating Knative resources by using YAML files uses a declarative API, which enables you to describe applications declaratively and in a reproducible manner. By default, a Kafka sink uses the binary content mode, which is more efficient than the structured mode. 
To create a Kafka sink by using YAML, you must create a YAML file that defines a `KafkaSink` object, then apply it by using the `oc apply` command. +You can create a Kafka sink that sends events to a Kafka topic. By default, a Kafka sink uses the binary content mode, which is more efficient than the structured mode. To create a Kafka sink by using YAML, you must create a YAML file that defines a `KafkaSink` object, then apply it by using the `oc apply` command. .Prerequisites diff --git a/modules/serverless-kafka-source-kn.adoc b/modules/serverless-kafka-source-kn.adoc index c0ac829baad9..4af542ecc724 100644 --- a/modules/serverless-kafka-source-kn.adoc +++ b/modules/serverless-kafka-source-kn.adoc @@ -5,7 +5,7 @@ :_content-type: PROCEDURE [id="serverless-kafka-source-kn_{context}"] -= Creating a Kafka event source by using the Knative CLI += Creating an Apache Kafka event source by using the Knative CLI You can use the `kn source kafka create` command to create a Kafka source by using the Knative (`kn`) CLI. Using the Knative CLI to create event sources provides a more streamlined and intuitive user interface than modifying YAML files directly. diff --git a/modules/serverless-kafka-source-odc.adoc b/modules/serverless-kafka-source-odc.adoc index 9d6e54b45854..2807f98d3a72 100644 --- a/modules/serverless-kafka-source-odc.adoc +++ b/modules/serverless-kafka-source-odc.adoc @@ -4,9 +4,9 @@ :_content-type: PROCEDURE [id="serverless-kafka-source-odc_{context}"] -= Creating a Kafka event source by using the web console += Creating an Apache Kafka event source by using the web console -After Knative Kafka is installed on your cluster, you can create a Kafka source by using the web console. Using the {ocp-product-title} web console provides a streamlined and intuitive user interface to create a Kafka source. +After the Knative broker implementation for Apache Kafka is installed on your cluster, you can create an Apache Kafka source by using the web console. 
Using the {product-title} web console provides a streamlined and intuitive user interface to create a Kafka source. .Prerequisites diff --git a/modules/serverless-kafka-source-yaml.adoc b/modules/serverless-kafka-source-yaml.adoc index cde0905682e6..f1ab7f424c14 100644 --- a/modules/serverless-kafka-source-yaml.adoc +++ b/modules/serverless-kafka-source-yaml.adoc @@ -4,7 +4,7 @@ :_content-type: PROCEDURE [id="serverless-kafka-source-yaml_{context}"] -= Creating a Kafka event source by using YAML += Creating an Apache Kafka event source by using YAML Creating Knative resources by using YAML files uses a declarative API, which enables you to describe applications declaratively and in a reproducible manner. To create a Kafka source by using YAML, you must create a YAML file that defines a `KafkaSource` object, then apply it by using the `oc apply` command. diff --git a/modules/serverless-kafka-tls-channels.adoc b/modules/serverless-kafka-tls-channels.adoc index b350269be2ed..fdd595769f3d 100644 --- a/modules/serverless-kafka-tls-channels.adoc +++ b/modules/serverless-kafka-tls-channels.adoc @@ -5,9 +5,9 @@ :_content-type: PROCEDURE [id="serverless-kafka-tls-channels_{context}"] -= Configuring TLS authentication for Kafka channels += Configuring TLS authentication for Knative channels for Apache Kafka -_Transport Layer Security_ (TLS) is used by Apache Kafka clients and servers to encrypt traffic between Knative and Kafka, as well as for authentication. TLS is the only supported method of traffic encryption for Knative Kafka. +_Transport Layer Security_ (TLS) is used by Apache Kafka clients and servers to encrypt traffic between Knative and Kafka, as well as for authentication. TLS is the only supported method of traffic encryption for the Knative broker implementation for Apache Kafka. 
.Prerequisites diff --git a/modules/serverless-list-source-types-kn.adoc b/modules/serverless-list-source-types-kn.adoc index 9f69c34732e9..317c580f0d2c 100644 --- a/modules/serverless-list-source-types-kn.adoc +++ b/modules/serverless-list-source-types-kn.adoc @@ -31,12 +31,12 @@ PingSource pingsources.sources.knative.dev Periodically s SinkBinding sinkbindings.sources.knative.dev Binding for connecting a PodSpecable to a sink ---- -ifdef::openshift-enterprise[] -. Optional: You can also list the available event source types in YAML format: + +. Optional: On {ocp-product-title}, you can also list the available event source types in YAML format: + [source,terminal] ---- $ kn source list-types -o yaml ---- -endif::[] + // optional step not allowed yet for OSD due to upstream https://github.com/knative/client/issues/1385 diff --git a/modules/serverless-ossm-enabling-serving-metrics.adoc b/modules/serverless-ossm-enabling-serving-metrics.adoc index 986250e2d592..fce4d01accac 100644 --- a/modules/serverless-ossm-enabling-serving-metrics.adoc +++ b/modules/serverless-ossm-enabling-serving-metrics.adoc @@ -13,16 +13,10 @@ If Service Mesh is enabled with mTLS, metrics for Knative Serving are disabled b * You have installed the {ServerlessOperatorName} and Knative Serving on your cluster. * You have installed {SMProductName} with the mTLS functionality enabled. -ifdef::openshift-enterprise[] -* You have access to an {ocp-product-title} account with cluster administrator access. -endif::[] - -ifdef::openshift-dedicated,openshift-rosa[] -* You have access to an {ocp-product-title} account with cluster or dedicated administrator access. -endif::[] +* You have cluster administrator permissions on {ocp-product-title}, or you have cluster or dedicated administrator permissions on {rosa-product-title} or {dedicated-product-title}. * Install the OpenShift CLI (`oc`). 
-* You have created a project or have access to a project with the appropriate roles and permissions to create applications and other workloads in {ocp-product-title}. +* You have created a project or have access to a project with the appropriate roles and permissions to create applications and other workloads. .Procedure diff --git a/modules/serverless-ossm-external-certs.adoc b/modules/serverless-ossm-external-certs.adoc index c7b584806426..496d545295e8 100644 --- a/modules/serverless-ossm-external-certs.adoc +++ b/modules/serverless-ossm-external-certs.adoc @@ -10,17 +10,11 @@ By default, the {SMProductShortName} mTLS feature only secures traffic inside of .Prerequisites -ifdef::openshift-enterprise[] -* You have access to an {ocp-product-title} account with cluster administrator access. -endif::[] - -ifdef::openshift-dedicated,openshift-rosa[] -* You have access to an {ocp-product-title} account with cluster or dedicated administrator access. -endif::[] +* You have cluster administrator permissions on {ocp-product-title}, or you have cluster or dedicated administrator permissions on {rosa-product-title} or {dedicated-product-title}. * You have installed the {ServerlessOperatorName} and Knative Serving. * Install the OpenShift CLI (`oc`). -* You have created a project or have access to a project with the appropriate roles and permissions to create applications and other workloads in {ocp-product-title}. +* You have created a project or have access to a project with the appropriate roles and permissions to create applications and other workloads. 
.Procedure diff --git a/modules/serverless-ossm-secret-filtering-net-istio.adoc b/modules/serverless-ossm-secret-filtering-net-istio.adoc index 679fbfa38e8d..9520ffa36916 100644 --- a/modules/serverless-ossm-secret-filtering-net-istio.adoc +++ b/modules/serverless-ossm-secret-filtering-net-istio.adoc @@ -15,15 +15,9 @@ If you enable secret filtering, all of your secrets need to be labeled with `ne .Prerequisites -ifdef::openshift-enterprise[] -* You have access to an {ocp-product-title} account with cluster administrator access. -endif::[] +* You have cluster administrator permissions on {ocp-product-title}, or you have cluster or dedicated administrator permissions on {rosa-product-title} or {dedicated-product-title}. -ifdef::openshift-dedicated,openshift-rosa[] -* You have access to an {ocp-product-title} account with cluster or dedicated administrator access. -endif::[] - -* You have created a project or have access to a project with the appropriate roles and permissions to create applications and other workloads in {ocp-product-title}. +* You have created a project or have access to a project with the appropriate roles and permissions to create applications and other workloads. * Install {SMProductName}. {ServerlessProductName} with {SMProductShortName} only is supported for use with {SMProductName} version 2.0.5 or later. * Install the {ServerlessOperatorName} and Knative Serving. * Install the OpenShift CLI (`oc`). diff --git a/modules/serverless-ossm-secret-filtering-net-kourier.adoc b/modules/serverless-ossm-secret-filtering-net-kourier.adoc index 378143339c29..98a2a5b2e86f 100644 --- a/modules/serverless-ossm-secret-filtering-net-kourier.adoc +++ b/modules/serverless-ossm-secret-filtering-net-kourier.adoc @@ -15,15 +15,9 @@ If you enable secret filtering, all of your secrets need to be labeled with `ne .Prerequisites -ifdef::openshift-enterprise[] -* You have access to an {ocp-product-title} account with cluster administrator access. 
-endif::[] +* You have cluster administrator permissions on {ocp-product-title}, or you have cluster or dedicated administrator permissions on {rosa-product-title} or {dedicated-product-title}. -ifdef::openshift-dedicated,openshift-rosa[] -* You have access to an {ocp-product-title} account with cluster or dedicated administrator access. -endif::[] - -* A project that you created or that you have roles and permissions for to create applications and other workloads in {ocp-product-title}. +* A project that you created or that you have roles and permissions for to create applications and other workloads. * Install the {ServerlessOperatorName} and Knative Serving. * Install the OpenShift CLI (`oc`). diff --git a/modules/serverless-ossm-setup-with-kourier.adoc b/modules/serverless-ossm-setup-with-kourier.adoc index 5695c559d980..12f50b03ad0c 100644 --- a/modules/serverless-ossm-setup-with-kourier.adoc +++ b/modules/serverless-ossm-setup-with-kourier.adoc @@ -10,15 +10,9 @@ You can use {SMProductShortName} with {ServerlessProductName} even if Kourier is .Prerequisites -ifdef::openshift-enterprise[] -* You have access to an {ocp-product-title} account with cluster administrator access. -endif::[] +* You have cluster administrator permissions on {ocp-product-title}, or you have cluster or dedicated administrator permissions on {rosa-product-title} or {dedicated-product-title}. -ifdef::openshift-dedicated,openshift-rosa[] -* You have access to an {ocp-product-title} account with cluster or dedicated administrator access. -endif::[] - -* You have created a project or have access to a project with the appropriate roles and permissions to create applications and other workloads in {ocp-product-title}. +* You have created a project or have access to a project with the appropriate roles and permissions to create applications and other workloads. * Install the OpenShift CLI (`oc`). * Install the {ServerlessOperatorName} and Knative Serving on your cluster. 
* Install {SMProductName}. {ServerlessProductName} with {SMProductShortName} and Kourier is supported for use with both {SMProductName} versions 1.x and 2.x. diff --git a/modules/serverless-ossm-setup.adoc b/modules/serverless-ossm-setup.adoc index 122b2e2de490..796a26632028 100644 --- a/modules/serverless-ossm-setup.adoc +++ b/modules/serverless-ossm-setup.adoc @@ -10,15 +10,9 @@ You can integrate {SMProductShortName} with {ServerlessProductName} without usin .Prerequisites -ifdef::openshift-enterprise[] -* You have access to an {ocp-product-title} account with cluster administrator access. -endif::[] +* You have cluster administrator permissions on {ocp-product-title}, or you have cluster or dedicated administrator permissions on {rosa-product-title} or {dedicated-product-title}. -ifdef::openshift-dedicated,openshift-rosa[] -* You have access to an {ocp-product-title} account with cluster or dedicated administrator access. -endif::[] - -* You have created a project or have access to a project with the appropriate roles and permissions to create applications and other workloads in {ocp-product-title}. +* You have created a project or have access to a project with the appropriate roles and permissions to create applications and other workloads. * Install the {SMProductName} Operator and create a `ServiceMeshControlPlane` resource in the `istio-system` namespace. If you want to use mTLS functionality, you must also set the `spec.security.dataPlane.mtls` field for the `ServiceMeshControlPlane` resource to `true`. + diff --git a/modules/serverless-ossm-v1x-jwt.adoc b/modules/serverless-ossm-v1x-jwt.adoc index efdfd7c5a7b8..d09adad173e9 100644 --- a/modules/serverless-ossm-v1x-jwt.adoc +++ b/modules/serverless-ossm-v1x-jwt.adoc @@ -10,9 +10,7 @@ ==== Adding sidecar injection to pods in system namespaces, such as `knative-serving` and `knative-serving-ingress`, is not supported when Kourier is enabled. 
-ifdef::openshift-enterprise[] -If you require sidecar injection for pods in these namespaces, see the {ServerlessProductName} documentation on _Integrating {SMProductShortName} with {ServerlessProductName} natively_. -endif::[] +For {ocp-product-title}, if you require sidecar injection for pods in these namespaces, see the {ServerlessProductName} documentation on _Integrating {SMProductShortName} with {ServerlessProductName} natively_. ==== .Prerequisites diff --git a/modules/serverless-ossm-v2x-jwt.adoc b/modules/serverless-ossm-v2x-jwt.adoc index d5588935a36a..0e351a223cbf 100644 --- a/modules/serverless-ossm-v2x-jwt.adoc +++ b/modules/serverless-ossm-v2x-jwt.adoc @@ -10,9 +10,7 @@ ==== Adding sidecar injection to pods in system namespaces, such as `knative-serving` and `knative-serving-ingress`, is not supported when Kourier is enabled. -ifdef::openshift-enterprise[] -If you require sidecar injection for pods in these namespaces, see the {ServerlessProductName} documentation on _Integrating {SMProductShortName} with {ServerlessProductName} natively_. -endif::[] +For {ocp-product-title}, if you require sidecar injection for pods in these namespaces, see the {ServerlessProductName} documentation on _Integrating {SMProductShortName} with {ServerlessProductName} natively_. ==== .Prerequisites diff --git a/modules/serverless-rn-1-24-0.adoc b/modules/serverless-rn-1-24-0.adoc index 7cd59924579f..1e383590f1b4 100644 --- a/modules/serverless-rn-1-24-0.adoc +++ b/modules/serverless-rn-1-24-0.adoc @@ -22,9 +22,7 @@ * {ServerlessProductName} logic is now available as a Developer Preview. It enables defining declarative workflow models for managing serverless applications. -ifdef::openshift-enterprise[] -* You can now use the cost management service with {ServerlessProductName}. -endif::[] +* For {ocp-product-title}, you can now use the cost management service with {ServerlessProductName}. 
[id="fixed-issues-1.24.0_{context}"] == Fixed issues diff --git a/modules/serverless-rn-1-28-0.adoc b/modules/serverless-rn-1-28-0.adoc index c85b8af05577..db3c09941d02 100644 --- a/modules/serverless-rn-1-28-0.adoc +++ b/modules/serverless-rn-1-28-0.adoc @@ -15,7 +15,7 @@ * {ServerlessProductName} now uses Knative Eventing 1.7. * {ServerlessProductName} now uses Kourier 1.7. * {ServerlessProductName} now uses Knative (`kn`) CLI 1.7. -* {ServerlessProductName} now uses Knative Kafka 1.7. +* {ServerlessProductName} now uses the Knative broker implementation for Apache Kafka 1.7. * The `kn func` CLI plug-in now uses `func` 1.9.1 version. * Node.js and TypeScript runtimes for {ServerlessProductName} Functions are now Generally Available (GA). diff --git a/modules/serverless-scale-to-zero-grace-period.adoc b/modules/serverless-scale-to-zero-grace-period.adoc index 89e25a137d86..7a1632f0b69c 100644 --- a/modules/serverless-scale-to-zero-grace-period.adoc +++ b/modules/serverless-scale-to-zero-grace-period.adoc @@ -12,13 +12,7 @@ Knative Serving provides automatic scaling down to zero pods for applications. Y * You have installed {ServerlessOperatorName} and Knative Serving on your cluster. -ifdef::openshift-enterprise[] -* You have cluster administrator permissions. -endif::[] - -ifdef::openshift-dedicated,openshift-rosa[] -* You have cluster or dedicated administrator permissions. -endif::[] +* You have cluster administrator permissions on {ocp-product-title}, or you have cluster or dedicated administrator permissions on {rosa-product-title} or {dedicated-product-title}. * You are using the default Knative Pod Autoscaler. The scale-to-zero feature is not available if you are using the Kubernetes Horizontal Pod Autoscaler. 
diff --git a/modules/serverless-tech-preview-features.adoc b/modules/serverless-tech-preview-features.adoc index 0f93e2251c5c..1187b2208372 100644 --- a/modules/serverless-tech-preview-features.adoc +++ b/modules/serverless-tech-preview-features.adoc @@ -11,8 +11,7 @@ Features which are Generally Available (GA) are fully supported and are suitable The following table provides information about which {ServerlessProductName} features are GA and which are TP: // OCP + OSD table -ifdef::openshift-enterprise,openshift-dedicated[] -.Generally Available and Technology Preview features tracker +.Generally Available and Technology Preview features tracker for {ocp-product-title} and {dedicated-product-title} [cols="2,1,1,1",options="header"] |==== |Feature |1.26|1.27|1.28 @@ -93,11 +92,11 @@ ifdef::openshift-enterprise,openshift-dedicated[] |TP |==== -endif::[] + // ROSA table -ifdef::openshift-rosa[] -.Generally Available and Technology Preview features tracker + +.Generally Available and Technology Preview features tracker for {rosa-product-title} [cols="2,1,1,1",options="header"] |==== |Feature |1.26|1.27|1.28 @@ -158,4 +157,3 @@ ifdef::openshift-rosa[] |TP |==== -endif::[] diff --git a/modules/serverless-uninstalling-knative-eventing.adoc b/modules/serverless-uninstalling-knative-eventing.adoc index 2a1d2c707c41..91a6a8ca9b31 100644 --- a/modules/serverless-uninstalling-knative-eventing.adoc +++ b/modules/serverless-uninstalling-knative-eventing.adoc @@ -8,13 +8,7 @@ .Prerequisites -ifdef::openshift-enterprise[] -* You have access to an {ocp-product-title} account with cluster administrator access. -endif::[] - -ifdef::openshift-dedicated[] -* You have access to an {ocp-product-title} account with cluster administrator or dedicated administrator access. -endif::[] +* You have cluster administrator permissions on {ocp-product-title}, or you have cluster or dedicated administrator permissions on {dedicated-product-title}. * Install the OpenShift CLI (`oc`). 
diff --git a/modules/serverless-uninstalling-knative-serving.adoc b/modules/serverless-uninstalling-knative-serving.adoc index ca60f607d4b3..b8eb7f598f95 100644 --- a/modules/serverless-uninstalling-knative-serving.adoc +++ b/modules/serverless-uninstalling-knative-serving.adoc @@ -8,13 +8,7 @@ .Prerequisites -ifdef::openshift-enterprise[] -* You have access to an {ocp-product-title} account with cluster administrator access. -endif::[] - -ifdef::openshift-dedicated,openshift-rosa[] -* You have access to an {ocp-product-title} account with cluster administrator or dedicated administrator access. -endif::[] +* You have cluster administrator permissions on {ocp-product-title}, or you have cluster or dedicated administrator permissions on {rosa-product-title} or {dedicated-product-title}. * Install the OpenShift CLI (`oc`). diff --git a/modules/support-knowledgebase-about.adoc b/modules/support-knowledgebase-about.adoc new file mode 100644 index 000000000000..aa0f1117d935 --- /dev/null +++ b/modules/support-knowledgebase-about.adoc @@ -0,0 +1,12 @@ +// Module included in the following assemblies: +// +// * serverless/serverless-support.adoc +// * support/getting-support.adoc +// * service_mesh/v2x/ossm-troubleshooting-istio.adoc +// * osd_architecture/osd-support.adoc + +:_content-type: CONCEPT +[id="support-knowledgebase-about_{context}"] += About the Red Hat Knowledgebase + +The link:https://access.redhat.com/knowledgebase[Red Hat Knowledgebase] provides rich content aimed at helping you make the most of Red Hat's products and technologies. The Red Hat Knowledgebase consists of articles, product documentation, and videos outlining best practices on installing, configuring, and using Red Hat products. In addition, you can search for solutions to known issues, each providing concise root cause descriptions and remedial steps. 
diff --git a/modules/support-knowledgebase-search.adoc b/modules/support-knowledgebase-search.adoc new file mode 100644 index 000000000000..0b79bfae5d8c --- /dev/null +++ b/modules/support-knowledgebase-search.adoc @@ -0,0 +1,32 @@ +// Module included in the following assemblies: +// +// * serverless/serverless-support.adoc +// * support/getting-support.adoc +// * service_mesh/v2x/ossm-troubleshooting-istio.adoc +// * osd_architecture/osd-support.adoc + +:_content-type: PROCEDURE +[id="support-knowledgebase-search_{context}"] += Searching the Red Hat Knowledgebase + +In the event of an {product-title} issue, you can perform an initial search to determine if a solution already exists within the Red Hat Knowledgebase. + +.Prerequisites + +* You have a Red Hat Customer Portal account. + +.Procedure + +. Log in to the link:http://access.redhat.com[Red Hat Customer Portal]. + +. In the main Red Hat Customer Portal search field, input keywords and strings relating to the problem, including: ++ +* {product-title} components (such as *etcd*) +* Related procedure (such as *installation*) +* Warnings, error messages, and other outputs related to explicit failures + +. Click *Search*. + +. Select the *{product-title}* product filter. + +. Select the *Knowledgebase* content type filter. diff --git a/modules/support-submitting-a-case.adoc b/modules/support-submitting-a-case.adoc new file mode 100644 index 000000000000..6fd04be8ecb8 --- /dev/null +++ b/modules/support-submitting-a-case.adoc @@ -0,0 +1,86 @@ +// Module included in the following assemblies: +// +// * serverless/serverless-support.adoc +// * support/getting-support.adoc +// * service_mesh/v2x/ossm-troubleshooting-istio.adoc +// * osd_architecture/osd-support.adoc + +:_content-type: PROCEDURE +[id="support-submitting-a-case_{context}"] += Submitting a support case + +.Prerequisites + +ifndef::openshift-dedicated[] +* You have access to the cluster as a user with the `cluster-admin` role. 
+* You have installed the OpenShift CLI (`oc`). +endif::openshift-dedicated[] +ifdef::openshift-dedicated[] +* You have access to the {cluster-manager-first}. +endif::openshift-dedicated[] +* You have a Red Hat Customer Portal account. +ifndef::openshift-dedicated[] +* You have a Red Hat standard or premium Subscription. +endif::openshift-dedicated[] + +.Procedure + +. Log in to the link:http://access.redhat.com[Red Hat Customer Portal] and select *SUPPORT CASES* -> *Open a case*. + +. Select the appropriate category for your issue (such as *Defect / Bug*), product (*{product-title}*), and product version +ifndef::openshift-dedicated[] +(*{product-version}*, +endif::openshift-dedicated[] +ifdef::openshift-dedicated[] +(*{product-title}*, +endif::openshift-dedicated[] +if this is not already autofilled). + +. Review the list of suggested Red Hat Knowledgebase solutions for a potential match against the problem that is being reported. If the suggested articles do not address the issue, click *Continue*. + +. Enter a concise but descriptive problem summary and further details about the symptoms being experienced, as well as your expectations. + +. Review the updated list of suggested Red Hat Knowledgebase solutions for a potential match against the problem that is being reported. The list is refined as you provide more information during the case creation process. If the suggested articles do not address the issue, click *Continue*. + +. Ensure that the account information presented is as expected, and if not, amend accordingly. + +. Check that the autofilled {product-title} Cluster ID is correct. If it is not, manually obtain your cluster ID. +ifdef::openshift-dedicated[] ++ +* To manually obtain your cluster ID using {cluster-manager-url}: +.. Navigate to *Clusters*. +.. Click on the name of the cluster you need to open a support case for. +.. Find the value in the *Cluster ID* field of the *Details* section of the *Overview* tab. 
+endif::openshift-dedicated[] +ifndef::openshift-dedicated[] ++ +* To manually obtain your cluster ID using the {product-title} web console: +.. Navigate to *Home* -> *Dashboards* -> *Overview*. +.. Find the value in the *Cluster ID* field of the *Details* section. ++ +* Alternatively, it is possible to open a new support case through the {product-title} web console and have your cluster ID autofilled. +.. From the toolbar, navigate to *(?) Help* -> *Open Support Case*. +.. The *Cluster ID* value is autofilled. ++ +* To obtain your cluster ID using the OpenShift CLI (`oc`), run the following command: ++ +[source,terminal] +---- +$ oc get clusterversion -o jsonpath='{.items[].spec.clusterID}{"\n"}' +---- +endif::openshift-dedicated[] + +. Complete the following questions where prompted and then click *Continue*: ++ +* Where are you experiencing the behavior? What environment? +* When does the behavior occur? Frequency? Repeatedly? At certain times? +* What information can you provide around time-frames and the business impact? + +. Upload relevant diagnostic data files and click *Continue*. +ifndef::openshift-dedicated[] +It is recommended to include data gathered using the `oc adm must-gather` command as a starting point, plus any issue specific data that is not collected by that command. +endif::openshift-dedicated[] + +. Input relevant case management details and click *Continue*. + +. Preview the case details and click *Submit*. diff --git a/observability/admin-metrics/serverless-admin-metrics.adoc b/observability/admin-metrics/serverless-admin-metrics.adoc index c3f6630be517..532d8bc06775 100644 --- a/observability/admin-metrics/serverless-admin-metrics.adoc +++ b/observability/admin-metrics/serverless-admin-metrics.adoc @@ -8,36 +8,22 @@ toc::[] Metrics enable cluster administrators to monitor how {ServerlessProductName} cluster components and workloads are performing. 
-ifdef::openshift-enterprise[] -You can view different metrics for {ServerlessProductName} by navigating to link:https://docs.openshift.com/container-platform/latest/monitoring/reviewing-monitoring-dashboards.adoc#reviewing-monitoring-dashboards-admin_reviewing-monitoring-dashboards[*Dashboards*] in the {ocp-product-title} web console *Administrator* perspective. -endif::[] - -ifdef::openshift-dedicated,openshift-rosa[] -You can view different metrics for {ServerlessProductName} by navigating to *Dashboards* in the {ocp-product-title} web console *Administrator* perspective. -endif::[] +You can view different metrics for {ServerlessProductName} by navigating to link:https://docs.openshift.com/container-platform/latest/monitoring/reviewing-monitoring-dashboards.html#reviewing-monitoring-dashboards-admin_reviewing-monitoring-dashboards[*Dashboards*] in the web console *Administrator* perspective. [id="prerequisites_serverless-admin-metrics"] == Prerequisites -ifdef::openshift-enterprise[] -* See the {ocp-product-title} documentation on link:https://docs.openshift.com/container-platform/latest/monitoring/managing-metrics.adoc#managing-metrics[Managing metrics] for information about enabling metrics for your cluster. - -* You have access to an {ocp-product-title} account with cluster administrator access. -endif::[] +* See the {ocp-product-title} documentation on link:https://docs.openshift.com/container-platform/latest/monitoring/managing-metrics.html#managing-metrics[Managing metrics] for information about enabling metrics for your cluster. -ifdef::openshift-dedicated,openshift-rosa[] -* You have access to an {ocp-product-title} account with cluster or dedicated administrator access. -endif::[] +* You have access to an account with cluster administrator access (or dedicated administrator access for {dedicated-product-title} or {rosa-product-title}). -* You have access to the *Administrator* perspective in the {ocp-product-title} web console. 
+* You have access to the *Administrator* perspective in the web console. [WARNING] ==== If {SMProductShortName} is enabled with mTLS, metrics for Knative Serving are disabled by default because Service Mesh prevents Prometheus from scraping metrics. -ifndef::openshift-dedicated[] For information about resolving this issue, see xref:../../integrations/serverless-ossm-setup.adoc#serverless-ossm-enabling-serving-metrics_serverless-ossm-setup[Enabling Knative Serving metrics when using Service Mesh with mTLS]. -endif::[] Scraping the metrics does not affect autoscaling of a Knative service, because scraping requests do not go through the activator. Consequently, no scraping takes place if no pods are running. ==== diff --git a/observability/cluster-logging/cluster-logging-serverless.adoc b/observability/cluster-logging/cluster-logging-serverless.adoc index 3d9dc2799a81..af6ea3c97292 100644 --- a/observability/cluster-logging/cluster-logging-serverless.adoc +++ b/observability/cluster-logging/cluster-logging-serverless.adoc @@ -6,5 +6,7 @@ include::_attributes/common-attributes.adoc[] toc::[] +:openshift-enterprise: include::modules/cluster-logging-about.adoc[leveloffset=+1] include::modules/cluster-logging-deploying-about.adoc[leveloffset=+1] +:openshift-enterprise!: \ No newline at end of file diff --git a/observability/developer-metrics/serverless-developer-metrics.adoc b/observability/developer-metrics/serverless-developer-metrics.adoc index 7ab8fb3584c5..78d99e0dc177 100644 --- a/observability/developer-metrics/serverless-developer-metrics.adoc +++ b/observability/developer-metrics/serverless-developer-metrics.adoc @@ -8,31 +8,20 @@ toc::[] Metrics enable developers to monitor how Knative services are performing. You can use the {ocp-product-title} monitoring stack to record and view health checks and metrics for your Knative services. 
-ifdef::openshift-enterprise[] -You can view different metrics for {ServerlessProductName} by navigating to link:https://docs.openshift.com/container-platform/latest/monitoring/reviewing-monitoring-dashboards.adoc#reviewing-monitoring-dashboards-developer_reviewing-monitoring-dashboards[*Dashboards*] in the {ocp-product-title} web console *Developer* perspective. -endif::[] - -ifdef::openshift-dedicated,openshift-rosa[] -You can view different metrics for {ServerlessProductName} by navigating to *Dashboards* in the {ocp-product-title} web console *Developer* perspective. -endif::[] +You can view different metrics for {ServerlessProductName} by navigating to link:https://docs.openshift.com/container-platform/latest/monitoring/reviewing-monitoring-dashboards.html#reviewing-monitoring-dashboards-developer_reviewing-monitoring-dashboards[*Dashboards*] in the web console *Developer* perspective. [WARNING] ==== If {SMProductShortName} is enabled with mTLS, metrics for Knative Serving are disabled by default because Service Mesh prevents Prometheus from scraping metrics. -ifndef::openshift-dedicated[] For information about resolving this issue, see xref:../../integrations/serverless-ossm-setup.adoc#serverless-ossm-enabling-serving-metrics_serverless-ossm-setup[Enabling Knative Serving metrics when using Service Mesh with mTLS]. -endif::[] Scraping the metrics does not affect autoscaling of a Knative service, because scraping requests do not go through the activator. Consequently, no scraping takes place if no pods are running. 
==== - -ifdef::openshift-enterprise[] [id="additional-resources_serverless-service-monitoring"] [role="_additional-resources"] -== Additional resources -* link:https://docs.openshift.com/container-platform/latest/monitoring/monitoring-overview.adoc#monitoring-overview[Monitoring overview] -* link:https://docs.openshift.com/container-platform/latest/monitoring/managing-metrics.adoc#specifying-how-a-service-is-monitored[Enabling monitoring for user-defined projects] -* link:https://docs.openshift.com/container-platform/latest/monitoring/enabling-monitoring-for-user-defined-projects.adoc#enabling-monitoring-for-user-defined-projects[Specifying how a service is monitored] -endif::[] +== Additional resources for {ocp-product-title} +* link:https://docs.openshift.com/container-platform/latest/monitoring/monitoring-overview.html#monitoring-overview[Monitoring overview] +* link:https://docs.openshift.com/container-platform/latest/monitoring/managing-metrics.html#specifying-how-a-service-is-monitored[Specifying how a service is monitored] +* link:https://docs.openshift.com/container-platform/latest/monitoring/enabling-monitoring-for-user-defined-projects.html#enabling-monitoring-for-user-defined-projects[Enabling monitoring for user-defined projects] diff --git a/observability/tracing/serverless-tracing-open-telemetry.adoc b/observability/tracing/serverless-tracing-open-telemetry.adoc index f3ad6da3d21f..fb3221410636 100644 --- a/observability/tracing/serverless-tracing-open-telemetry.adoc +++ b/observability/tracing/serverless-tracing-open-telemetry.adoc @@ -6,7 +6,4 @@ include::_attributes/common-attributes.adoc[] You can use {DTProductName} with {ServerlessProductName} to monitor and troubleshoot serverless applications.
-ifdef::openshift-enterprise[] -// we can only use this module for OCP until OSD docs have distributed tracing install docs available, since this is part of the prereqs include::modules/serverless-open-telemetry.adoc[leveloffset=+1] -endif::[] diff --git a/observability/tracing/serverless-tracing.adoc b/observability/tracing/serverless-tracing.adoc index 94739deb8942..603426b54dc6 100644 --- a/observability/tracing/serverless-tracing.adoc +++ b/observability/tracing/serverless-tracing.adoc @@ -4,19 +4,14 @@ include::_attributes/common-attributes.adoc[] = Tracing requests :context: serverless-tracing - toc::[] Distributed tracing records the path of a request through the various services that make up an application. It is used to tie information about different units of work together, to understand a whole chain of events in a distributed transaction. The units of work might be executed in different processes or hosts. -ifdef::openshift-enterprise[] include::modules/distr-tracing-product-overview.adoc[leveloffset=+1] -endif::[] -ifdef::openshift-enterprise[] [id="additional-resources_serverless-tracing"] [role="_additional-resources"] -== Additional resources -* link:https://docs.openshift.com/container-platform/latest/distr_tracing/distr_tracing_arch/distr-tracing-architecture.adoc#distr-tracing-architecture[{DTProductName} architecture] -* link:https://docs.openshift.com/container-platform/latest/distr_tracing/distr_tracing_install/distr-tracing-installing.adoc#installing-distributed-tracing[Installing distributed tracing] -endif::[] +== Additional resources for {ocp-product-title} +* link:https://docs.openshift.com/container-platform/latest/distr_tracing/distr_tracing_arch/distr-tracing-architecture.html#distr-tracing-architecture[{DTProductName} architecture] +* link:https://docs.openshift.com/container-platform/latest/distr_tracing/distr_tracing_install/distr-tracing-installing.html#installing-distributed-tracing[Installing distributed tracing] diff --git 
a/removing/removing-serverless-operator.adoc b/removing/removing-serverless-operator.adoc index b7d5bda0e4b4..3a05737531f2 100644 --- a/removing/removing-serverless-operator.adoc +++ b/removing/removing-serverless-operator.adoc @@ -4,8 +4,10 @@ include::_attributes/common-attributes.adoc[] = Removing the {ServerlessOperatorName} :context: removing-serverless-operator -After you have removed Knative Serving and Knative Eventing, you can remove the {ServerlessOperatorName}. You can do this by using the {ocp-product-title} web console or the `oc` CLI. +After you have removed Knative Serving and Knative Eventing, you can remove the {ServerlessOperatorName}. You can do this by using the web console or the `oc` CLI. -include::modules/olm-deleting-operators-from-a-cluster-using-web-console.adoc[leveloffset=+1] -include::modules/olm-deleting-operators-from-a-cluster-using-cli.adoc[leveloffset=+1] -include::modules/olm-refresh-subs.adoc[leveloffset=+1] +* link:https://docs.openshift.com/container-platform/latest/operators/admin/olm-deleting-operators-from-cluster.html#olm-deleting-operators-from-a-cluster-using-web-console_olm-deleting-operators-from-a-cluster[Deleting Operators from a cluster using the web console] + +* link:https://docs.openshift.com/container-platform/latest/operators/admin/olm-deleting-operators-from-cluster.html#olm-deleting-operators-from-a-cluster-using-cli_olm-deleting-operators-from-a-cluster[Deleting Operators from a cluster using the CLI] + +* link:https://docs.openshift.com/container-platform/latest/operators/admin/olm-deleting-operators-from-cluster.html#olm-refresh-subs_olm-deleting-operators-from-a-cluster[Refreshing failing subscriptions]