diff --git a/_topic_maps/_topic_map.yml b/_topic_maps/_topic_map.yml index f4834f8ac6ef..3cead4a5727a 100644 --- a/_topic_maps/_topic_map.yml +++ b/_topic_maps/_topic_map.yml @@ -2599,27 +2599,27 @@ Topics: --- Name: Distributed tracing -Dir: jaeger +Dir: distr_tracing Distros: openshift-enterprise Topics: - Name: Distributed tracing release notes File: distributed-tracing-release-notes - Name: Distributed tracing architecture - Dir: jaeger_arch + Dir: distr_tracing_arch Topics: - Name: Distributed tracing architecture - File: rhbjaeger-architecture + File: distr-tracing-architecture - Name: Distributed tracing installation - Dir: jaeger_install + Dir: distr_tracing_install Topics: - Name: Installing distributed tracing - File: rhbjaeger-installation + File: distr-tracing-installing - Name: Configuring distributed tracing - File: rhbjaeger-deploying + File: distr-tracing-deploying - Name: Upgrading distributed tracing - File: rhbjaeger-updating + File: distr-tracing-updating - Name: Removing distributed tracing - File: rhbjaeger-removing + File: distr-tracing-removing --- Name: OpenShift Virtualization Dir: virt diff --git a/distr_tracing/distr_tracing_arch/distr-tracing-architecture.adoc b/distr_tracing/distr_tracing_arch/distr-tracing-architecture.adoc new file mode 100644 index 000000000000..a301e44453e8 --- /dev/null +++ b/distr_tracing/distr_tracing_arch/distr-tracing-architecture.adoc @@ -0,0 +1,46 @@ +[id="distr-tracing-architecture"] += Distributed tracing architecture +include::modules/distr-tracing-document-attributes.adoc[] +:context: distributed-tracing-architecture + +toc::[] + +Every time a user takes an action in an application, a request is executed by the architecture that may require dozens of different services to participate to produce a response. +{DTProductName} lets you perform distributed tracing, which records the path of a request through various microservices that make up an application. 
+ +_Distributed tracing_ is a technique that is used to tie the information about different units of work together — usually executed in different processes or hosts — to understand a whole chain of events in a distributed transaction. +Developers can visualize call flows in large microservice architectures with distributed tracing. +It is valuable for understanding serialization, parallelism, and sources of latency. + +{DTProductName} records the execution of individual requests across the whole stack of microservices, and presents them as traces. A _trace_ is a data/execution path through the system. An end-to-end trace is composed of one or more spans. + +A _span_ represents a logical unit of work in {DTProductName} that has an operation name, the start time of the operation, and the duration, as well as potentially tags and logs. Spans may be nested and ordered to model causal relationships. + +// The following include statements pull in the module files that comprise the assembly. 
+ +include::modules/distr-tracing-product-overview.adoc[leveloffset=+1] + +include::modules/distr-tracing-features.adoc[leveloffset=+1] + +include::modules/distr-tracing-architecture.adoc[leveloffset=+1] + +//// +TODO +WRITE more detailed component docs + +include::modules/distr-tracing-client-java.adoc[leveloffset=+1] + +include::modules/distr-tracing-agent.adoc[leveloffset=+1] + +include::modules/distr-tracing--jaeger-collector.adoc[leveloffset=+1] + +include::modules/distr-tracing-otel-collector.adoc[leveloffset=+1] + +include::modules/distr-tracing-data-store.adoc[leveloffset=+1] + +include::modules/distr-tracing-query.adoc[leveloffset=+1] + +include::modules/distr-tracing-ingester.adoc[leveloffset=+1] + +include::modules/distr-tracing-console.adoc[leveloffset=+1] +//// diff --git a/distr_tracing/distr_tracing_arch/images b/distr_tracing/distr_tracing_arch/images new file mode 120000 index 000000000000..e4c5bd02a10a --- /dev/null +++ b/distr_tracing/distr_tracing_arch/images @@ -0,0 +1 @@ +../images/ \ No newline at end of file diff --git a/distr_tracing/distr_tracing_arch/modules b/distr_tracing/distr_tracing_arch/modules new file mode 120000 index 000000000000..43aab75b53c9 --- /dev/null +++ b/distr_tracing/distr_tracing_arch/modules @@ -0,0 +1 @@ +../modules/ \ No newline at end of file diff --git a/distr_tracing/distr_tracing_config/images b/distr_tracing/distr_tracing_config/images new file mode 120000 index 000000000000..e4c5bd02a10a --- /dev/null +++ b/distr_tracing/distr_tracing_config/images @@ -0,0 +1 @@ +../images/ \ No newline at end of file diff --git a/distr_tracing/distr_tracing_config/modules b/distr_tracing/distr_tracing_config/modules new file mode 120000 index 000000000000..43aab75b53c9 --- /dev/null +++ b/distr_tracing/distr_tracing_config/modules @@ -0,0 +1 @@ +../modules/ \ No newline at end of file diff --git a/distr_tracing/distr_tracing_config/serverless-jaeger-integration.adoc 
b/distr_tracing/distr_tracing_config/serverless-jaeger-integration.adoc new file mode 100644 index 000000000000..774689e16f43 --- /dev/null +++ b/distr_tracing/distr_tracing_config/serverless-jaeger-integration.adoc @@ -0,0 +1,11 @@ +include::modules/serverless-document-attributes.adoc[] +[id="serverless-jaeger-integration"] += Integrating distributed tracing with serverless applications using OpenShift Serverless +:context: serverless-jaeger-integration +include::modules/common-attributes.adoc[] + +toc::[] + +You can enable distributed tracing with xref:../../serverless/serverless-getting-started.adoc#serverless-getting-started[{ServerlessProductName}] for your serverless applications on {product-title}. + +include::modules/serverless-jaeger-config.adoc[leveloffset=+1] diff --git a/distr_tracing/distr_tracing_install/distr-tracing-deploying.adoc b/distr_tracing/distr_tracing_install/distr-tracing-deploying.adoc new file mode 100644 index 000000000000..eeb6372f33dd --- /dev/null +++ b/distr_tracing/distr_tracing_install/distr-tracing-deploying.adoc @@ -0,0 +1,86 @@ +[id="distr-tracing-deploying"] += Configuring and deploying distributed tracing +include::modules/distr-tracing-document-attributes.adoc[] +:context: deploying-distributed-tracing + +toc::[] + +The {JaegerName} Operator uses a custom resource definition (CRD) file that defines the architecture and configuration settings to be used when creating and deploying the {JaegerShortName} resources. You can either install the default configuration or modify the file to better suit your business requirements. + +{JaegerName} has predefined deployment strategies. You specify a deployment strategy in the custom resource file. When you create a {JaegerShortName} instance the Operator uses this configuration file to create the objects necessary for the deployment. 
+ +.Jaeger custom resource file showing deployment strategy +[source,yaml] +---- +apiVersion: jaegertracing.io/v1 +kind: Jaeger +metadata: + name: MyConfigFile +spec: + strategy: production <1> +---- + +<1> The {JaegerName} Operator currently supports the following deployment strategies: + +* *allInOne* (Default) - This strategy is intended for development, testing, and demo purposes; it is not intended for production use. The main backend components, Agent, Collector, and Query service, are all packaged into a single executable which is configured, by default, to use in-memory storage. ++ +[NOTE] +==== +In-memory storage is not persistent, which means that if the {JaegerShortName} instance shuts down, restarts, or is replaced, your trace data will be lost. Also, in-memory storage cannot be scaled, since each pod has its own memory. For persistent storage, you must use the `production` or `streaming` strategies, which use Elasticsearch as the default storage. +==== + +* *production* - The production strategy is intended for production environments, where long-term storage of trace data is important and a more scalable and highly available architecture is required. Each of the backend components is therefore deployed separately. The Agent can be injected as a sidecar on the instrumented application. The Query and Collector services are configured with a supported storage type - currently Elasticsearch. Multiple instances of each of these components can be provisioned as required for performance and resilience purposes. + +* *streaming* - The streaming strategy is designed to augment the production strategy by providing a streaming capability that effectively sits between the Collector and the Elasticsearch backend storage. 
This provides the benefit of reducing the pressure on the backend storage, under high load situations, and enables other trace post-processing capabilities to tap into the real-time span data directly from the streaming platform (https://access.redhat.com/documentation/en-us/red_hat_amq/7.6/html/using_amq_streams_on_openshift/index[AMQ Streams]/ https://kafka.apache.org/documentation/[Kafka]). ++ +[NOTE] +==== +The streaming strategy requires an additional Red Hat subscription for AMQ Streams. +==== + +[NOTE] +==== +The streaming deployment strategy is currently unsupported on IBM Z. +==== + +[NOTE] +==== +There are two ways to install and use {DTProductName}, as part of a service mesh or as a standalone component. If you have installed {DTShortName} as part of Red Hat OpenShift Service Mesh, you can perform basic configuration as part of the xref:../../service_mesh/v2x/installing-ossm.adoc#installing-ossm[ServiceMeshControlPlane], but for complete control you should configure a Jaeger CR and then xref:../../service_mesh/v2x/ossm-observability.adoc#ossm-config-external-jaeger_observability[reference your distributed tracing configuration file in the ServiceMeshControlPlane]. + +==== 
+ +include::modules/distr-tracing-deploy-default.adoc[leveloffset=+1] + +include::modules/distr-tracing-deploy-production-es.adoc[leveloffset=+1] + +include::modules/distr-tracing-deploy-streaming.adoc[leveloffset=+1] + +[id="customizing-your-deployment"] +== Customizing your deployment + +include::modules/distr-tracing-deployment-best-practices.adoc[leveloffset=+2] + +include::modules/distr-tracing-config-default.adoc[leveloffset=+2] + +include::modules/distr-tracing-config-jaeger-collector.adoc[leveloffset=+2] + +//include::modules/distr-tracing-config-otel-collector.adoc[leveloffset=+2] + +include::modules/distr-tracing-config-sampling.adoc[leveloffset=+2] + +include::modules/distr-tracing-config-storage.adoc[leveloffset=+2] + +include::modules/distr-tracing-config-query.adoc[leveloffset=+2] + +include::modules/distr-tracing-config-ingester.adoc[leveloffset=+2] + +[id="injecting-sidecars"] +== Injecting sidecars + +{JaegerName} relies on a proxy sidecar within the application's pod to provide the agent. The {JaegerName} Operator can inject Agent sidecars into Deployment workloads. You can enable automatic sidecar injection or manage it manually. + +include::modules/distr-tracing-sidecar-automatic.adoc[leveloffset=+2] + +include::modules/distr-tracing-sidecar-manual.adoc[leveloffset=+2] diff --git a/distr_tracing/distr_tracing_install/distr-tracing-installing.adoc b/distr_tracing/distr_tracing_install/distr-tracing-installing.adoc new file mode 100644 index 000000000000..d1ab1f4a5071 --- /dev/null +++ b/distr_tracing/distr_tracing_install/distr-tracing-installing.adoc @@ -0,0 +1,42 @@ +[id="installing-distributed-tracing"] += Installing distributed tracing +include::modules/distr-tracing-document-attributes.adoc[] +:context: install-distributed-tracing + +toc::[] + +You can install {DTProductName} on {product-title} in either of two ways: + +* You can install {DTProductName} as part of Red Hat OpenShift Service Mesh. 
Distributed tracing is included by default in the Service Mesh installation. To install {DTProductName} as part of a service mesh, follow the xref:../../service_mesh/v2x/preparing-ossm-installation.adoc#preparing-ossm-installation[Red Hat Service Mesh Installation] instructions. You must install {DTProductName} in the same namespace as your service mesh, that is, the `ServiceMeshControlPlane` and the {DTProductName} resources must be in the same namespace. + +* If you do not want to install a service mesh, you can use the {DTProductName} Operators to install {DTShortName} by itself. To install {DTProductName} without a service mesh, use the following instructions. + +== Prerequisites + +Before you can install {DTProductName}, review the installation activities, and ensure that you meet the prerequisites: + +* Possess an active {product-title} subscription on your Red Hat account. If you do not have a subscription, contact your sales representative for more information. + +* Review the xref:../../architecture/architecture-installation.adoc#installation-overview_architecture-installation[{product-title} {product-version} overview]. +* Install {product-title} {product-version}. + +** xref:../../installing/installing_aws/installing-aws-account.adoc#installing-aws-account[Install {product-title} {product-version} on AWS] +** xref:../../installing/installing_aws/installing-aws-user-infra.adoc#installing-aws-user-infra[Install {product-title} {product-version} on user-provisioned AWS] +** xref:../../installing/installing_bare_metal/installing-bare-metal.adoc#installing-bare-metal[Install {product-title} {product-version} on bare metal] +** xref:../../installing/installing_vsphere/installing-vsphere.adoc#installing-vsphere[Install {product-title} {product-version} on vSphere] +* Install the version of the OpenShift CLI (`oc`) that matches your {product-title} version and add it to your path. + +* An account with the `cluster-admin` role. 
+ +// The following include statements pull in the module files that comprise the assembly. + +include::modules/distr-tracing-install-overview.adoc[leveloffset=+1] + +include::modules/distr-tracing-install-elasticsearch.adoc[leveloffset=+1] + +include::modules/distr-tracing-install-jaeger-operator.adoc[leveloffset=+1] + +//// +== Next steps +* xref:../../distr_tracing/distr_tracing_install/distr-tracing-deploying.adoc#deploying-distributed-tracing[Deploy {DTProductName}]. +//// diff --git a/distr_tracing/distr_tracing_install/distr-tracing-removing.adoc b/distr_tracing/distr_tracing_install/distr-tracing-removing.adoc new file mode 100644 index 000000000000..f68f301e3023 --- /dev/null +++ b/distr_tracing/distr_tracing_install/distr-tracing-removing.adoc @@ -0,0 +1,30 @@ +[id="removing-distributed-tracing"] += Removing distributed tracing +include::modules/distr-tracing-document-attributes.adoc[] +:context: removing-distributed-tracing + +toc::[] + +The steps for removing {DTProductName} from an {product-title} cluster are as follows: + +. Shut down any {DTProductName} pods. +. Remove any {DTProductName} instances. +. Remove the {JaegerName} Operator. +. Remove the {OTELName} Operator. + +include::modules/distr-tracing-removing-instance.adoc[leveloffset=+1] + +include::modules/distr-tracing-removing-instance-cli.adoc[leveloffset=+1] + + +== Removing the {DTProductName} Operators + +.Procedure + +. Follow the instructions for xref:../../operators/admin/olm-deleting-operators-from-cluster.adoc#olm-deleting-operators-from-a-cluster[Deleting Operators from a cluster]. + +* Remove the {JaegerName} Operator. + +//* Remove the {OTELName} Operator. + +* After the {JaegerName} Operator has been removed, if appropriate, remove the OpenShift Elasticsearch Operator. 
diff --git a/distr_tracing/distr_tracing_install/distr-tracing-updating.adoc b/distr_tracing/distr_tracing_install/distr-tracing-updating.adoc new file mode 100644 index 000000000000..b91410c396ca --- /dev/null +++ b/distr_tracing/distr_tracing_install/distr-tracing-updating.adoc @@ -0,0 +1,14 @@ +[id="upgrading-distributed-tracing"] += Upgrading distributed tracing +include::modules/distr-tracing-document-attributes.adoc[] +:context: upgrading-distributed-tracing + +toc::[] + +Operator Lifecycle Manager (OLM) controls the installation, upgrade, and role-based access control (RBAC) of Operators in a cluster. The OLM runs by default in {product-title}. +OLM queries for available Operators as well as upgrades for installed Operators. +For more information about how {product-title} handles upgrades, refer to the xref:../../operators/understanding/olm/olm-understanding-olm.adoc#olm-understanding-olm[Operator Lifecycle Manager] documentation. + +During an update, the {DTProductName} Operators upgrade the managed {DTShortName} instances to the version associated with the Operator. Whenever a new version of the {JaegerName} Operator is installed, all the {JaegerShortName} application instances managed by the Operator are upgraded to the Operator's version. For example, after upgrading the Operator from 1.10 installed to 1.11, the Operator scans for running {JaegerShortName} instances and upgrades them to 1.11 as well. + +For specific instructions on how to update the OpenShift Elasticsearch Operator, refer to xref:../../logging/cluster-logging-upgrading.adoc#cluster-logging-upgrading_cluster-logging-upgrading[Updating OpenShift Logging]. 
diff --git a/distr_tracing/distr_tracing_install/images b/distr_tracing/distr_tracing_install/images new file mode 120000 index 000000000000..e4c5bd02a10a --- /dev/null +++ b/distr_tracing/distr_tracing_install/images @@ -0,0 +1 @@ +../images/ \ No newline at end of file diff --git a/distr_tracing/distr_tracing_install/modules b/distr_tracing/distr_tracing_install/modules new file mode 120000 index 000000000000..43aab75b53c9 --- /dev/null +++ b/distr_tracing/distr_tracing_install/modules @@ -0,0 +1 @@ +../modules/ \ No newline at end of file diff --git a/jaeger/distributed-tracing-release-notes.adoc b/distr_tracing/distributed-tracing-release-notes.adoc similarity index 100% rename from jaeger/distributed-tracing-release-notes.adoc rename to distr_tracing/distributed-tracing-release-notes.adoc diff --git a/distr_tracing/images b/distr_tracing/images new file mode 120000 index 000000000000..5e67573196d8 --- /dev/null +++ b/distr_tracing/images @@ -0,0 +1 @@ +../images \ No newline at end of file diff --git a/distr_tracing/modules b/distr_tracing/modules new file mode 120000 index 000000000000..43aab75b53c9 --- /dev/null +++ b/distr_tracing/modules @@ -0,0 +1 @@ +../modules/ \ No newline at end of file diff --git a/modules/distr-tracing-architecture.adoc b/modules/distr-tracing-architecture.adoc new file mode 100644 index 000000000000..995fda21d8c7 --- /dev/null +++ b/modules/distr-tracing-architecture.adoc @@ -0,0 +1,24 @@ +//// +This module included in the following assemblies: +-service_mesh/v2x/ossm-architecture.adoc +-dist_tracing_arch/distr-tracing-architecture.adoc +//// + +[id="distributed-tracing-architecture_{context}"] += {DTProductName} architecture + +{DTProductName} is made up of several components that work together to collect, store, and display tracing data. + +* *Client* (Jaeger client, Tracer, Reporter, instrumented application, client libraries)- The {JaegerShortName} clients are language-specific implementations of the OpenTracing API. 
They can be used to instrument applications for distributed tracing either manually or with a variety of existing open source frameworks, such as Camel (Fuse), Spring Boot (RHOAR), MicroProfile (RHOAR/Thorntail), Wildfly (EAP), and many more, that are already integrated with OpenTracing. + +* *Agent* (Jaeger agent, Server Queue, Processor Workers) - The {JaegerShortName} agent is a network daemon that listens for spans sent over User Datagram Protocol (UDP), which it batches and sends to the Collector. The agent is meant to be placed on the same host as the instrumented application. This is typically accomplished by having a sidecar in container environments such as Kubernetes. + +* *Collector* (Jaeger Collector, Queue, Workers) - Similar to the agent, the Collector receives spans and places them in an internal queue for processing. This allows the Collector to return immediately to the client/agent instead of waiting for the span to make its way to the storage. + +* *Storage* (Data Store) - Collectors require a persistent storage backend. {JaegerName} has a pluggable mechanism for span storage. Note that for this release, the only supported storage is Elasticsearch. + +* *Query* (Query Service) - Query is a service that retrieves traces from storage. + +* *Ingester* (Ingester Service) - {DTProductName} can use Apache Kafka as a buffer between the Collector and the actual Elasticsearch backing storage. Ingester is a service that reads data from Kafka and writes to the Elasticsearch storage backend. + +* *Jaeger Console* – With the {JaegerName} user interface, you can visualize your distributed tracing data. On the Search page, you can find traces and explore details of the spans that make up an individual trace. 
diff --git a/modules/distr-tracing-config-default.adoc b/modules/distr-tracing-config-default.adoc new file mode 100644 index 000000000000..7baa2a4249ba --- /dev/null +++ b/modules/distr-tracing-config-default.adoc @@ -0,0 +1,125 @@ +//// +This module included in the following assemblies: +- distr_tracing_install/distr-tracing-deploying.adoc +//// + +[id="distr-tracing-config-default_{context}"] += Distributed tracing default configuration options + +The Jaeger custom resource (CR) defines the architecture and settings to be used when creating the {JaegerShortName} resources. You can modify these parameters to customize your {JaegerShortName} implementation to your business needs. + +.Jaeger generic YAML example +[source,yaml] +---- +apiVersion: jaegertracing.io/v1 +kind: Jaeger +metadata: + name: name +spec: + strategy: + allInOne: + options: {} + resources: {} + agent: + options: {} + resources: {} + collector: + options: {} + resources: {} + sampling: + options: {} + storage: + type: + options: {} + query: + options: {} + resources: {} + ingester: + options: {} + resources: {} + options: {} +---- + +.Jaeger parameters +[options="header"] +|=== +|Parameter |Description |Values |Default value + +|`apiVersion:` +||API version to use when creating the object. +|`jaegertracing.io/v1` +|`jaegertracing.io/v1` + +|`kind:` +|Defines the kind of Kubernetes object to create. +|`jaeger` +| + +|`metadata:` +|Data that helps uniquely identify the object, including a `name` string, `UID`, and optional `namespace`. +| +|{product-title} automatically generates the `UID` and completes the `namespace` with the name of the project where the object is created. + +|`name:` +|Name for the object. +|The name of your {JaegerShortName} instance. +|`jaeger-all-in-one-inmemory` + +|`spec:` +|Specification for the object to be created. +|Contains all of the configuration parameters for your {JaegerShortName} instance. 
When a common definition for all Jaeger components is required, it is defined under the `spec` node. When the definition relates to an individual component, it is placed under the `spec/<component>` node. +|N/A + +|`strategy:` +|Jaeger deployment strategy. +|`allInOne`, `production`, or `streaming` +|`allInOne` + +|`allInOne:` +|Because the `allInOne` image deploys the Agent, Collector, Query, Ingester, and Jaeger UI in a single pod, configuration for this deployment must nest component configuration under the `allInOne` parameter. +| +| + +|`agent:` +|Configuration options that define the Agent. +| +| + +|`collector:` +|Configuration options that define the Jaeger Collector. +| +| + +|`sampling:` +|Configuration options that define the sampling strategies for tracing. +| +| + +|`storage:` +|Configuration options that define the storage. All storage-related options must be placed under `storage`, rather than under the `allInOne` or other component options. +| +| + +|`query:` +|Configuration options that define the Query service. +| +| + +|`ingester:` +|Configuration options that define the Ingester service. +| +| + +|=== + + +The following example YAML is the minimum required to create a {JaegerName} deployment using the default settings. + +.Example minimum required dist-tracing-all-in-one.yaml +[source,yaml] +---- +apiVersion: jaegertracing.io/v1 +kind: Jaeger +metadata: + name: jaeger-all-in-one-inmemory +---- diff --git a/modules/distr-tracing-config-ingester.adoc b/modules/distr-tracing-config-ingester.adoc new file mode 100644 index 000000000000..c1b0101f0eb1 --- /dev/null +++ b/modules/distr-tracing-config-ingester.adoc @@ -0,0 +1,76 @@ +//// +This module included in the following assemblies: +- distr_tracing_install/distr-tracing-deploying.adoc +//// + +[id="distr-tracing-config-ingester_{context}"] += Ingester configuration options + +Ingester is a service that reads from a Kafka topic and writes to the Elasticsearch storage backend. 
If you are using the `allInOne` or `production` deployment strategies, you do not need to configure the Ingester service. + +.Jaeger parameters passed to the Ingester +[options="header"] +[cols="l, a, a"] +|=== +|Parameter |Description |Values +|spec: + ingester: + options: {} +|Configuration options that define the Ingester service. +| + +|options: + deadlockInterval: +|Specifies the interval, in seconds or minutes, that the Ingester must wait for a message before terminating. +The deadlock interval is disabled by default (set to `0`), to avoid terminating the Ingester when no messages arrive during system initialization. +|Minutes and seconds, for example, `1m0s`. Default value is `0`. + +|options: + kafka: + consumer: + topic: +|The `topic` parameter identifies the Kafka configuration used by the collector to produce the messages, and the Ingester to consume the messages. +|Label for the consumer. For example, `jaeger-spans`. + +|options: + kafka: + consumer: + brokers: +|Identifies the Kafka configuration used by the Ingester to consume the messages. +|Label for the broker, for example, `my-cluster-kafka-brokers.kafka:9092`. + +|options: + log-level: +|Logging level for the Ingester. +|Possible values: `trace`, `debug`, `info`, `warning`, `error`, `fatal`, `panic`. 
+|=== + +.Streaming Collector and Ingester example +[source,yaml] +---- +apiVersion: jaegertracing.io/v1 +kind: Jaeger +metadata: + name: simple-streaming +spec: + strategy: streaming + collector: + options: + kafka: + producer: + topic: jaeger-spans + brokers: my-cluster-kafka-brokers.kafka:9092 + ingester: + options: + kafka: + consumer: + topic: jaeger-spans + brokers: my-cluster-kafka-brokers.kafka:9092 + ingester: + deadlockInterval: 5 + storage: + type: elasticsearch + options: + es: + server-urls: http://elasticsearch:9200 +---- diff --git a/modules/distr-tracing-config-jaeger-collector.adoc b/modules/distr-tracing-config-jaeger-collector.adoc new file mode 100644 index 000000000000..a16d1f454956 --- /dev/null +++ b/modules/distr-tracing-config-jaeger-collector.adoc @@ -0,0 +1,66 @@ +//// +This module included in the following assemblies: +- distr_tracing_install/distr-tracing-deploying.adoc +//// + +[id="distr-tracing-config-jaeger-collector_{context}"] += Jaeger Collector configuration options + +The Jaeger Collector is the component responsible for receiving the spans that were captured by the tracer and writing them to persistent Elasticsearch storage when using the `production` strategy, or to AMQ Streams when using the `streaming` strategy. + +The Collectors are stateless and thus many instances of Jaeger Collector can be run in parallel. Collectors require almost no configuration, except for the location of the Elasticsearch cluster. + +.Parameters used by the Operator to define the Jaeger Collector +[options="header"] +[cols="l, a, a"] +|=== +|Parameter |Description |Values +|collector: + replicas: +|Specifies the number of Collector replicas to create. +|Integer, for example, `5` +|=== + + +.Configuration parameters passed to the Collector +[options="header"] +[cols="l, a, a"] +|=== +|Parameter |Description |Values +|spec: + collector: + options: {} +|Configuration options that define the Jaeger Collector. 
+| + +|options: + collector: + num-workers: +|The number of workers pulling from the queue. +|Integer, for example, `50` + +|options: + collector: + queue-size: +|The size of the Collector queue. +|Integer, for example, `2000` + +|options: + kafka: + producer: + topic: jaeger-spans +|The `topic` parameter identifies the Kafka configuration used by the Collector to produce the messages, and the Ingester to consume the messages. +|Label for the producer. + +|options: + kafka: + producer: + brokers: my-cluster-kafka-brokers.kafka:9092 +|Identifies the Kafka configuration used by the Collector to produce the messages. If brokers are not specified, and you have AMQ Streams 1.4.0+ installed, the {JaegerName} Operator will self-provision Kafka. +| + +|options: + log-level: +|Logging level for the Collector. +|`trace`, `debug`, `info`, `warning`, `error`, `fatal`, `panic` +|=== diff --git a/modules/distr-tracing-config-otel-collector.adoc b/modules/distr-tracing-config-otel-collector.adoc new file mode 100644 index 000000000000..6aa2a3de8fd1 --- /dev/null +++ b/modules/distr-tracing-config-otel-collector.adoc @@ -0,0 +1,39 @@ +//// +This module included in the following assemblies: +-distr_tracing_install/distributed-tracing-deploying.adoc + +STUB TOPIC for documenting OTEL collector options (Duplicated from Jaeger Collector) +//// + +[id="distributed-tracing-config-otel-collector_{context}"] += OpenTelemetry Collector configuration options + +#TECH PREVIEW BOILERPLATE HERE# +#What is the actual Operator name?# + +The OpenTelemetry Collector is the component responsible for receiving the spans that were captured by the tracer and writing them to an Elasticsearch persistent storage when using the `production` strategy, or to AMQ Streams when using the `streaming` strategy. + +The Collectors are stateless and thus many instances of OpenTelemetry Collector can be run in parallel. Collectors require almost no configuration, except for the location of the Elasticsearch cluster. 
+ +.Parameters used by the Operator to define the OpenTelemetry Collector +[options="header"] +[cols="l, a, a"] +|=== +|Parameter |Description |Values +| +| +| +|=== + + +.Parameters passed to the Collector +[options="header"] +[cols="l, a, a"] +|=== +|Parameter |Description |Values +|spec: + collector: + options: {} +| +| +|=== diff --git a/modules/distr-tracing-config-query.adoc b/modules/distr-tracing-config-query.adoc new file mode 100644 index 000000000000..f1fd2b9373c0 --- /dev/null +++ b/modules/distr-tracing-config-query.adoc @@ -0,0 +1,68 @@ +//// +This module included in the following assemblies: +- distr_tracing_install/distr-tracing-deploying.adoc +//// + +[id="distr-tracing-config-query_{context}"] += Query configuration options + +Query is a service that retrieves traces from storage and hosts the user interface to display them. + +.Parameters used by the {JaegerName} Operator to define Query +[options="header"] +[cols="l, a, a, a"] +|=== +|Parameter |Description |Values |Default value + +|spec: + query: + replicas: +|Specifies the number of Query replicas to create. +|Integer, for example, `2` +| + +|=== + + +.Configuration parameters passed to Query +[options="header"] +[cols="l, a, a, a"] +|=== +|Parameter |Description |Values |Default value + +|spec: + query: + options: {} +|Configuration options that define the Query service. +| +| + +|options: + log-level: +|Logging level for Query. +|Possible values: `trace`, `debug`, `info`, `warning`, `error`, `fatal`, `panic`. +| + +|options: + query: + base-path: +|The base path for all jaeger-query HTTP routes can be set to a non-root value, for example, `/jaeger` would cause all UI URLs to start with `/jaeger`. This can be useful when running jaeger-query behind a reverse proxy. 
+|/{path} +| +|=== + +.Sample Query configuration +[source,yaml] +---- +apiVersion: jaegertracing.io/v1 +kind: "Jaeger" +metadata: + name: "my-jaeger" +spec: + strategy: allInOne + allInOne: + options: + log-level: debug + query: + base-path: /jaeger +---- diff --git a/modules/distr-tracing-config-sampling.adoc b/modules/distr-tracing-config-sampling.adoc new file mode 100644 index 000000000000..dbadc570170c --- /dev/null +++ b/modules/distr-tracing-config-sampling.adoc @@ -0,0 +1,99 @@ +//// +This module included in the following assemblies: +- distr_tracing_install/distr-tracing-deploying.adoc +//// + +[id="distr-tracing-config-sampling_{context}"] += Distributed tracing sampling configuration options + +The {JaegerName} Operator can be used to define sampling strategies that will be supplied to tracers that have been configured to use a remote sampler. + +While all traces are generated, only a few are sampled. Sampling a trace marks the trace for further processing and storage. + +[NOTE] +==== +This is not relevant if a trace was started by the Envoy proxy, as the sampling decision is made there. The Jaeger sampling decision is only relevant when the trace is started by an application using the client. +==== + +When a service receives a request that contains no trace context, the client starts a new trace, assigns it a random trace ID, and makes a sampling decision based on the currently installed sampling strategy. The sampling decision propagates to all subsequent requests in the trace so that other services are not making the sampling decision again. + +{JaegerShortName} libraries support the following samplers: + +* *Probabilistic* - The sampler makes a random sampling decision with the probability of sampling equal to the value of the `sampling.param` property. For example, using `sampling.param=0.1` samples approximately 1 in 10 traces. 
+ +* *Rate Limiting* - The sampler uses a leaky bucket rate limiter to ensure that traces are sampled with a certain constant rate. For example, using `sampling.param=2.0` samples requests with the rate of 2 traces per second. + +.Jaeger sampling options +[options="header"] +[cols="l, a, a, a"] +|=== +|Parameter |Description |Values |Default value +|spec: + sampling: + options: {} + default_strategy: + service_strategy: +|Configuration options that define the sampling strategies for tracing. +| +|If you do not provide configuration, the Collectors will return the default probabilistic sampling policy with 0.001 (0.1%) probability for all services. + +|default_strategy: + type: +service_strategy: + type: +|Sampling strategy to use. See descriptions above. +|Valid values are `probabilistic`, and `ratelimiting`. +|`probabilistic` + +|default_strategy: + param: +service_strategy: + param: +|Parameters for the selected sampling strategy. +|Decimal and integer values (0, .1, 1, 10) +|1 +|=== + +This example defines a default sampling strategy that is probabilistic, with a 50% chance of the trace instances being sampled. 
+ +.Probabilistic sampling example +[source,yaml] +---- +apiVersion: jaegertracing.io/v1 +kind: Jaeger +metadata: + name: with-sampling +spec: + sampling: + options: + default_strategy: + type: probabilistic + param: 0.5 + service_strategies: + - service: alpha + type: probabilistic + param: 0.8 + operation_strategies: + - operation: op1 + type: probabilistic + param: 0.2 + - operation: op2 + type: probabilistic + param: 0.4 + - service: beta + type: ratelimiting + param: 5 +---- + +If there are no user-supplied configurations, the {JaegerShortName} uses the following settings: + +.Default sampling +[source,yaml] +---- +spec: + sampling: + options: + default_strategy: + type: probabilistic + param: 1 +---- diff --git a/modules/distr-tracing-config-storage.adoc b/modules/distr-tracing-config-storage.adoc new file mode 100644 index 000000000000..5a02a442d8d4 --- /dev/null +++ b/modules/distr-tracing-config-storage.adoc @@ -0,0 +1,617 @@ +//// +This module included in the following assemblies: +- distr_tracing_install/distr-tracing-deploying.adoc +//// + +[id="distr-tracing-config-storage_{context}"] += Distributed tracing storage configuration options + +You configure storage for the Collector, Ingester, and Query services under `spec.storage`. Multiple instances of each of these components can be provisioned as required for performance and resilience purposes. + +.General storage parameters used by the {JaegerName} Operator to define distributed tracing storage + +[options="header"] +[cols="l, a, a, a"] +|=== +|Parameter |Description |Values |Default value +|spec: + storage: + type: +|Type of storage to use for the deployment. +|`memory` or `elasticsearch`. +Memory storage is only appropriate for development, testing, demonstrations, and proof of concept environments as the data does not persist if the pod is shut down. For production environments {JaegerShortName} supports Elasticsearch for persistent storage. 
+|`memory` + +|storage: + secretname: +|Name of the secret, for example `tracing-secret`. +| +|N/A + +|storage: + options: {} +|Configuration options that define the storage. +| +| +|=== + +.Elasticsearch index cleaner parameters +[options="header"] +[cols="l, a, a, a"] +|=== +|Parameter |Description |Values |Default value +|storage: + esIndexCleaner: + enabled: +|When using Elasticsearch storage, by default a job is created to clean old traces from the index. This parameter enables or disables the index cleaner job. +|`true`/ `false` +|`true` + +|storage: + esIndexCleaner: + numberOfDays: +|Number of days to wait before deleting an index. +|Integer value +|`7` + +|storage: + esIndexCleaner: + schedule: +|Defines the schedule for how often to clean the Elasticsearch index. +|Cron expression +|"55 23 * * *" +|=== + +[id="distributed-tracing-config-auto-provisioning-es_{context}"] +== Auto-provisioning an Elasticsearch instance + +When the `storage:type` is set to `elasticsearch` but there is no value set for `spec:storage:options:es:server-urls`, the {JaegerName} Operator uses the OpenShift Elasticsearch Operator to create an Elasticsearch cluster based on the configuration provided in the `storage` section of the custom resource file. + +.Restrictions + +* You can have only one {JaegerShortName} with self-provisioned Elasticsearch instance per namespace. The Elasticsearch cluster is meant to be dedicated for a single {JaegerShortName} instance. +* There can be only one Elasticsearch per namespace. + +[NOTE] +==== +If you already have installed Elasticsearch as part of OpenShift Logging, the {JaegerName} Operator can use the installed OpenShift Elasticsearch Operator to provision storage. +==== + +The following configuration parameters are for a _self-provisioned_ Elasticsearch instance, that is an instance created by the {JaegerName} Operator using the OpenShift Elasticsearch Operator. 
You specify configuration options for self-provisioned Elasticsearch under `spec:storage:elasticsearch` in your configuration file. + +.Elasticsearch resource configuration parameters +[options="header"] +[cols="l, a, a, a"] +|=== +|Parameter |Description |Values |Default value +|elasticsearch: + nodeCount: +|Number of Elasticsearch nodes. For high availability use at least 3 nodes. Do not use 2 nodes as “split brain” problem can happen. +|Integer value. For example, Proof of concept = 1, +Minimum deployment =3 +|3 + +|elasticsearch: + resources: + requests: + cpu: +|Number of central processing units for requests, based on your environment's configuration. +|Specified in cores or millicores, for example, 200m, 0.5, 1. For example, Proof of concept = 500m, +Minimum deployment =1 +|1 + +|elasticsearch: + resources: + requests: + memory: +|Available memory for requests, based on your environment's configuration. +|Specified in bytes, for example, 200Ki, 50Mi, 5Gi. For example, Proof of concept = 1Gi, +Minimum deployment = 16Gi* +|16Gi + +|elasticsearch: + resources: + limits: + cpu: +|Limit on number of central processing units, based on your environment's configuration. +|Specified in cores or millicores, for example, 200m, 0.5, 1. For example, Proof of concept = 500m, +Minimum deployment =1 +| + +|elasticsearch: + resources: + limits: + memory: +|Available memory limit based on your environment's configuration. +|Specified in bytes, for example, 200Ki, 50Mi, 5Gi. For example, Proof of concept = 1Gi, +Minimum deployment = 16Gi* +| + +|elasticsearch: + redundancyPolicy: +|Data replication policy defines how Elasticsearch shards are replicated across data nodes in the cluster. If not specified, the {JaegerName} Operator automatically determines the most appropriate replication based on number of nodes. 
+|`ZeroRedundancy`(no replica shards), `SingleRedundancy`(one replica shard), `MultipleRedundancy`(each index is spread over half of the Data nodes), `FullRedundancy` (each index is fully replicated on every Data node in the cluster). +| + +| +3+|*Each Elasticsearch node can operate with a lower memory setting though this is NOT recommended for production deployments. For production use, you should have no less than 16Gi allocated to each pod by default, but preferably allocate as much as you can, up to 64Gi per pod. +|=== + +.Production storage example +[source,yaml] +---- +apiVersion: jaegertracing.io/v1 +kind: Jaeger +metadata: + name: simple-prod +spec: + strategy: production + storage: + type: elasticsearch + elasticsearch: + nodeCount: 3 + resources: + requests: + cpu: 1 + memory: 16Gi + limits: + memory: 16Gi +---- + +.Storage example with persistent storage: +[source,yaml] +---- +apiVersion: jaegertracing.io/v1 +kind: Jaeger +metadata: + name: simple-prod +spec: + strategy: production + storage: + type: elasticsearch + elasticsearch: + nodeCount: 1 + storage: # <1> + storageClassName: gp2 + size: 5Gi + resources: + requests: + cpu: 200m + memory: 4Gi + limits: + memory: 4Gi + redundancyPolicy: ZeroRedundancy +---- + +<1> Persistent storage configuration. In this case AWS `gp2` with `5Gi` size. When no value is specified, {JaegerShortName} uses `emptyDir`. The OpenShift Elasticsearch Operator provisions `PersistentVolumeClaim` and `PersistentVolume` which are not removed with {JaegerShortName} instance. You can mount the same volumes if you create a {JaegerShortName} instance with the same name and namespace. + + +[id="distributed-tracing-config-external-es_{context}"] +== Connecting to an existing Elasticsearch instance + +You can use an existing Elasticsearch cluster for storage with {DTShortName}, that is, an instance that was not auto-provisioned by the {JaegerName} Operator. 
You do this by specifying the URL of the existing cluster as the `spec:storage:options:es:server-urls` value in your configuration. + +.Restrictions + +* You cannot share or reuse a {product-title} logging Elasticsearch instance with {JaegerShortName}. The Elasticsearch cluster is meant to be dedicated for a single {JaegerShortName} instance. + +[NOTE] +==== +Red Hat does not provide support for your external Elasticsearch instance. You can review the tested integrations matrix on the link:https://access.redhat.com/articles/5381021[Customer Portal]. +==== + +The following configuration parameters are for an already existing Elasticsearch instance, also known as an _external_ Elasticsearch instance. In this case, you specify configuration options for Elasticsearch under `spec:storage:options:es` in your custom resource file. + +.General ES configuration parameters +[options="header"] +[cols="l, a, a, a"] +|=== +|Parameter |Description |Values |Default value +|es: + server-urls: +|URL of the Elasticsearch instance. +|The fully-qualified domain name of the Elasticsearch server. +|`http://elasticsearch..svc:9200` + +|es: + max-doc-count: +|The maximum document count to return from an Elasticsearch query. This will also apply to aggregations. If you set both `es.max-doc-count` and `es.max-num-spans`, Elasticsearch will use the smaller value of the two. +| +|10000 + +|es: + max-num-spans: +|[*Deprecated* - Will be removed in a future release, use `es.max-doc-count` instead.] The maximum number of spans to fetch at a time, per query, in Elasticsearch. If you set both `es.max-num-spans` and `es.max-doc-count`, Elasticsearch will use the smaller value of the two. +| +|10000 + +|es: + max-span-age: +|The maximum lookback for spans in Elasticsearch. +| +|72h0m0s + +|es: + sniffer: +|The sniffer configuration for Elasticsearch. The client uses the sniffing process to find all nodes automatically. Disabled by default. 
+|`true`/ `false` +|`false` + +|es: + sniffer-tls-enabled: +|Option to enable TLS when sniffing an Elasticsearch Cluster. The client uses the sniffing process to find all nodes automatically. Disabled by default +|`true`/ `false` +|`false` + +|es: + timeout: +|Timeout used for queries. When set to zero there is no timeout. +| +|0s + +|es: + username: +|The username required by Elasticsearch. The basic authentication also loads CA if it is specified. See also `es.password`. +| +| + +|es: + password: +|The password required by Elasticsearch. See also, `es.username`. +| +| + +|es: + version: +|The major Elasticsearch version. If not specified, the value will be auto-detected from Elasticsearch. +| +|0 +|=== + +.ES data replication parameters +[options="header"] +[cols="l, a, a, a"] +|=== +|Parameter |Description |Values |Default value +|es: + num-replicas: +|The number of replicas per index in Elasticsearch. +| +|1 + +|es: + num-shards: +|The number of shards per index in Elasticsearch. +| +|5 +|=== + +.ES index configuration parameters +[options="header"] +[cols="l, a, a, a"] +|=== +|Parameter |Description |Values |Default value +|es: + create-index-templates: +|Automatically create index templates at application startup when set to `true`. When templates are installed manually, set to `false`. +|`true`/ `false` +|`true` + +|es: + index-prefix: +|Optional prefix for {JaegerShortName} indices. For example, setting this to "production" creates indices named "production-tracing-*". +| +| +|=== + +.ES bulk processor configuration parameters +[options="header"] +[cols="l, a, a, a"] +|=== +|Parameter |Description |Values |Default value +|es: + bulk: + actions: +|The number of requests that can be added to the queue before the bulk processor decides to commit updates to disk. +| +|1000 + +//What is the default here? The original text said "Set to zero to disable. By default, this is disabled." 
+|es: + bulk: + flush-interval: +|A `time.Duration` after which bulk requests are committed, regardless of other thresholds. To disable the bulk processor flush interval, set this to zero. +| +|200ms + +|es: + bulk: + size: +|The number of bytes that the bulk requests can take up before the bulk processor decides to commit updates to disk. +| +|5000000 + +|es: + bulk: + workers: +|The number of workers that are able to receive and commit bulk requests to Elasticsearch. +| +|1 +|=== + +.ES TLS configuration parameters +[options="header"] +[cols="l, a, a, a"] +|=== +|Parameter |Description |Values |Default value +|es: + tls: + ca: +|Path to a TLS Certification Authority (CA) file used to verify the remote servers. +| +|Will use the system truststore by default. + +|es: + tls: + cert: +|Path to a TLS Certificate file, used to identify this process to the remote servers. +| +| + +|es: + tls: + enabled: +|Enable transport layer security (TLS) when talking to the remote servers. Disabled by default. +|`true`/ `false` +|`false` + +|es: + tls: + key: +|Path to a TLS Private Key file, used to identify this process to the remote servers. +| +| + +|es: + tls: + server-name: +|Override the expected TLS server name in the certificate of the remote servers. +| +| +//Clarification of "if specified" for `token-file` and `username`, does that mean if this is set? Or that it only loads the CA if one is specified (that is, if es.tls.ca has a value?) +|es: + token-file: +|Path to a file containing the bearer token. This flag also loads the Certification Authority (CA) file if it is specified. +| +| +|=== + +.ES archive configuration parameters +[options="header"] +[cols="l, a, a, a"] +|=== +|Parameter |Description |Values |Default value +|es-archive: + bulk: + actions: +|The number of requests that can be added to the queue before the bulk processor decides to commit updates to disk. +| +|0 + +//What is the default here? The original text said "Set to zero to disable. 
By default, this is disabled." +|es-archive: + bulk: + flush-interval: +|A `time.Duration` after which bulk requests are committed, regardless of other thresholds. To disable the bulk processor flush interval, set this to zero. +| +|0s + +|es-archive: + bulk: + size: +|The number of bytes that the bulk requests can take up before the bulk processor decides to commit updates to disk. +| +|0 + +|es-archive: + bulk: + workers: +|The number of workers that are able to receive and commit bulk requests to Elasticsearch. +| +|0 + +|es-archive: + create-index-templates: +|Automatically create index templates at application startup when set to `true`. When templates are installed manually, set to `false`. +|`true`/ `false` +|`false` + +|es-archive: + enabled: +|Enable extra storage. +|`true`/ `false` +|`false` + +|es-archive: + index-prefix: +|Optional prefix for {JaegerShortName} indices. For example, setting this to "production" creates indices named "production-tracing-*". +| +| + +|es-archive: + max-doc-count: +|The maximum document count to return from an Elasticsearch query. This will also apply to aggregations. +| +|0 + +|es-archive: + max-num-spans: +|[*Deprecated* - Will be removed in a future release, use `es-archive.max-doc-count` instead.] The maximum number of spans to fetch at a time, per query, in Elasticsearch. +| +|0 + +|es-archive: + max-span-age: +|The maximum lookback for spans in Elasticsearch. +| +|0s + +|es-archive: + num-replicas: +|The number of replicas per index in Elasticsearch. +| +|0 + +|es-archive: + num-shards: +|The number of shards per index in Elasticsearch. +| +|0 + +|es-archive: + password: +|The password required by Elasticsearch. See also, `es.username`. +| +| + +|es-archive: + server-urls: +|The comma-separated list of Elasticsearch servers. Must be specified as fully qualified URLs, for example, `\http://localhost:9200`. +| +| + +|es-archive: + sniffer: +|The sniffer configuration for Elasticsearch. 
The client uses the sniffing process to find all nodes automatically. Disabled by default. +|`true`/ `false` +|`false` + +|es-archive: + sniffer-tls-enabled: +|Option to enable TLS when sniffing an Elasticsearch Cluster. The client uses the sniffing process to find all nodes automatically. Disabled by default. +|`true`/ `false` +|`false` + +|es-archive: + timeout: +|Timeout used for queries. When set to zero there is no timeout. +| +|0s + +|es-archive: + tls: + ca: +|Path to a TLS Certification Authority (CA) file used to verify the remote servers. +| +|Will use the system truststore by default. + +|es-archive: + tls: + cert: +|Path to a TLS Certificate file, used to identify this process to the remote servers. +| +| + +|es-archive: + tls: + enabled: +|Enable transport layer security (TLS) when talking to the remote servers. Disabled by default. +|`true`/ `false` +|`false` + +|es-archive: + tls: + key: +|Path to a TLS Private Key file, used to identify this process to the remote servers. +| +| + +|es-archive: + tls: + server-name: +|Override the expected TLS server name in the certificate of the remote servers. +| +| + +//Clarification of "if specified" for next two rows, does that mean if this is set? Or that it only loads the CA if one is specified (that is, if es-archive.tls.ca has a value?) +|es-archive: + token-file: +|Path to a file containing the bearer token. This flag also loads the Certification Authority (CA) file if it is specified. +| +| + +|es-archive: + username: +|The username required by Elasticsearch. The basic authentication also loads CA if it is specified. See also `es-archive.password`. +| +| + +|es-archive: + version: +|The major Elasticsearch version. If not specified, the value will be auto-detected from Elasticsearch. 
+| +|0 +|=== + + +.Storage example with volume mounts +[source,yaml] +---- +apiVersion: jaegertracing.io/v1 +kind: Jaeger +metadata: + name: simple-prod +spec: + strategy: production + storage: + type: elasticsearch + options: + es: + server-urls: https://quickstart-es-http.default.svc:9200 + index-prefix: my-prefix + tls: + ca: /es/certificates/ca.crt + secretName: tracing-secret + volumeMounts: + - name: certificates + mountPath: /es/certificates/ + readOnly: true + volumes: + - name: certificates + secret: + secretName: quickstart-es-http-certs-public +---- + +The following example shows a Jaeger CR using an external Elasticsearch cluster with TLS CA certificate mounted from a volume and user/password stored in a secret. + +.External Elasticsearch example: +[source,yaml] +---- +apiVersion: jaegertracing.io/v1 +kind: Jaeger +metadata: + name: simple-prod +spec: + strategy: production + storage: + type: elasticsearch + options: + es: + server-urls: https://quickstart-es-http.default.svc:9200 # <1> + index-prefix: my-prefix + tls: # <2> + ca: /es/certificates/ca.crt + secretName: tracing-secret # <3> + volumeMounts: # <4> + - name: certificates + mountPath: /es/certificates/ + readOnly: true + volumes: + - name: certificates + secret: + secretName: quickstart-es-http-certs-public +---- +<1> URL to Elasticsearch service running in default namespace. +<2> TLS configuration. In this case only CA certificate, but it can also contain es.tls.key and es.tls.cert when using mutual TLS. +<3> Secret which defines environment variables ES_PASSWORD and ES_USERNAME. Created by kubectl create secret generic tracing-secret --from-literal=ES_PASSWORD=changeme --from-literal=ES_USERNAME=elastic +<4> Volume mounts and volumes which are mounted into all storage components. 
diff --git a/modules/distr-tracing-deploy-default.adoc b/modules/distr-tracing-deploy-default.adoc new file mode 100644 index 000000000000..d14c8616b779 --- /dev/null +++ b/modules/distr-tracing-deploy-default.adoc @@ -0,0 +1,115 @@ +//// +This module included in the following assemblies: +- distr_tracing_install/distr-tracing-deploying.adoc +//// + +[id="distr-tracing-deploy-default_{context}"] += Deploying the {DTShortName} default strategy from the web console + +The custom resource definition (CRD) defines the configuration used when you deploy an instance of {DTProductName}. The default CR is named `jaeger-all-in-one-inmemory` and it is configured with minimal resources to ensure that you can successfully install it on a default {product-title} installation. You can use this default configuration to create a {JaegerName} instance that uses the `AllInOne` deployment strategy, or you can define your own custom resource file. + +[NOTE] +==== +In-memory storage is not persistent. If the Jaeger pod shuts down, restarts, or is replaced, your trace data will be lost. For persistent storage, you must use the `production` or `streaming` strategies, which use Elasticsearch as the default storage. +==== + +.Prerequisites + +* The {JaegerName} Operator has been installed. +* You have reviewed the instructions for how to customize the deployment. +* You have access to the cluster as a user with the `cluster-admin` role. + +.Procedure + +. Log in to the {product-title} web console as a user with the `cluster-admin` role. + +. Create a new project, for example `tracing-system`. ++ +[NOTE] +==== +If you are installing as part of Service Mesh, the {DTShortName} resources must be installed in the same namespace as the `ServiceMeshControlPlane` resource, for example `istio-system`. +==== ++ +.. Navigate to *Home* -> *Projects*. + +.. Click *Create Project*. + +.. Enter `tracing-system` in the *Name* field. + +.. Click *Create*. + +. 
Navigate to *Operators* -> *Installed Operators*. + +. If necessary, select `tracing-system` from the *Project* menu. You may have to wait a few moments for the Operators to be copied to the new project. + +. Click the {JaegerName} Operator. On the *Details* tab, under *Provided APIs*, the Operator provides a single link. + +. Under *Jaeger*, click *Create Instance*. + +. On the *Create Jaeger* page, to install using the defaults, click *Create* to create the {JaegerShortName} instance. + +. On the *Jaegers* page, click the name of the {JaegerShortName} instance, for example, `jaeger-all-in-one-inmemory`. + +. On the *Jaeger Details* page, click the *Resources* tab. Wait until the pod has a status of "Running" before continuing. + + +[id="distr-tracing-deploy-default-cli_{context}"] +== Deploying the {DTShortName} default strategy from the CLI + +Follow this procedure to create an instance of {JaegerShortName} from the command line. + +.Prerequisites + +* The {JaegerName} Operator has been installed and verified. +* You have reviewed the instructions for how to customize the deployment. +* You have access to the OpenShift CLI (`oc`) that matches your {product-title} version. +* You have access to the cluster as a user with the `cluster-admin` role. + +.Procedure + +. Log in to the {product-title} CLI as a user with the `cluster-admin` role. ++ +[source,terminal] +---- +$ oc login https://{HOSTNAME}:8443 +---- + +. Create a new project named `tracing-system`. ++ +[source,terminal] +---- +$ oc new-project tracing-system +---- + +. Create a custom resource file named `jaeger.yaml` that contains the following text: ++ +.Example jaeger-all-in-one.yaml +[source,yaml] +---- +apiVersion: jaegertracing.io/v1 +kind: Jaeger +metadata: + name: jaeger-all-in-one-inmemory +---- + +. Run the following command to deploy {JaegerShortName}: ++ +[source,terminal] +---- +$ oc create -n tracing-system -f jaeger.yaml +---- + +. 
Run the following command to watch the progress of the pods during the installation process: ++ +[source,terminal] +---- +$ oc get pods -n tracing-system -w +---- ++ +After the installation process has completed, you should see output similar to the following example: ++ +[source,terminal] +---- +NAME READY STATUS RESTARTS AGE +jaeger-all-in-one-inmemory-cdff7897b-qhfdx 2/2 Running 0 24s +---- diff --git a/modules/distr-tracing-deploy-production-es.adoc b/modules/distr-tracing-deploy-production-es.adoc new file mode 100644 index 000000000000..ed66b22737da --- /dev/null +++ b/modules/distr-tracing-deploy-production-es.adoc @@ -0,0 +1,136 @@ +//// +This module included in the following assemblies: +- distr_tracing_install/distr-tracing-deploying.adoc +//// + +[id="distr-tracing-deploy-production_{context}"] += Deploying the {DTShortName} production strategy from the web console + +The `production` deployment strategy is intended for production environments that require a more scalable and highly available architecture, and where long-term storage of trace data is important. + +.Prerequisites + +* The OpenShift Elasticsearch Operator has been installed. +* The {JaegerName} Operator has been installed. +* You have reviewed the instructions for how to customize the deployment. +* You have access to the cluster as a user with the `cluster-admin` role. + +.Procedure + +. Log in to the {product-title} web console as a user with the `cluster-admin` role. + +. Create a new project, for example `tracing-system`. ++ +[NOTE] +==== +If you are installing as part of Service Mesh, the {DTShortName} resources must be installed in the same namespace as the `ServiceMeshControlPlane` resource, for example `istio-system`. +==== ++ +.. Navigate to *Home* -> *Projects*. + +.. Click *Create Project*. + +.. Enter `tracing-system` in the *Name* field. + +.. Click *Create*. + +. Navigate to *Operators* -> *Installed Operators*. + +. 
If necessary, select `tracing-system` from the *Project* menu. You may have to wait a few moments for the Operators to be copied to the new project. + +. Click the {JaegerName} Operator. On the *Overview* tab, under *Provided APIs*, the Operator provides a single link. + +. Under *Jaeger*, click *Create Instance*. + +. On the *Create Jaeger* page, replace the default `all-in-one` YAML text with your production YAML configuration, for example: + ++ +.Example jaeger-production.yaml file with Elasticsearch +[source,yaml] +---- +apiVersion: jaegertracing.io/v1 +kind: Jaeger +metadata: + name: jaeger-production + namespace: +spec: + strategy: production + ingress: + security: oauth-proxy + storage: + type: elasticsearch + elasticsearch: + nodeCount: 3 + redundancyPolicy: SingleRedundancy + esIndexCleaner: + enabled: true + numberOfDays: 7 + schedule: 55 23 * * * + esRollover: + schedule: '*/30 * * * *' +---- ++ + +. Click *Create* to create the {JaegerShortName} instance. + +. On the *Jaegers* page, click the name of the {JaegerShortName} instance, for example, `jaeger-prod-elasticsearch`. + +. On the *Jaeger Details* page, click the *Resources* tab. Wait until all the pods have a status of "Running" before continuing. + + +[id="distr-tracing-deploy-production-cli_{context}"] +== Deploying the {DTShortName} production strategy from the CLI + +Follow this procedure to create an instance of {JaegerShortName} from the command line. + +.Prerequisites + +* The OpenShift Elasticsearch Operator has been installed. +* The {JaegerName} Operator has been installed. +* You have reviewed the instructions for how to customize the deployment. +* You have access to the OpenShift CLI (`oc`) that matches your {product-title} version. +* You have access to the cluster as a user with the `cluster-admin` role. + +.Procedure + +. Log in to the {product-title} CLI as a user with the `cluster-admin` role. ++ +[source,terminal] +---- +$ oc login https://{HOSTNAME}:8443 +---- + +. 
Create a new project named `tracing-system`. ++ +[source,terminal] +---- +$ oc new-project tracing-system +---- + +. Create a custom resource file named `jaeger-production.yaml` that contains the text of the example file in the previous procedure. + +. Run the following command to deploy {JaegerShortName}: ++ +[source,terminal] +---- +$ oc create -n tracing-system -f jaeger-production.yaml +---- ++ +. Run the following command to watch the progress of the pods during the installation process: ++ +[source,terminal] +---- +$ oc get pods -n tracing-system -w +---- ++ +After the installation process has completed, you should see output similar to the following example: ++ +[source,terminal] +---- +NAME READY STATUS RESTARTS AGE +elasticsearch-cdm-jaegersystemjaegerproduction-1-6676cf568gwhlw 2/2 Running 0 10m +elasticsearch-cdm-jaegersystemjaegerproduction-2-bcd4c8bf5l6g6w 2/2 Running 0 10m +elasticsearch-cdm-jaegersystemjaegerproduction-3-844d6d9694hhst 2/2 Running 0 10m +jaeger-production-collector-94cd847d-jwjlj 1/1 Running 3 8m32s +jaeger-production-query-5cbfbd499d-tv8zf 3/3 Running 3 8m32s +---- diff --git a/modules/distr-tracing-deploy-streaming.adoc b/modules/distr-tracing-deploy-streaming.adoc new file mode 100644 index 000000000000..d07d6d7dfe26 --- /dev/null +++ b/modules/distr-tracing-deploy-streaming.adoc @@ -0,0 +1,155 @@ +//// +This module included in the following assemblies: +- distr_tracing_install/distr-tracing-deploying.adoc +//// + +[id="distr-tracing-deploy-streaming_{context}"] += Deploying the {DTShortName} streaming strategy from the web console + +The `streaming` deployment strategy is intended for production environments that require a more scalable and highly available architecture, and where long-term storage of trace data is important. + +The `streaming` strategy provides a streaming capability that sits between the Collector and the Elasticsearch storage. 
This reduces the pressure on the storage under high load situations, and enables other trace post-processing capabilities to tap into the real-time span data directly from the Kafka streaming platform.
+
+[NOTE]
+====
+The streaming strategy requires an additional Red Hat subscription for AMQ Streams. If you do not have an AMQ Streams subscription, contact your sales representative for more information.
+====
+
+[NOTE]
+====
+The streaming deployment strategy is currently unsupported on IBM Z.
+====
+
+.Prerequisites
+
+* The AMQ Streams Operator has been installed. If using version 1.4.0 or higher you can use self-provisioning. Otherwise you must create the Kafka instance.
+* The {JaegerName} Operator has been installed.
+* You have reviewed the instructions for how to customize the deployment.
+* You have access to the cluster as a user with the `cluster-admin` role.
+
+.Procedure
+
+. Log in to the {product-title} web console as a user with the `cluster-admin` role.
+
+. Create a new project, for example `tracing-system`.
+
++
+[NOTE]
+====
+If you are installing as part of Service Mesh, the {DTShortName} resources must be installed in the same namespace as the `ServiceMeshControlPlane` resource, for example `istio-system`.
+====
++
+
+.. Navigate to *Home* -> *Projects*.
+
+.. Click *Create Project*.
+
+.. Enter `tracing-system` in the *Name* field.
+
+.. Click *Create*.
+
+. Navigate to *Operators* -> *Installed Operators*.
+
+. If necessary, select `tracing-system` from the *Project* menu. You may have to wait a few moments for the Operators to be copied to the new project.
+
+. Click the {JaegerName} Operator. On the *Overview* tab, under *Provided APIs*, the Operator provides a single link.
+
+. Under *Jaeger*, click *Create Instance*.
+
+. 
On the *Create Jaeger* page, replace the default `all-in-one` YAML text with your streaming YAML configuration, for example:
+
+.Example jaeger-streaming.yaml file
+[source,yaml]
+----
+apiVersion: jaegertracing.io/v1
+kind: Jaeger
+metadata:
+  name: jaeger-streaming
+spec:
+  strategy: streaming
+  collector:
+    options:
+      kafka:
+        producer:
+          topic: jaeger-spans
+          #Note: If brokers are not defined, AMQ Streams 1.4.0+ will self-provision Kafka.
+          brokers: my-cluster-kafka-brokers.kafka:9092
+  storage:
+    type: elasticsearch
+  ingester:
+    options:
+      kafka:
+        consumer:
+          topic: jaeger-spans
+          brokers: my-cluster-kafka-brokers.kafka:9092
+
+----
+//TODO - find out if this storage configuration is correct for OpenShift
+
+. Click *Create* to create the {JaegerShortName} instance.
+
+. On the *Jaegers* page, click the name of the {JaegerShortName} instance, for example, `jaeger-streaming`.
+
+. On the *Jaeger Details* page, click the *Resources* tab. Wait until all the pods have a status of "Running" before continuing.
+
+
+[id="distr-tracing-deploy-streaming-cli_{context}"]
+== Deploying the {DTShortName} streaming strategy from the CLI
+
+Follow this procedure to create an instance of {JaegerShortName} from the command line.
+
+.Prerequisites
+
+* The AMQ Streams Operator has been installed. If using version 1.4.0 or higher you can use self-provisioning. Otherwise you must create the Kafka instance.
+* The {JaegerName} Operator has been installed.
+* You have reviewed the instructions for how to customize the deployment.
+* You have access to the OpenShift CLI (`oc`) that matches your {product-title} version.
+* You have access to the cluster as a user with the `cluster-admin` role.
+
+.Procedure
+
+. Log in to the {product-title} CLI as a user with the `cluster-admin` role.
++
+[source,terminal]
+----
+$ oc login https://{HOSTNAME}:8443
+----
+
+. Create a new project named `tracing-system`.
++
+[source,terminal]
+----
+$ oc new-project tracing-system
+----
+
+. 
Create a custom resource file named `jaeger-streaming.yaml` that contains the text of the example file in the previous procedure. + +. Run the following command to deploy Jaeger: ++ +[source,terminal] +---- +$ oc create -n tracing-system -f jaeger-streaming.yaml +---- ++ +. Run the following command to watch the progress of the pods during the installation process: ++ +[source,terminal] +---- +$ oc get pods -n tracing-system -w +---- ++ +After the installation process has completed, you should see output similar to the following example: ++ +[source,terminal] +---- +NAME READY STATUS RESTARTS AGE +elasticsearch-cdm-jaegersystemjaegerstreaming-1-697b66d6fcztcnn 2/2 Running 0 5m40s +elasticsearch-cdm-jaegersystemjaegerstreaming-2-5f4b95c78b9gckz 2/2 Running 0 5m37s +elasticsearch-cdm-jaegersystemjaegerstreaming-3-7b6d964576nnz97 2/2 Running 0 5m5s +jaeger-streaming-collector-6f6db7f99f-rtcfm 1/1 Running 0 80s +jaeger-streaming-entity-operator-6b6d67cc99-4lm9q 3/3 Running 2 2m18s +jaeger-streaming-ingester-7d479847f8-5h8kc 1/1 Running 0 80s +jaeger-streaming-kafka-0 2/2 Running 0 3m1s +jaeger-streaming-query-65bf5bb854-ncnc7 3/3 Running 0 80s +jaeger-streaming-zookeeper-0 2/2 Running 0 3m39s +---- diff --git a/modules/distr-tracing-deployment-best-practices.adoc b/modules/distr-tracing-deployment-best-practices.adoc new file mode 100644 index 000000000000..5397602f17d2 --- /dev/null +++ b/modules/distr-tracing-deployment-best-practices.adoc @@ -0,0 +1,15 @@ +//// +This module included in the following assemblies: +- distr_tracing_install/distr-tracing-deploying.adoc +//// + +[id="distr-tracing-deployment-best-practices_{context}"] += Deployment best practices + +* {DTProductName} instance names must be unique. 
If you want to have multiple {JaegerName} instances and are using sidecar injected agents, then the {JaegerName} instances should have unique names, and the injection annotation should explicitly specify the {JaegerName} instance name the tracing data should be reported to. + +* If you have a multitenant implementation and tenants are separated by namespaces, deploy a {JaegerName} instance to each tenant namespace. + +** Agent as a daemonset is not supported for multitenant installations or OpenShift Dedicated. Agent as a sidecar is the only supported configuration for these use cases. + +* If you are installing {DTShortName} as part of Red Hat OpenShift Service Mesh, the {DTShortName} resources must be installed in the same namespace as the `ServiceMeshControlPlane` resource. diff --git a/modules/distr-tracing-document-attributes.adoc b/modules/distr-tracing-document-attributes.adoc index 08b681e7f9e3..3c428ac782a5 100644 --- a/modules/distr-tracing-document-attributes.adoc +++ b/modules/distr-tracing-document-attributes.adoc @@ -9,10 +9,21 @@ // Product content attributes, that is, substitution variables in the files. 
// :product-title: OpenShift Container Platform -:ProductName: Red Hat OpenShift distributed tracing -:ProductShortName: distributed tracing -:ProductRelease: -:ProductVersion: 2.0 +:product-dedicated: Red Hat OpenShift Dedicated +:console-redhat-com: Red Hat OpenShift Cluster Manager + +:DTProductName: Red Hat OpenShift distributed tracing +:DTShortName: distributed tracing +:DTProductVersion: 2.0 + +:JaegerName: Red Hat OpenShift distributed tracing platform +:JaegerShortName: distributed tracing platform +:JaegerVersion: 1.28.0 + +:OTELName: Red Hat OpenShift distributed tracing data collection +:OTELShortName: distributed tracing data collection +:OTELVersion: 0.33.0 + :product-build: :DownloadURL: registry.redhat.io :cloud-redhat-com: Red Hat OpenShift Cluster Manager diff --git a/modules/distr-tracing-features.adoc b/modules/distr-tracing-features.adoc new file mode 100644 index 000000000000..9237b7ffab9c --- /dev/null +++ b/modules/distr-tracing-features.adoc @@ -0,0 +1,18 @@ +//// +This module included in the following assemblies: +-service_mesh/v2x/ossm-architecture.adoc +-dist_tracing_arch/distr-tracing-architecture.adoc +//// + +[id="distributed-tracing-features_{context}"] += {DTProductName} features + +{DTProductName} provides the following capabilities: + +* Integration with Kiali – When properly configured, you can view {DTShortName} data from the Kiali console. + +* High scalability – The {DTShortName} back end is designed to have no single points of failure and to scale with the business needs. + +* Distributed Context Propagation – Enables you to connect data from different components together to create a complete end-to-end trace. + +* Backwards compatibility with Zipkin – {DTProductName} has APIs that enable it to be used as a drop-in replacement for Zipkin, but Red Hat is not supporting Zipkin compatibility in this release. 
diff --git a/modules/distr-tracing-install-elasticsearch.adoc b/modules/distr-tracing-install-elasticsearch.adoc new file mode 100644 index 000000000000..80c69dfbb3db --- /dev/null +++ b/modules/distr-tracing-install-elasticsearch.adoc @@ -0,0 +1,56 @@ +//// +This module included in the following assemblies: +- distr_tracing_install/distr-tracing-installing.adoc +//// + +[id="distributed-tracing-operator-install-elasticsearch_{context}"] += Installing the OpenShift Elasticsearch Operator + +The default {JaegerName} deployment uses in-memory storage because it is designed to be installed quickly for those evaluating {DTProductName}, giving demonstrations, or using {JaegerName} in a test environment. If you plan to use {JaegerName} in production, you must install and configure a persistent storage option, in this case, Elasticsearch. + +.Prerequisites +* You have access to the {product-title} web console. +* You have access to the cluster as a user with the `cluster-admin` role. If you use {product-dedicated}, you must have an account with the `dedicated-admin` role. + +[WARNING] +==== +Do not install Community versions of the Operators. Community Operators are not supported. +==== + +[NOTE] +==== +If you have already installed the OpenShift Elasticsearch Operator as part of OpenShift Logging, you do not need to install the OpenShift Elasticsearch Operator again. The {JaegerName} Operator creates the Elasticsearch instance using the installed OpenShift Elasticsearch Operator. +==== + +.Procedure + +. Log in to the {product-title} web console as a user with the `cluster-admin` role. If you use {product-dedicated}, you must have an account with the `dedicated-admin` role. + +. Navigate to *Operators* -> *OperatorHub*. + +. Type *Elasticsearch* into the filter box to locate the OpenShift Elasticsearch Operator. + +. Click the *OpenShift Elasticsearch Operator* provided by Red Hat to display information about the Operator. + +. Click *Install*. + +. 
On the *Install Operator* page, select the *stable* Update Channel. This automatically updates your Operator as new versions are released. + +. Accept the default *All namespaces on the cluster (default)*. This installs the Operator in the default `openshift-operators-redhat` project and makes the Operator available to all projects in the cluster. ++ +[NOTE] +==== +The Elasticsearch installation requires the *openshift-operators-redhat* namespace for the OpenShift Elasticsearch Operator. The other {DTProductName} Operators are installed in the `openshift-operators` namespace. +==== ++ + +* Accept the default *Automatic* approval strategy. By accepting the default, when a new version of this Operator is available, Operator Lifecycle Manager (OLM) automatically upgrades the running instance of your Operator without human intervention. If you select *Manual* updates, when a newer version of an Operator is available, OLM creates an update request. As a cluster administrator, you must then manually approve that update request to have the Operator updated to the new version. ++ +[NOTE] +==== +The *Manual* approval strategy requires a user with appropriate credentials to approve the Operator install and subscription process. +==== + +. Click *Install*. + +. On the *Installed Operators* page, select the `openshift-operators-redhat` project. Wait until you see that the OpenShift Elasticsearch Operator shows a status of "InstallSucceeded" before continuing. 
diff --git a/modules/distr-tracing-install-jaeger-operator.adoc b/modules/distr-tracing-install-jaeger-operator.adoc new file mode 100644 index 000000000000..c8b33d7262f5 --- /dev/null +++ b/modules/distr-tracing-install-jaeger-operator.adoc @@ -0,0 +1,50 @@ +//// +This module included in the following assemblies: +- distr_tracing_install/distr-tracing-installing.adoc +//// + +[id="distr-tracing-jaeger-operator-install_{context}"] += Installing the {JaegerName} Operator + +To install {JaegerName}, you use the link:https://operatorhub.io/[OperatorHub] to install the {JaegerName} Operator. + +By default, the Operator is installed in the `openshift-operators` project. + +.Prerequisites +* You have access to the {product-title} web console. +* You have access to the cluster as a user with the `cluster-admin` role. If you use {product-dedicated}, you must have an account with the `dedicated-admin` role. +* If you require persistent storage, you must also install the OpenShift Elasticsearch Operator before installing the {JaegerName} Operator. + +[WARNING] +==== +Do not install Community versions of the Operators. Community Operators are not supported. +==== + +.Procedure + +. Log in to the {product-title} web console as a user with the `cluster-admin` role. If you use {product-dedicated}, you must have an account with the `dedicated-admin` role. + +. Navigate to *Operators* -> *OperatorHub*. + +. Type *distributed tracing platform* into the filter to locate the {JaegerName} Operator. + +. Click the *{JaegerName} Operator* provided by Red Hat to display information about the Operator. + +. Click *Install*. + +. On the *Install Operator* page, select the *stable* Update Channel. This automatically updates your Operator as new versions are released. +//If you select a maintenance channel, for example, *Stable*, you will receive bug fixes and security patches for the length of the support cycle for that version. + +. 
Accept the default *All namespaces on the cluster (default)*. This installs the Operator in the default `openshift-operators` project and makes the Operator available to all projects in the cluster. + +* Accept the default *Automatic* approval strategy. By accepting the default, when a new version of this Operator is available, Operator Lifecycle Manager (OLM) automatically upgrades the running instance of your Operator without human intervention. If you select *Manual* updates, when a newer version of an Operator is available, OLM creates an update request. As a cluster administrator, you must then manually approve that update request to have the Operator updated to the new version. ++ +[NOTE] +==== +The *Manual* approval strategy requires a user with appropriate credentials to approve the Operator install and subscription process. +==== ++ + +. Click *Install*. + +. On the *Subscription Overview* page, select the `openshift-operators` project. Wait until you see that the {JaegerName} Operator shows a status of "InstallSucceeded" before continuing. diff --git a/modules/distr-tracing-install-otel-operator.adoc b/modules/distr-tracing-install-otel-operator.adoc new file mode 100644 index 000000000000..a6d7ec5ec3dd --- /dev/null +++ b/modules/distr-tracing-install-otel-operator.adoc @@ -0,0 +1,51 @@ +//// +This module included in the following assemblies: +- distr_tracing_install/distr-tracing-installing.adoc +//// + +[id="distr-tracing-otel-operator-install_{context}"] += Installing the {OTELName} Operator + +#TECH PREVIEW BOILERPLATE HERE# + +To install {OTELName}, you use the link:https://operatorhub.io/[OperatorHub] to install the {OTELName} Operator. + +By default, the Operator is installed in the `openshift-operators` project. + +.Prerequisites +* You have access to the {product-title} web console. +* You have access to the cluster as a user with the `cluster-admin` role. If you use {product-dedicated}, you must have an account with the `dedicated-admin` role. 
+//* If you require persistent storage, you must also install the OpenShift Elasticsearch Operator before installing the {OTELName} Operator. + +[WARNING] +==== +Do not install Community versions of the Operators. Community Operators are not supported. +==== + +.Procedure + +. Log in to the {product-title} web console as a user with the `cluster-admin` role. If you use {product-dedicated}, you must have an account with the `dedicated-admin` role. + +. Navigate to *Operators* -> *OperatorHub*. + +. Type *distributed tracing data collection* into the filter to locate the {OTELName} Operator. + +. Click the *{OTELName} Operator* provided by Red Hat to display information about the Operator. + +. Click *Install*. + +. On the *Install Operator* page, select the *stable* Update Channel. This automatically updates your Operator as new versions are released. + +. Select *All namespaces on the cluster (default)*. This installs the Operator in the default `openshift-operators` project and makes the Operator available to all projects in the cluster. + +* Select an approval strategy. You can select *Automatic* or *Manual* updates. If you choose *Automatic* updates for an installed Operator, when a new version of that Operator is available, Operator Lifecycle Manager (OLM) automatically upgrades the running instance of your Operator without human intervention. If you select *Manual* updates, when a newer version of an Operator is available, OLM creates an update request. As a cluster administrator, you must then manually approve that update request to have the Operator updated to the new version. ++ +[NOTE] +==== +The *Manual* approval strategy requires a user with appropriate credentials to approve the Operator install and subscription process. +==== ++ + +. Click *Install*. + +. On the *Subscription Overview* page, select the `openshift-operators` project. Wait until you see that the {OTELName} Operator shows a status of "InstallSucceeded" before continuing. 
diff --git a/modules/distr-tracing-install-overview.adoc b/modules/distr-tracing-install-overview.adoc new file mode 100644 index 000000000000..a782e5debf9c --- /dev/null +++ b/modules/distr-tracing-install-overview.adoc @@ -0,0 +1,19 @@ +//// +This module included in the following assemblies: +- distr_tracing_install/distr-tracing-installing.adoc +//// + +[id="distributed-tracing-install-overview_{context}"] += {DTProductName} installation overview + +The steps for installing {DTProductName} are as follows: + +* Review the documentation and determine your deployment strategy. + +* If your deployment strategy requires persistent storage, install the OpenShift Elasticsearch Operator via the OperatorHub. + +* Install the {JaegerName} Operator via the OperatorHub. + +* Modify the custom resource YAML file to support your deployment strategy. + +* Deploy one or more instances of {JaegerName} to your {product-title} environment. diff --git a/modules/distr-tracing-product-overview.adoc b/modules/distr-tracing-product-overview.adoc index 3cf875fe68bb..052611e975cf 100644 --- a/modules/distr-tracing-product-overview.adoc +++ b/modules/distr-tracing-product-overview.adoc @@ -1,16 +1,16 @@ //// -This CONCEPT module included in the following assemblies: --service_mesh/v2x/ossm-architecture.adoc ?? --distr-tracing-architecture.adoc +This module included in the following assemblies: +-service_mesh/v2x/ossm-architecture.adoc +-distr_tracing_arch/distr-tracing-architecture.adoc //// -[id="distr-tracing-product-overview_{context}"] -= OpenShift distributed tracing overview +[id="distributed-tracing-product-overview_{context}"] += Distributed tracing overview As a service owner, you can use distributed tracing to instrument your services to gather insights into your service architecture. -You can use distributed tracing for monitoring, network profiling, and troubleshooting the interaction between components in modern, cloud-native, microservices-based applications. 
+You can use {DTShortName} for monitoring, network profiling, and troubleshooting the interaction between components in modern, cloud-native, microservices-based applications. -Using distributed tracing lets you perform the following functions: +With {DTShortName} you can perform the following functions: * Monitor distributed transactions @@ -18,10 +18,10 @@ Using distributed tracing lets you perform the following functions: * Perform root cause analysis -{ProductName} consists of two components: +{DTProductName} consists of two main components: -* *Red Hat OpenShift distributed tracing platform* - This component is based on the open source link:https://www.jaegertracing.io/[Jaeger project]. +* *{JaegerName}* - This component is based on the open source link:https://www.jaegertracing.io/[Jaeger project]. -* *Red Hat OpenShift distributed tracing data collection* - This component is based on the open source link:https://opentelemetry.io/[OpenTelemetry project]. +* *{OTELName}* - This component is based on the open source link:https://opentelemetry.io/[OpenTelemetry project]. Both of these components are based on the vendor-neutral link:https://opentracing.io/[OpenTracing] APIs and instrumentation. diff --git a/modules/distr-tracing-removing-instance-cli.adoc b/modules/distr-tracing-removing-instance-cli.adoc new file mode 100644 index 000000000000..547e8dd5bcf8 --- /dev/null +++ b/modules/distr-tracing-removing-instance-cli.adoc @@ -0,0 +1,90 @@ +//// +This module included in the following assemblies: +- distr_tracing_install/dist-tracing-removing.adoc +//// + +[id="dist-tracing-removing-cli_{context}"] += Removing a {JaegerName} instance from the CLI + +. Log in to the {product-title} CLI. ++ +[source,terminal] +---- +$ oc login +---- ++ +. 
To display the {JaegerShortName} instances run the command: ++ +[source,terminal] +---- +$ oc get deployments -n +---- ++ +For example, ++ +[source,terminal] +---- +$ oc get deployments -n openshift-operators +---- ++ +The names of Operators have the suffix `-operator`. The following example shows two {JaegerName} Operators and four {JaegerShortName} instances: ++ +[source,terminal] +---- +$ oc get deployments -n openshift-operators +---- ++ +You should see output similar to the following: ++ +[source,terminal] +---- +NAME READY UP-TO-DATE AVAILABLE AGE +elasticsearch-operator 1/1 1 1 93m +jaeger-operator 1/1 1 1 49m +jaeger-test 1/1 1 1 7m23s +jaeger-test2 1/1 1 1 6m48s +tracing1 1/1 1 1 7m8s +tracing2 1/1 1 1 35m +---- ++ +. To remove an instance of {JaegerShortName}, run the following command: ++ +[source,terminal] +---- +$ oc delete jaeger -n +---- ++ +For example: ++ +[source,terminal] +---- +$ oc delete jaeger tracing2 -n openshift-operators +---- ++ + +. To verify the deletion, run the `oc get deployments` command again: ++ +[source,terminal] +---- +$ oc get deployments -n +---- + ++ +For example: ++ +[source,terminal] +---- +$ oc get deployments -n openshift-operators +---- ++ +You should see generated output that is similar to the following example: ++ +[source,terminal] +---- +NAME READY UP-TO-DATE AVAILABLE AGE +elasticsearch-operator 1/1 1 1 94m +jaeger-operator 1/1 1 1 50m +jaeger-test 1/1 1 1 8m14s +jaeger-test2 1/1 1 1 7m39s +tracing1 1/1 1 1 7m59s +---- diff --git a/modules/distr-tracing-removing-instance.adoc b/modules/distr-tracing-removing-instance.adoc new file mode 100644 index 000000000000..278a45e40365 --- /dev/null +++ b/modules/distr-tracing-removing-instance.adoc @@ -0,0 +1,28 @@ +//// +This module included in the following assemblies: +- distr_tracing_install/dist-tracing-removing.adoc +//// + +[id="distr-tracing-removing_{context}"] += Removing a {JaegerName} instance using the web console + +[NOTE] +==== +When deleting an instance that 
uses the in-memory storage, all data is permanently lost. Data stored in a persistent storage such as Elasticsearch is not deleted when a {JaegerName} instance is removed. +==== + +.Procedure + +. Log in to the {product-title} web console. + +. Navigate to *Operators* -> *Installed Operators*. + +. Select the name of the project where the Operators are installed from the *Project* menu, for example, `openshift-operators`. + +. Click the {JaegerName} Operator. + +. Click the *Jaeger* tab. + +. Click the Options menu {kebab} next to the instance you want to delete and select *Delete Jaeger*. + +. In the confirmation message, click *Delete*. diff --git a/modules/distr-tracing-rn-fixed-issues.adoc b/modules/distr-tracing-rn-fixed-issues.adoc index e273d779e951..d44bb3eb5a1c 100644 --- a/modules/distr-tracing-rn-fixed-issues.adoc +++ b/modules/distr-tracing-rn-fixed-issues.adoc @@ -1,22 +1,22 @@ //// Module included in the following assemblies: -* distr-tracing-release-notes.adoc +* distributed-tracing-release-notes.adoc * service_mesh/v2x/servicemesh-release-notes.adoc //// [id="distr-tracing-rn-fixed-issues_{context}"] -= Distributed tracing fixed issues += {DTProductName} fixed issues //// Provide the following info for each issue if possible: -Consequence - What user action or situation would make this problem appear (If you have the foo option enabled and did x)? What did the customer experience as a result of the issue? What was the symptom? +Consequence - What user action or situation would make this problem appear (If you have the foo option enabled and did x)? What did the customer experience as a result of the issue? What was the symptom? Cause - Why did this happen? Fix - What did we change to fix the problem? -Result - How has the behavior changed as a result? Try to avoid “It is fixed” or “The issue is resolved” or “The error no longer presents”. +Result - How has the behavior changed as a result? 
Try to avoid “It is fixed” or “The issue is resolved” or “The error no longer presents”. //// * link:https://issues.redhat.com/browse/TRACING-2009[TRACING-2009] The Jaeger Operator has been updated to include support for the Strimzi Kafka Operator 0.23.0. -* link:https://issues.redhat.com/browse/TRACING-1907[TRACING-1907] The Jaeger agent sidecar injection was failing due to missing config maps in the application namespace. The config maps were getting automatically deleted due to an incorrect `OwnerReference` field setting, and as a result, the application pods were not moving past the "ContainerCreating" stage. The incorrect settings have been removed. +* link:https://issues.redhat.com/browse/TRACING-1907[TRACING-1907] The Jaeger agent sidecar injection was failing due to missing config maps in the application namespace. The config maps were getting automatically deleted due to an incorrect `OwnerReference` field setting and as a result, the application pods were not moving past the "ContainerCreating" stage. The incorrect settings have been removed. * link:https://issues.redhat.com/browse/TRACING-1725[TRACING-1725] Follow-up to TRACING-1631. Additional fix to ensure that Elasticsearch certificates are properly reconciled when there are multiple Jaeger production instances, using same name but within different namespaces. See also link:https://bugzilla.redhat.com/show_bug.cgi?id=1918920[BZ-1918920]. @@ -24,6 +24,6 @@ Result - How has the behavior changed as a result? Try to avoid “It is fixed * link:https://issues.redhat.com/browse/TRACING-1300[TRACING-1300] Failed connection between Agent and Collector when using Istio sidecar. An update of the Jaeger Operator enabled TLS communication by default between a Jaeger sidecar agent and the Jaeger Collector. -* link:https://issues.redhat.com/browse/TRACING-1208[TRACING-1208] Authentication "500 Internal Error" when accessing Jaeger UI. 
When trying to authenticate to the UI using OAuth, you get a 500 error because the oauth-proxy sidecar does not trust the custom CA bundle defined at installation time with the `additionalTrustBundle`. This was due to the config map with the `additionalTrustBundle` not being created in the Jaeger namespace. +* link:https://issues.redhat.com/browse/TRACING-1208[TRACING-1208] Authentication "500 Internal Error" when accessing Jaeger UI. When trying to authenticate to the UI using OAuth, you get a 500 error because the oauth-proxy sidecar does not trust the custom CA bundle defined at installation time with the `additionalTrustBundle`. * link:https://issues.redhat.com/browse/TRACING-1166[TRACING-1166] It is not currently possible to use the Jaeger streaming strategy within a disconnected environment. When a Kafka cluster is being provisioned, it results in a error: `Failed to pull image registry.redhat.io/amq7/amq-streams-kafka-24-rhel7@sha256:f9ceca004f1b7dccb3b82d9a8027961f9fe4104e0ed69752c0bdd8078b4a1076`. diff --git a/modules/distr-tracing-rn-known-issues.adoc b/modules/distr-tracing-rn-known-issues.adoc index 0e427776571a..78b02731618d 100644 --- a/modules/distr-tracing-rn-known-issues.adoc +++ b/modules/distr-tracing-rn-known-issues.adoc @@ -1,11 +1,11 @@ //// Module included in the following assemblies: * service_mesh/v2x/servicemesh-release-notes.adoc -* distr-tracing--release-notes.adoc +* distributed-tracing--release-notes.adoc //// [id="distr-tracing-rn-known-issues_{context}"] -= Distributed tracing known issues += {DTProductName} known issues //// Consequence - What user action or situation would make this problem appear (Selecting the Foo option with the Bar version 1.3 plugin enabled results in an error message)? What did the customer experience as a result of the issue? What was the symptom? @@ -14,21 +14,21 @@ Workaround (If there is one)- What can you do to avoid or negate the effects of Result - If the workaround does not completely address the problem. 
//// -The following limitations exist in Red Hat OpenShift distributed tracing platform: +These limitations exist in {DTProductName}: * Apache Spark is not supported. -* Jaeger streaming via AMQ/Kafka is unsupported on IBM Z and IBM Power Systems. +* The streaming deployment via AMQ/Kafka is unsupported on IBM Z and IBM Power Systems. -These are the known issues in Red Hat OpenShift distributed tracing platform: +These are the known issues for {DTProductName}: -* link:https://issues.redhat.com/browse/TRACING-2057[TRACING-2057] The Kafka API has been updated to `v1beta2` to support the Strimzi Kafka Operator 0.23.0. However, this API version is not supported by AMQ Streams 1.6.3. If you have the following environment, your Jaeger services does not upgrade, and you cannot create new Jaeger services or modify existing Jaeger services: +* link:https://issues.redhat.com/browse/TRACING-2057[TRACING-2057] The Kafka API has been updated to `v1beta2` to support the Strimzi Kafka Operator 0.23.0. However, this API version is not supported by AMQ Streams 1.6.3. If you have the following environment, your Jaeger services will not be upgraded, and you cannot create new Jaeger services or modify existing Jaeger services: ** Jaeger Operator channel: *1.17.x stable* or *1.20.x stable* ** AMQ Streams Operator channel: *amq-streams-1.6.x* + To resolve this issue, switch the subscription channel for your AMQ Streams Operator to either *amq-streams-1.7.x* or *stable*. -* link:https://bugzilla.redhat.com/show_bug.cgi?id=1918920[BZ-1918920] The Elasticsearch pods do not get restarted automatically after an update. As a workaround, restart the pods manually. +* link:https://bugzilla.redhat.com/show_bug.cgi?id=1918920[BZ-1918920] The Elasticsearch pods does not get restarted automatically after an update. As a workaround, restart the pods manually. -* link:https://issues.redhat.com/browse/TRACING-809[TRACING-809] Jaeger Ingester is incompatible with Kafka 2.3. 
When there are two or more instances of the Jaeger Ingester and enough traffic, the Ingester generates continuous rebalancing messages in the logs. The Ingester generates these logs because of a regression in Kafka 2.3. This regression was fixed in Kafka 2.3.1. For more information, see https://github.com/jaegertracing/jaeger/issues/1819[Jaegertracing-1819]. +* link:https://issues.redhat.com/browse/TRACING-809[TRACING-809] Jaeger Ingester is incompatible with Kafka 2.3. When there are two or more instances of the Jaeger Ingester and enough traffic, it will continuously generate rebalancing messages in the logs. This is due to a regression in Kafka 2.3 that was fixed in Kafka 2.3.1. For more information, see https://github.com/jaegertracing/jaeger/issues/1819[Jaegertracing-1819]. diff --git a/modules/distr-tracing-rn-new-features.adoc b/modules/distr-tracing-rn-new-features.adoc index 01a4aa3e9a26..acf920592be8 100644 --- a/modules/distr-tracing-rn-new-features.adoc +++ b/modules/distr-tracing-rn-new-features.adoc @@ -1,6 +1,6 @@ //// Module included in the following assemblies: -* distr-tracing--release-notes.adoc +- distributed-tracing-release-notes.adoc //// //// Feature – Describe the new functionality available to the customer. For enhancements, try to describe as specifically as possible where the customer will see changes. @@ -9,13 +9,19 @@ Result – If changed, describe the current user experience. //// [id="distr-tracing-rn-new-features_{context}"] -== New features and enhancements {ProductName} 2.0.0 +== New features and enhancements {DTProductName} 2.0.0 -This release marks the rebranding of Red Hat OpenShift Jaeger to {ProductName}. This effort includes the following: +This release marks the rebranding of Red Hat OpenShift Jaeger to {DTProductName}. This release consists of the following changes, additions, and improvements: -* Updates {ProductShortName} Operator to Jaeger 1.28. Going forward, {ProductName} will only support the `stable` Operator channel. 
Channels for individual releases are no longer supported. +* {DTProductName} now consists of the following two main components: -* Introduces a new OpenTelemetry Operator based on OpenTelemetry 0.33. Note that this Operator is a Technology Preview. +** *{JaegerName}* - This component is based on the open source link:https://www.jaegertracing.io/[Jaeger project]. + +** *{OTELName}* - This component is based on the open source link:https://opentelemetry.io/[OpenTelemetry project]. + +* Updates {JaegerName} Operator to Jaeger 1.28. Going forward, {DTProductName} will only support the `stable` Operator channel. Channels for individual releases are no longer supported. + +* Introduces a new {OTELName} Operator based on OpenTelemetry 0.33. Note that this Operator is a Technology Preview feature. * Adds support for OpenTelemetry protocol (OTLP) to the Query service. @@ -25,7 +31,7 @@ This release marks the rebranding of Red Hat OpenShift Jaeger to {ProductName}. This release also addresses Common Vulnerabilities and Exposures (CVEs) and bug fixes. -== Component versions supported in {ProductName} version {ProductVersion} +== Component versions supported in {DTProductName} version 2.0.0 |=== |Component |Version diff --git a/modules/distr-tracing-rn-technology-preview.adoc index aab25aa0249c..fcf7c3fd2a1e 100644 --- a/modules/distr-tracing-rn-technology-preview.adoc +++ b/modules/distr-tracing-rn-technology-preview.adoc @@ -1,14 +1,14 @@ //// Module included in the following assemblies: -* distr-tracing--release-notes.adoc +- distributed-tracing-release-notes.adoc //// [id="distr-tracing-rn-tech-preview_{context}"] -= Technology Preview += {DTProductName} Technology Preview //// Provide the following info for each issue if possible: -Description - Describe the new functionality available to the customer. For enhancements, try to describe as specifically as possible where the customer will see changes. 
Avoid the word “supports” as in [product] now supports [feature] to avoid customer confusion with full support. Say, for example, “available as a Technology Preview.” -Package - A brief description of what the customer has to install or enable to use the Technology Preview feature. (e.g., available in quickstart.zip on customer portal, JDF website, container on registry, enable option, etc.) +Description - Describe the new functionality available to the customer. For enhancements, try to describe as specifically as possible where the customer will see changes. Avoid the word “supports” as in [product] now supports [feature] to avoid customer confusion with full support. Say, for example, “available as a Technology Preview.” +Package - A brief description of what the customer has to install or enable to use the Technology Preview feature. (e.g., available in quickstart.zip on customer portal, JDF website, container on registry, enable option, etc.) //// [IMPORTANT] @@ -17,10 +17,10 @@ Technology Preview features are not supported with Red Hat production service le These features provide early access to upcoming product features, enabling customers to test functionality and provide feedback during the development process. For more information about the support scope of Red Hat Technology Preview features, see https://access.redhat.com/support/offerings/techpreview/. ==== -== {ProductName} 2.0.0 Technology Preview +== {DTProductName} 2.0.0 Technology Preview -This release includes the addition of the distributed tracing data collection, which you install using the OpenTelemetry Operator. +This release includes the addition of the {OTELName}, which you install using the {OTELName} Operator. {OTELName} is based on the link:https://opentelemetry.io/[OpenTelemetry] APIs and instrumentation. -{ProductName} data collection is based on the OpenTelemetry Operator and Collector. 
The Collector can be used to receive traces in either the OpenTelemetry or Jaeger protocol and send the trace data to the OpenShift distributed tracing platform. Other capabilities of the Collector are not supported at this time. +{OTELName} includes the OpenTelemetry Operator and Collector. The Collector can be used to receive traces in either the OpenTelemetry or Jaeger protocol and send the trace data to {DTProductName}. Other capabilities of the Collector are not supported at this time. -The OpenTelemetry collector allows developers to instrument their code with vendor agnostic APIs, avoiding vendor lock-in and enabling a growing ecosystem of observability tooling. +The OpenTelemetry Collector allows developers to instrument their code with vendor-agnostic APIs, avoiding vendor lock-in and enabling a growing ecosystem of observability tooling. diff --git a/modules/distr-tracing-sidecar-automatic.adoc b/modules/distr-tracing-sidecar-automatic.adoc new file mode 100644 index 000000000000..bf579ac0d79e --- /dev/null +++ b/modules/distr-tracing-sidecar-automatic.adoc @@ -0,0 +1,39 @@ +//// +This module included in the following assemblies: +- distr_tracing_install/distr-tracing-deploying.adoc +//// + +[id="distr-tracing-sidecar-automatic_{context}"] += Automatically injecting sidecars + +To enable this feature, add the `sidecar.jaegertracing.io/inject` annotation, setting it to either the string `true` or to the {JaegerShortName} instance name that is returned by running `$ oc get jaegers`. +When you specify `true`, there should be only a single {JaegerShortName} instance in the same namespace as the deployment; otherwise, the Operator cannot determine which {JaegerShortName} instance to use. A specific {JaegerShortName} instance name on a deployment has a higher precedence than `true` applied on its namespace. 
+ +The following snippet shows a simple application that will inject a sidecar, with the agent pointing to the single {JaegerShortName} instance available in the same namespace: + +.Automatic sidecar injection example +[source,yaml] +---- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: myapp + annotations: + "sidecar.jaegertracing.io/inject": "true" # <1> +spec: + selector: + matchLabels: + app: myapp + template: + metadata: + labels: + app: myapp + spec: + containers: + - name: myapp + image: acme/myapp:myversion +---- + +<1> Set to either the string `true` or to the Jaeger instance name. + +When the sidecar is injected, the agent can then be accessed at its default location on `localhost`. diff --git a/modules/distr-tracing-sidecar-manual.adoc b/modules/distr-tracing-sidecar-manual.adoc new file mode 100644 index 000000000000..80632e9b9f38 --- /dev/null +++ b/modules/distr-tracing-sidecar-manual.adoc @@ -0,0 +1,57 @@ +//// +This module included in the following assemblies: +- distr_tracing_install/distr-tracing-deploying.adoc +//// + +[id="distr-tracing-sidecar-manual_{context}"] += Manually injecting sidecars + +For controller types other than `Deployments`, such as `StatefulSets` and `DaemonSets`, you can manually define the {JaegerShortName} agent sidecar in your specification. 
+ +The following snippet shows the manual definition you can include in your containers section for a {JaegerShortName} agent sidecar: + +.Sidecar definition example for a `StatefulSet` +[source,yaml] +---- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: example-statefulset + namespace: example-ns + labels: + app: example-app +spec: + + spec: + containers: + - name: example-app + image: acme/myapp:myversion + ports: + - containerPort: 8080 + protocol: TCP + - name: jaeger-agent + image: registry.redhat.io/distributed-tracing/jaeger-agent-rhel7:<version> + # The agent version must match the Operator version + imagePullPolicy: IfNotPresent + ports: + - containerPort: 5775 + name: zk-compact-trft + protocol: UDP + - containerPort: 5778 + name: config-rest + protocol: TCP + - containerPort: 6831 + name: jg-compact-trft + protocol: UDP + - containerPort: 6832 + name: jg-binary-trft + protocol: UDP + - containerPort: 14271 + name: admin-http + protocol: TCP + args: + - --reporter.grpc.host-port=dns:///jaeger-collector-headless.example-ns:14250 + - --reporter.type=grpc +---- + +The agent can then be accessed at its default location on localhost. diff --git a/modules/distr-tracing-upgrading-es5-es6.adoc b/modules/distr-tracing-upgrading-es5-es6.adoc new file mode 100644 index 000000000000..710e558a6345 --- /dev/null +++ b/modules/distr-tracing-upgrading-es5-es6.adoc @@ -0,0 +1,88 @@ +//// +This module included in the following assemblies: +- distr_tracing_install/distr-tracing-updating +//// + +[id="upgrading_es5_es6_{context}"] += Upgrading from Elasticsearch 5 to 6 + +When updating from Elasticsearch 5 to 6, you must delete your {JaegerShortName} instance, and then recreate the {JaegerShortName} instance because of an issue with certificates. Re-creating the {JaegerShortName} instance triggers the creation of a new set of certificates. 
If you are using persistent storage, the same volumes can be mounted for the new {JaegerShortName} instance as long as the {JaegerShortName} name and namespace for the new {JaegerShortName} instance are the same as the deleted {JaegerShortName} instance. + +.Procedure if {JaegerShortName} is installed as part of Red Hat Service Mesh + +. Determine the name of your Jaeger custom resource file. In this example, `istio-system` is the control plane namespace. ++ +[source,terminal] +---- +$ oc get jaeger -n <istio-system> +---- ++ +You should see something like the following: ++ +[source,terminal] +---- +NAME STATUS VERSION STRATEGY STORAGE AGE +jaeger Running 1.24.1 production elasticsearch d21h +---- ++ +. Copy the generated custom resource file into a temporary directory: ++ +[source,terminal] +---- +$ oc get jaeger jaeger -oyaml -n <istio-system> > /tmp/jaeger-cr.yaml +---- ++ +. Delete the {JaegerShortName} instance: ++ +[source,terminal] +---- +$ oc delete jaeger jaeger -n <istio-system> +---- ++ +. Recreate the {JaegerShortName} instance from your copy of the custom resource file: ++ +[source,terminal] +---- +$ oc create -f /tmp/jaeger-cr.yaml -n <istio-system> +---- ++ +. Delete the copy of the generated custom resource file: ++ +[source,terminal] +---- +$ rm /tmp/jaeger-cr.yaml +---- + + +.Procedure if {JaegerShortName} is not installed as part of Red Hat Service Mesh + +Before you begin, create a copy of your Jaeger custom resource file. + +. Delete the {JaegerShortName} instance by deleting the custom resource file: ++ +[source,terminal] +---- +$ oc delete -f <jaeger-cr-file> +---- ++ +For example: ++ +[source,terminal] +---- +$ oc delete -f jaeger-prod-elasticsearch.yaml +---- ++ +. Recreate your {JaegerShortName} instance from the backup copy of your custom resource file: ++ +[source,terminal] +---- +$ oc create -f <jaeger-cr-file> +---- ++ +. 
Validate that your pods have restarted: ++ +[source,terminal] +---- +$ oc get pods -n <jaeger-namespace> -w +---- ++ diff --git a/modules/ossm-config-sampling.adoc b/modules/ossm-config-sampling.adoc index ed99928faa0e..da49adc18157 100644 --- a/modules/ossm-config-sampling.adoc +++ b/modules/ossm-config-sampling.adoc @@ -20,7 +20,7 @@ In a basic installation, `spec.tracing.sampling` is set to `10000`, which sample ==== The Envoy proxy sampling rate applies for applications that are available to a Service Mesh, and use the Envoy proxy. This sampling rate determines how much data the Envoy proxy collects and tracks. -The Jaeger remote sampling rate applies to applications that are external to the Service Mesh, and do not use the Envoy proxy, such as a database. This sampling rate determines how much data the distributed tracing system collects and stores. For more information, see xref:../../jaeger/jaeger_install/rhbjaeger-deploying.adoc#jaeger-config-sampling_jaeger-deploying[Configurating Jaeger sampling]. +The Jaeger remote sampling rate applies to applications that are external to the Service Mesh, and do not use the Envoy proxy, such as a database. This sampling rate determines how much data the distributed tracing system collects and stores. For more information, see xref:../../distr_tracing/distr_tracing_install/distr-tracing-deploying.adoc#distr-tracing-config-sampling_deploying-distributed-tracing[Distributed tracing configuration options]. ==== .Procedure diff --git a/modules/ossm-document-attributes.adoc b/modules/ossm-document-attributes.adoc index 714f912ec696..0c7c09348980 100644 --- a/modules/ossm-document-attributes.adoc +++ b/modules/ossm-document-attributes.adoc @@ -5,20 +5,30 @@ :toclevels: 4 :toc-title: :experimental: -// -// Product content attributes, that is, substitution variables in the files. 
-// +:DownloadURL: registry.redhat.io +:console-redhat-com: Red Hat OpenShift Cluster Manager +:kebab: image:kebab.png[title="Options menu"] + +// Service Mesh product content attributes, that is, substitution variables in the files. :product-title: OpenShift Container Platform -:ProductName: Red Hat OpenShift Service Mesh :product-dedicated: Red Hat OpenShift Dedicated +:ProductName: Red Hat OpenShift Service Mesh :ProductShortName: Service Mesh :ProductRelease: :ProductVersion: 2.1 :MaistraVersion: 2.0 :product-build: -:DownloadURL: registry.redhat.io -:cloud-redhat-com: Red Hat OpenShift Cluster Manager -:kebab: image:kebab.png[title="Options menu"] + +// Distributed Tracing product content attributes, for modules used in both products +:DTProductName: Red Hat OpenShift distributed tracing +:DTShortName: distributed tracing +:DTProductVersion: 2.0 +:JaegerName: Red Hat OpenShift distributed tracing platform +:JaegerShortName: distributed tracing platform +:JaegerVersion: 1.28.0 +:OTELName: Red Hat OpenShift distributed tracing data collection +:OTELShortName: distributed tracing data collection +:OTELVersion: 0.33.0 // // Documentation publishing attributes used in the master-docinfo.xml file // Note that the DocInfoProductName generates the URL for the product page. @@ -27,13 +37,11 @@ :DocInfoProductName: OpenShift Service Mesh :DocInfoProductNumber: 2.0 // -// Book Names: -// Defining the book names in document attributes instead of hard-coding them in -// the master.adoc files and in link references. This makes it easy to change the -// book name if necessary. -// Using the pattern ending in 'BookName' makes it easy to grep for occurrences -// throughout the topics -// + +//Book Names: +//Defining the book names in document attributes instead of hard-coding them in the master.adoc files and in link references. This makes it easy to change the book name if necessary. 
+//Using the pattern ending in 'BookName' makes it easy to grep for occurrences throughout the topics + +:RN_BookName: Red Hat OpenShift Service Mesh Release Notes :Install_BookName: Installing Red Hat OpenShift Service Mesh :Using_BookName: Using Red Hat OpenShift Service Mesh -:RN_BookName: Red Hat OpenShift Service Mesh Release Notes diff --git a/modules/ossm-jaeger-config-es-cleaner-v1x.adoc b/modules/ossm-jaeger-config-es-cleaner-v1x.adoc index aff48da49a54..43f09f493ab4 100644 --- a/modules/ossm-jaeger-config-es-cleaner-v1x.adoc +++ b/modules/ossm-jaeger-config-es-cleaner-v1x.adoc @@ -5,7 +5,7 @@ [id="ossm-jaeger-config-es-cleaner-v1x_{context}"] = Configuring the Elasticsearch index cleaner job -When the {ProductShortName} Operator creates the `ServiceMeshControlPlane` it also creates the custom resource (CR) for Jaeger. The Jaeger operator then uses this CR when creating Jaeger instances. +When the {ProductShortName} Operator creates the `ServiceMeshControlPlane` it also creates the custom resource (CR) for Jaeger. The {JaegerName} Operator then uses this CR when creating Jaeger instances. When using Elasticsearch storage, by default a job is created to clean old traces from it. To configure the options for this job, you edit the Jaeger custom resource (CR), to customize it for your use case. The relevant options are listed below. diff --git a/modules/ossm-rn-fixed-issues-1x.adoc b/modules/ossm-rn-fixed-issues-1x.adoc index d466aa671a33..5f399fffad15 100644 --- a/modules/ossm-rn-fixed-issues-1x.adoc +++ b/modules/ossm-rn-fixed-issues-1x.adoc @@ -38,7 +38,7 @@ The following issues been resolved in the current release: * link:https://issues.jboss.org/browse/MAISTRA-1001[MAISTRA-1001] Closing HTTP/2 connections could lead to segmentation faults in `istio-proxy`. -* link:https://issues.jboss.org/browse/MAISTRA-932[MAISTRA-932] Added the `requires` metadata to add dependency relationship between Jaeger operator and OpenShift Elasticsearch Operator. 
Ensures that when the Jaeger operator is installed, it automatically deploys the OpenShift Elasticsearch Operator if it is not available. +* link:https://issues.jboss.org/browse/MAISTRA-932[MAISTRA-932] Added the `requires` metadata to add dependency relationship between Jaeger Operator and OpenShift Elasticsearch Operator. Ensures that when the Jaeger Operator is installed, it automatically deploys the OpenShift Elasticsearch Operator if it is not available. * link:https://issues.jboss.org/browse/MAISTRA-862[MAISTRA-862] Galley dropped watches and stopped providing configuration to other components after many namespace deletions and re-creations. diff --git a/serverless/monitor/serverless-tracing.adoc b/serverless/monitor/serverless-tracing.adoc index 3067916ebfc8..26579893d119 100644 --- a/serverless/monitor/serverless-tracing.adoc +++ b/serverless/monitor/serverless-tracing.adoc @@ -15,7 +15,7 @@ The units of work might be executed in different processes or hosts. Developers can visualize call flows in large architectures with distributed tracing. which is useful for understanding serialization, parallelism, and sources of latency. -For more information about Jaeger, see xref:../../jaeger/jaeger_arch/rhbjaeger-architecture.adoc#rhbjaeger-architecture[Jaeger architecture] and xref:../../jaeger/jaeger_install/rhbjaeger-installation.adoc#rhbjaeger-installation[Installing Jaeger]. +For more information about distributed tracing, see xref:../../distr_tracing/distr_tracing_arch/distr-tracing-architecture.adoc#distributed-tracing-architecture[distributed tracing architecture] and xref:../../distr_tracing/distr_tracing_install/distr-tracing-installing.adoc#install-distributed-tracing[Installing distributed tracing]. 
// Serverless specific modules include::modules/serverless-jaeger-config.adoc[leveloffset=+1] diff --git a/service_mesh/v2x/ossm-observability.adoc b/service_mesh/v2x/ossm-observability.adoc index 98dfabe0b7be..2a4bda218947 100644 --- a/service_mesh/v2x/ossm-observability.adoc +++ b/service_mesh/v2x/ossm-observability.adoc @@ -23,7 +23,7 @@ include::modules/ossm-config-sampling.adoc[leveloffset=+2] include::modules/ossm-config-external-jaeger.adoc[leveloffset=+2] -For more information about configuring Jaeger, see the xref:../../jaeger/jaeger_install/rhbjaeger-deploying.adoc#jaeger-deploy-default_jaeger-deploying[Jaeger documentation]. +For more information about configuring Jaeger, see the xref:../../distr_tracing/distr_tracing_install/distr-tracing-deploying.adoc#distr-tracing-deploy-default_deploying-distributed-tracing[distributed tracing documentation]. include::modules/ossm-access-grafana.adoc[leveloffset=+1] diff --git a/service_mesh/v2x/ossm-reference-jaeger.adoc b/service_mesh/v2x/ossm-reference-jaeger.adoc index df2d36ba4952..b3c808ca27a0 100644 --- a/service_mesh/v2x/ossm-reference-jaeger.adoc +++ b/service_mesh/v2x/ossm-reference-jaeger.adoc @@ -25,9 +25,9 @@ include::modules/jaeger-config-sampling.adoc[leveloffset=+2] include::modules/jaeger-config-storage.adoc[leveloffset=+2] -For more information about configuring Elasticsearch with {product-title}, see xref:../../logging/config/cluster-logging-log-store.adoc[Configuring the log store] or xref:../../jaeger/jaeger_install/rhbjaeger-deploying.adoc[Configuring and deploying Jaeger]. +For more information about configuring Elasticsearch with {product-title}, see xref:../../logging/config/cluster-logging-log-store.adoc[Configuring the log store] or xref:../../distr_tracing/distr_tracing_install/distr-tracing-deploying.adoc[Configuring and deploying distributed tracing]. 
-For information about connecting to an external Elasticsearch instance, see xref:../../jaeger/jaeger_install/rhbjaeger-deploying.adoc#jaeger-config-external-es_jaeger-deploying[Connecting to an existing Elasticsearch instance]. +//TO DO For information about connecting to an external Elasticsearch instance, see xref:../../distr_tracing/distr_tracing_install/distr-tracing-deploying.adoc#jaeger-config-external-es_jaeger-deploying[Connecting to an existing Elasticsearch instance]. include::modules/jaeger-config-query.adoc[leveloffset=+2] diff --git a/welcome/oke_about.adoc b/welcome/oke_about.adoc index 7030e8281c76..9462eb33bcf9 100644 --- a/welcome/oke_about.adoc +++ b/welcome/oke_about.adoc @@ -221,270 +221,116 @@ The following table is a summary of the feature availability in {oke} and .Features in {oke} and {product-title} |=== -2+| |{oke} |{product-title} - -2+h|Life Cycle and Kubernetes -h| -h| - -.11+| -| Fully Automated Installers (IPI) -| Yes -| Yes - -| Customizable Installers (UPI) -| Yes -| Yes - -| Disconnect Installation -| Yes -| Yes - -| {op-system-base} or {op-system} entitlement -| Yes -| Yes - -| Automated Operating System Management (CoreOS) -| Yes -| Yes - -| Existing RHEL manual attach to cluster (BYO) -| Yes -| Yes - -| CRIO Runtime -| Yes -| Yes - -| OpenShift Virtualization -| Yes -| Yes - -| Enterprise Secured Kubernetes -| Yes -| Yes - -| Auth Integrations, RBAC, SCC, Multi-Tenancy Admission Controller -| Yes -| Yes - -| Kubectl and oc automated command line -| Yes -| Yes - -2+h|Day 2 Management -h| -h| - -.13+| -| Operator Enabled Platform (CVO) -| Yes -| Yes - -| Operator Lifecycle Manager (OLM) -| Yes -| Yes - -| Cluster Monitoring (Prometheus) -| Yes -| Yes - -| User Workload Monitoring -| -| Yes - -| Device Manager (i.e., GPU) -| Yes -| Yes - -| EFK Logging (ElasticSearch and Kibana) -| -| Yes - -| Log Pipelines (fluentd forwarding) -| Yes -| Yes - -| Administrator Web Console -| Yes -| Yes - -| Service Mesh (Kiali, Jaeger, and 
OpenTracing) -| -| Yes - -| Over the Air Smart Upgrades -| Yes -| Yes - -| Telemeter and Insights Connected Experience -| Yes -| Yes - -| Metering and Cost Management SaaS Service -| -| Yes - -| OCM SaaS Service -| Yes -| Yes - -2+h|Network and Storage -h| -h| - -.15+| -| OVS and OVN SDN -| Yes -| Yes - -| HAProxy Ingress Controller -| Yes -| Yes - -| Kourier Ingress Controller -| -| Yes - -| OpenStack Kuryr Integration -| Yes -| Yes - -| Ingress Cluster-wide Firewall -| Yes -| Yes - -| Egress Pod and Namespace Granular Control -| Yes -| Yes - -| Ingress Non-Standard Ports -| Yes -| Yes - -| Service Mesh (Istio and Envoy) -| -| Yes - -| Multus and Available Multus Plugins -| Yes -| Yes - -| Network Policies -| Yes -| Yes - -| IPv6 Single and Dual Stack (primary interface) -| -| - -| IPv6 Single and Dual Stack (Multus secondary interfaces) -| Yes -| Yes - -| CNI Plugin ISV Compatibility -| Yes -| Yes - -| OpenShift Container Storage Sub Compatibility (not included in OCP or {oke}) -| Yes -| Yes - -| CSI Plugin ISV Compatibility -| Yes -| Yes - -2+h|Application Services -h| -h| - -.10+| -| RHT Middleware Bundles Sub Compatibility (not included in OCP) -| -| Yes - -| IBM Cloud Pak Sub Compatibility (not included in OCP) -| -| Yes - -| RHT and IBM middleware a la carte purchases (not included in OCP) -| Yes -| Yes - -| ISV or Partner Operator and Container Compatibility (not included in OCP or {oke}) -| Yes -| Yes - -| Embedded OperatorHub -| Yes -| Yes - -| Embedded Marketplace -| Yes -| Yes - -| Developer Application Catalog -| -| Yes - -| Quay Compatibility (not included) -| Yes -| Yes - -| RHEL Software Collections and RHT SSO Common Service (included) -| Yes -| Yes - -| OpenShift Serverless -| -| Yes - -2+h|Developer Experience -h| -h| - -.10+| -| Embedded Registry -| Yes -| Yes - -| CodeReady Containers -| -| Yes - -| CodeReady Workspaces -| -| Yes - -| Developer Web Console -| -| Yes - -| Helm -| Yes -| Yes - -| OpenShift Pipelines (Jenkins and Tekton) -| -| Yes 
- -| odo -| -| Yes - -| Source to Image and Tekton Builders -| -| Yes - -| OpenShift Serverless FaaS -| -| Yes - -| IDE Integrations -| -| Yes - - +| Feature | {oke} | {product-title} | Operator name +| Fully Automated Installers (IPI) | Included | Included | N/A +| Customizable Installers (UPI) | Included | Included | N/A +| Disconnected Installation | Included | Included | N/A +| {op-system-base-full} or {op-system-first} entitlement | Included | Included | N/A +| Existing RHEL manual attach to cluster (BYO) | Included | Included | N/A +| CRIO Runtime | Included | Included | N/A +| Over the Air Smart Upgrades and Operating System ({op-system}) Management | Included | Included | N/A +| Enterprise Secured Kubernetes | Included | Included | N/A +| Kubectl and `oc` automated command line | Included | Included | N/A +| Auth Integrations, RBAC, SCC, Multi-Tenancy Admission Controller | Included | Included | N/A +| Operator Lifecycle Manager (OLM) | Included | Included | N/A +| Administrator web console | Included | Included | N/A +| OpenShift Virtualization | Included | Included | OpenShift Virtualization Operator +| Compliance Operator provided by Red Hat | Included | Included | Compliance Operator +| File Integrity Operator | Included | Included | File Integrity Operator +| Gatekeeper Operator | Included | Included | Gatekeeper Operator +| Klusterlet provided by Red Hat | Included | Included | N/A +| Kube Descheduler Operator provided by Red Hat | Included | Included | Kube Descheduler Operator +| Local Storage provided by Red Hat | Included | Included | Local Storage Operator +| Node Feature Discovery provided by Red Hat | Included | Included | Node Feature Discovery Operator +| Performance Add-on Operator | Included | Included | Performance Add-on Operator +| PTP Operator provided by Red Hat | Included | Included | PTP Operator +| Service Telemetry Operator provided by Red Hat | Included | Included | Service Telemetry Operator +| SR-IOV Network Operator | Included 
| Included | SR-IOV Network Operator +| Vertical Pod Autoscaler | Included | Included | Vertical Pod Autoscaler +| Cluster Monitoring (Prometheus) | Included | Included | Cluster Monitoring +| Device Manager (for example, GPU) | Included | Included | N/A +| Log Forwarding (with fluentd) | Included | Included | Red Hat OpenShift Logging Operator (for log forwarding with fluentd) +| Telemeter and Insights Connected Experience | Included | Included | N/A +s| Feature s| {oke} s| {product-title} s| Operator name +| OpenShift Cloud Manager (OCM) SaaS Service | Included | Included | N/A +| OVS and OVN SDN | Included | Included | N/A +| HAProxy Ingress Controller | Included | Included | N/A +| {rh-openstack-first} Kuryr Integration | Included | Included | N/A +| Ingress Cluster-wide Firewall | Included | Included | N/A +| Egress Pod and Namespace Granular Control | Included | Included | N/A +| Ingress Non-Standard Ports | Included | Included | N/A +| Multus and Available Multus Plugins | Included | Included | N/A +| Network Policies | Included | Included | N/A +| IPv6 Single and Dual Stack | Included | Included | N/A +| CNI Plugin ISV Compatibility | Included | Included | N/A +| CSI Plugin ISV Compatibility | Included | Included | N/A +| RHT and IBM middleware à la carte purchases (not included in {product-title} or {oke}) | Included | Included | N/A +| ISV or Partner Operator and Container Compatibility (not included in {product-title} or {oke}) | Included | Included | N/A +| Embedded OperatorHub | Included | Included | N/A +| Embedded Marketplace | Included | Included | N/A +| Quay Compatibility (not included) | Included | Included | N/A +| RHEL Software Collections and RHT SSO Common Service (included) | Included | Included | N/A +| Embedded Registry | Included | Included | N/A +| Helm | Included | Included | N/A +| User Workload Monitoring | Not Included | Included | N/A +| Metering and Cost Management SaaS Service | Not Included | Included | N/A +| Platform Logging | 
Not Included | Included | Red Hat OpenShift Logging Operator +| OpenShift Elasticsearch Operator provided by Red Hat | Not Included | Cannot be run standalone | N/A +| Developer Web Console | Not Included | Included | N/A +| Developer Application Catalog | Not Included | Included | N/A +| Source to Image and Builder Automation (Tekton) | Not Included | Included | N/A +| OpenShift Service Mesh | Not Included | Included | OpenShift Service Mesh Operator +| Service Binding Operator | Not Included | Included | Service Binding Operator +s| Feature s| {oke} s| {product-title} s| Operator name +| Red Hat OpenShift Serverless | Not Included | Included | OpenShift Serverless Operator +| Web Terminal provided by Red Hat | Not Included | Included | Web Terminal Operator +| Jenkins Operator provided by Red Hat | Not Included | Included | Jenkins Operator +| Red Hat OpenShift Pipelines Operator | Not Included | Included | OpenShift Pipelines Operator +| Embedded Component of IBM Cloud Pak and RHT MW Bundles | Not Included | Included | N/A +| Red Hat OpenShift GitOps | Not Included | Included | OpenShift GitOps +| Red Hat CodeReady Workspaces | Not Included | Included | CodeReady Workspaces +| Red Hat CodeReady Containers | Not Included | Included | N/A +| Quay Bridge Operator provided by Red Hat | Not Included | Included | Quay Bridge Operator +| Quay Container Security provided by Red Hat | Not Included | Included | Quay Operator +| Red Hat OpenShift distributed tracing platform | Not Included | Included | Red Hat OpenShift distributed tracing platform Operator +| Red Hat OpenShift Kiali | Not Included | Included | Kiali Operator +| Metering provided by Red Hat (deprecated) | Not Included | Included | N/A +| Migration Toolkit for Containers Operator | Not Included | Included | Migration Toolkit for Containers Operator +| Cost management for OpenShift | Not included | Included | N/A +| Red Hat JBoss Web Server | Not included | Included | JWS Operator +| Red Hat Build of Quarkus 
| Not included | Included | N/A +| Kourier Ingress Controller | Not included | Included | N/A +| RHT Middleware Bundles Sub Compatibility (not included in {product-title}) | Not included | Included | N/A +| IBM Cloud Pak Sub Compatibility (not included in {product-title}) | Not included | Included | N/A +| OpenShift Do (`odo`) | Not included | Included | N/A +| Source to Image and Tekton Builders | Not included | Included | N/A +| OpenShift Serverless FaaS | Not included | Included | N/A +| IDE Integrations | Not included | Included | N/A +| Windows Machine Config Operator | Community Windows Machine Config Operator included - no subscription required | Red Hat Windows Machine Config Operator included - Requires separate subscription | Windows Machine Config Operator +| Red Hat Quay | Not Included - Requires separate subscription | Not Included - Requires separate subscription | Quay Operator +| Red Hat Advanced Cluster Management | Not Included - Requires separate subscription | Not Included - Requires separate subscription | Advanced Cluster Management for Kubernetes +| Red Hat Advanced Cluster Security | Not Included - Requires separate subscription | Not Included - Requires separate subscription | N/A +| OpenShift Container Storage | Not Included - Requires separate subscription | Not Included - Requires separate subscription | OpenShift Container Storage +s| Feature s| {oke} s| {product-title} s| Operator name +| Ansible Automation Platform Resource Operator | Not Included - Requires separate subscription | Not Included - Requires separate subscription | Ansible Automation Platform Resource Operator +| Business Automation provided by Red Hat | Not Included - Requires separate subscription | Not Included - Requires separate subscription | Business Automation Operator +| Data Grid provided by Red Hat | Not Included - Requires separate subscription | Not Included - Requires separate subscription | Data Grid Operator +| Red Hat Integration provided by Red Hat | 
Not Included - Requires separate subscription | Not Included - Requires separate subscription | Red Hat Integration Operator +| Red Hat Integration - 3Scale provided by Red Hat | Not Included - Requires separate subscription | Not Included - Requires separate subscription | 3scale +| Red Hat Integration - 3Scale APICast gateway provided by Red Hat | Not Included - Requires separate subscription | Not Included - Requires separate subscription | 3scale APIcast +| Red Hat Integration - AMQ Broker | Not Included - Requires separate subscription | Not Included - Requires separate subscription | AMQ Broker +| Red Hat Integration - AMQ Broker LTS | Not Included - Requires separate subscription | Not Included - Requires separate subscription | +| Red Hat Integration - AMQ Interconnect | Not Included - Requires separate subscription | Not Included - Requires separate subscription | AMQ Interconnect +| Red Hat Integration - AMQ Online | Not Included - Requires separate subscription | Not Included - Requires separate subscription | +| Red Hat Integration - AMQ Streams | Not Included - Requires separate subscription | Not Included - Requires separate subscription | AMQ Streams +| Red Hat Integration - Camel K | Not Included - Requires separate subscription | Not Included - Requires separate subscription | Camel K +| Red Hat Integration - Fuse Console | Not Included - Requires separate subscription | Not Included - Requires separate subscription | Fuse Console +| Red Hat Integration - Fuse Online | Not Included - Requires separate subscription | Not Included - Requires separate subscription | Fuse Online +| Red Hat Integration - Service Registry Operator | Not Included - Requires separate subscription | Not Included - Requires separate subscription | Service Registry +| API Designer provided by Red Hat | Not Included - Requires separate subscription | Not Included - Requires separate subscription | API Designer +| JBoss EAP provided by Red Hat | Not Included - Requires separate 
subscription | Not Included - Requires separate subscription | JBoss EAP +| JBoss Web Server provided by Red Hat | Not Included - Requires separate subscription | Not Included - Requires separate subscription | JBoss Web Server +| Smart Gateway Operator | Not Included - Requires separate subscription | Not Included - Requires separate subscription | Smart Gateway Operator |===