diff --git a/_attributes/common-attributes.adoc b/_attributes/common-attributes.adoc index 919a7cd92d..44f88e1676 100644 --- a/_attributes/common-attributes.adoc +++ b/_attributes/common-attributes.adoc @@ -116,18 +116,18 @@ endif::[] //distributed tracing :DTProductName: Red Hat OpenShift distributed tracing platform :DTShortName: distributed tracing platform -:DTProductVersion: 2.9 +:DTProductVersion: 3.0 :JaegerName: Red Hat OpenShift distributed tracing platform (Jaeger) :JaegerShortName: distributed tracing platform (Jaeger) -:JaegerVersion: 1.47.0 -:OTELName: Red Hat OpenShift distributed tracing data collection -:OTELShortName: distributed tracing data collection -:OTELOperator: Red Hat OpenShift distributed tracing data collection Operator -:OTELVersion: 0.81.0 +:JaegerVersion: 1.51.0 +:OTELName: Red Hat build of OpenTelemetry +:OTELShortName: Red Hat build of OpenTelemetry +:OTELOperator: Red Hat build of OpenTelemetry Operator +:OTELVersion: 0.89.0 :TempoName: Red Hat OpenShift distributed tracing platform (Tempo) :TempoShortName: distributed tracing platform (Tempo) :TempoOperator: Tempo Operator -:TempoVersion: 2.1.1 +:TempoVersion: 2.3.0 //logging :logging-title: logging subsystem for Red Hat OpenShift :logging-title-uc: Logging subsystem for Red Hat OpenShift diff --git a/_topic_maps/_topic_map.yml b/_topic_maps/_topic_map.yml index 61bd8b37ea..8d74bd306f 100644 --- a/_topic_maps/_topic_map.yml +++ b/_topic_maps/_topic_map.yml @@ -2649,6 +2649,90 @@ Topics: - Name: Configuring the Cluster Observability Operator to monitor a service File: configuring-the-cluster-observability-operator-to-monitor-a-service --- +Name: Distributed tracing +Dir: distr_tracing +Distros: openshift-enterprise +Topics: +- Name: Distributed tracing release notes + Dir: distr_tracing_rn + Topics: + - Name: "3.0" + File: distr-tracing-rn-3-0 + - Name: "2.9.2" + File: distr-tracing-rn-2-9-2 + - Name: "2.9.1" + File: distr-tracing-rn-2-9-1 + - Name: "2.9" + File: distr-tracing-rn-2-9 + - Name: "2.8" + File: distr-tracing-rn-2-8 + - Name: "2.7" + File: distr-tracing-rn-2-7 + - Name: "2.6" + File: distr-tracing-rn-2-6 + - Name: "2.5" + File: distr-tracing-rn-2-5 + - Name: "2.4" + File: distr-tracing-rn-2-4 + - Name: "2.3" + File: distr-tracing-rn-2-3 + - Name: "2.2" + File: distr-tracing-rn-2-2 + - Name: "2.1" + File: distr-tracing-rn-2-1 + - Name: "2.0" + File: distr-tracing-rn-2-0 +- Name: Distributed tracing architecture + Dir: distr_tracing_arch + Topics: + - Name: Distributed tracing architecture + File: distr-tracing-architecture +- Name: Distributed tracing platform (Jaeger) + Dir: distr_tracing_jaeger + Topics: + - Name: Installation + File: distr-tracing-jaeger-installing + - Name: Configuration + File: distr-tracing-jaeger-configuring + - Name: Updating + File: distr-tracing-jaeger-updating + - Name: Removal + File: distr-tracing-jaeger-removing +- Name: Distributed tracing platform (Tempo) + Dir: distr_tracing_tempo + Topics: + - Name: Installation + File: distr-tracing-tempo-installing + - Name: Configuration + File: distr-tracing-tempo-configuring + - Name: Updating + File: distr-tracing-tempo-updating + - Name: Removal + File: distr-tracing-tempo-removing +--- +Name: Red Hat build of OpenTelemetry +Dir: otel +Distros: openshift-enterprise +Topics: +- Name: Release notes + File: otel-release-notes +- Name: Installation + File: otel-installing +- Name: Collector configuration + File: otel-configuring +- Name: Instrumentation + File: otel-instrumentation +- Name: Use + File: otel-using +- Name: 
Troubleshooting + File: otel-troubleshooting +- Name: Migration + File: otel-migrating +- Name: Updating + File: otel-updating +- Name: Removal + File: otel-removing +--- Name: Network Observability Dir: network_observability Distros: openshift-enterprise,openshift-origin @@ -3633,80 +3717,6 @@ Topics: - Name: Removing Service Mesh File: removing-ossm --- -Name: Distributed tracing -Dir: distr_tracing -Distros: openshift-enterprise -Topics: -- Name: Distributed tracing release notes - Dir: distr_tracing_rn - Topics: - - Name: "2.9.2" - File: distr-tracing-rn-2-9-2 - - Name: "2.9.1" - File: distr-tracing-rn-2-9-1 - - Name: "2.9" - File: distr-tracing-rn-2-9 - - Name: "2.8" - File: distr-tracing-rn-2-8 - - Name: "2.7" - File: distr-tracing-rn-2-7 - - Name: "2.6" - File: distr-tracing-rn-2-6 - - Name: "2.5" - File: distr-tracing-rn-2-5 - - Name: "2.4" - File: distr-tracing-rn-2-4 - - Name: "2.3" - File: distr-tracing-rn-2-3 - - Name: "2.2" - File: distr-tracing-rn-2-2 - - Name: "2.1" - File: distr-tracing-rn-2-1 - - Name: "2.0" - File: distr-tracing-rn-2-0 -- Name: Distributed tracing architecture - Dir: distr_tracing_arch - Topics: - - Name: Distributed tracing architecture - File: distr-tracing-architecture -- Name: Distributed tracing platform (Jaeger) - Dir: distr_tracing_jaeger - Topics: - - Name: Installation - File: distr-tracing-jaeger-installing - - Name: Configuration - File: distr-tracing-jaeger-configuring - - Name: Updating - File: distr-tracing-jaeger-updating - - Name: Removal - File: distr-tracing-jaeger-removing -- Name: Distributed tracing platform (Tempo) - Dir: distr_tracing_tempo - Topics: - - Name: Installation - File: distr-tracing-tempo-installing - - Name: Configuration - File: distr-tracing-tempo-configuring - - Name: Updating - File: distr-tracing-tempo-updating - - Name: Removal - File: distr-tracing-tempo-removing -- Name: Distributed tracing data collection (OpenTelemetry) - Dir: distr_tracing_otel - Topics: - - Name: Installation - File: distr-tracing-otel-installing - - Name: Configuration - File: distr-tracing-otel-configuring - - Name: Use - File: distr-tracing-otel-using - - Name: Troubleshooting - File: distr-tracing-otel-troubleshooting - - Name: Migration - File: distr-tracing-otel-migrating - - Name: Removal - File: distr-tracing-otel-removing ---- Name: Virtualization Dir: virt Distros: openshift-enterprise,openshift-origin diff --git a/_unused_topics/distr-tracing-deploy-otel-collector.adoc b/_unused_topics/distr-tracing-deploy-otel-collector.adoc deleted file mode 100644 index 2542e31c7b..0000000000 --- a/_unused_topics/distr-tracing-deploy-otel-collector.adoc +++ /dev/null @@ -1,128 +0,0 @@ -//// -This module included in the following assemblies: -- distr_tracing_install/distr-tracing-deploying.adoc -//// - -:_mod-docs-content-type: PROCEDURE -[id="distr-tracing-deploy-otel-collector_{context}"] -= Deploying distributed tracing data collection - -The custom resource definition (CRD) defines the configuration used when you deploy an instance of {OTELName}. - -.Prerequisites - -* The {OTELName} Operator has been installed. -//* You have reviewed the instructions for how to customize the deployment. -* You have access to the cluster as a user with the `cluster-admin` role. - -.Procedure - -. Log in to the OpenShift web console as a user with the `cluster-admin` role. - -. Create a new project, for example `tracing-system`. 
-+ -[NOTE] -==== -If you are installing distributed tracing as part of Service Mesh, the {DTShortName} resources must be installed in the same namespace as the `ServiceMeshControlPlane` resource, for example `istio-system`. -==== -+ -.. Navigate to *Home* -> *Projects*. - -.. Click *Create Project*. - -.. Enter `tracing-system` in the *Name* field. - -.. Click *Create*. - -. Navigate to *Operators* -> *Installed Operators*. - -. If necessary, select `tracing-system` from the *Project* menu. You might have to wait a few moments for the Operators to be copied to the new project. - -. Click the *{OTELName} Operator*. On the *Details* tab, under *Provided APIs*, the Operator provides a single link. - -. Under *OpenTelemetryCollector*, click *Create Instance*. - -. On the *Create OpenTelemetry Collector* page, to install using the defaults, click *Create* to create the {OTELShortName} instance. - -. On the *OpenTelemetryCollectors* page, click the name of the {OTELShortName} instance, for example, `opentelemetrycollector-sample`. - -. On the *Details* page, click the *Resources* tab. Wait until the pod has a status of "Running" before continuing. - -[id="distr-tracing-deploy-otel-collector-cli_{context}"] -= Deploying {OTELShortName} from the CLI - -Follow this procedure to create an instance of {OTELShortName} from the command line. - -.Prerequisites - -* The {OTELName} Operator has been installed and verified. -+ -//* You have reviewed the instructions for how to customize the deployment. -+ -* You have access to the OpenShift CLI (`oc`) that matches your {product-title} version. -* You have access to the cluster as a user with the `cluster-admin` role. - -.Procedure - -. Log in to the {product-title} CLI as a user with the `cluster-admin` role. -+ -[source,terminal] ----- -$ oc login https://:8443 ----- - -. Create a new project named `tracing-system`. -+ -[source,terminal] ----- -$ oc new-project tracing-system ----- - -. Create a custom resource file named `jopentelemetrycollector-sample.yaml` that contains the following text: -+ -.Example opentelemetrycollector.yaml -[source,yaml] ----- - apiVersion: opentelemetry.io/v1alpha1 - kind: OpenTelemetryCollector - metadata: - name: opentelemetrycollector-sample - namespace: openshift-operators - spec: - image: >- - registry.redhat.io/rhosdt/opentelemetry-collector-rhel8@sha256:61934ea5793c55900d09893e8f8b1f2dbd2e712faba8e97684e744691b29f25e - config: | - receivers: - jaeger: - protocols: - grpc: - exporters: - logging: - service: - pipelines: - traces: - receivers: [jaeger] - exporters: [logging] ----- - -. Run the following command to deploy {JaegerShortName}: -+ -[source,terminal] ----- -$ oc create -n tracing-system -f opentelemetrycollector.yaml ----- - -. 
Run the following command to watch the progress of the pods during the installation process:
-+
-[source,terminal]
-----
-$ oc get pods -n tracing-system -w
-----
-+
-After the installation process has completed, you should see output similar to the following example:
-+
-[source,terminal]
-----
-NAME                                   READY   STATUS    RESTARTS   AGE
-opentelemetrycollector-cdff7897b-qhfdx 2/2     Running   0          24s
-----
diff --git a/distr_tracing/distr_tracing_jaeger/distr-tracing-jaeger-installing.adoc b/distr_tracing/distr_tracing_jaeger/distr-tracing-jaeger-installing.adoc
index ae40acff0f..d6d30b1ddf 100644
--- a/distr_tracing/distr_tracing_jaeger/distr-tracing-jaeger-installing.adoc
+++ b/distr_tracing/distr_tracing_jaeger/distr-tracing-jaeger-installing.adoc
@@ -6,6 +6,11 @@ include::_attributes/common-attributes.adoc[]
 
 toc::[]
 
+[WARNING]
+====
+Jaeger is deprecated in {DTProductName} 3.0. See the xref:../distr_tracing_rn/distr-tracing-rn-3-0.adoc[release notes] for more information.
+====
+
 You can install {DTProductName} on {product-title} in either of two ways:
 
 * You can install {DTProductName} as part of {SMProductName}. Distributed tracing is included by default in the Service Mesh installation. To install {DTProductName} as part of a service mesh, follow the xref:../../service_mesh/v2x/preparing-ossm-installation.adoc#preparing-ossm-installation[Red Hat Service Mesh Installation] instructions. You must install {DTProductName} in the same namespace as your service mesh, that is, the `ServiceMeshControlPlane` and the {DTProductName} resources must be in the same namespace.
diff --git a/distr_tracing/distr_tracing_jaeger/distr-tracing-jaeger-updating.adoc b/distr_tracing/distr_tracing_jaeger/distr-tracing-jaeger-updating.adoc
index d12c2cd166..bc978b2e1a 100644
--- a/distr_tracing/distr_tracing_jaeger/distr-tracing-jaeger-updating.adoc
+++ b/distr_tracing/distr_tracing_jaeger/distr-tracing-jaeger-updating.adoc
@@ -6,6 +6,11 @@ include::_attributes/common-attributes.adoc[]
 
 toc::[]
 
+[WARNING]
+====
+Jaeger is deprecated in {DTProductName} 3.0. See the xref:../distr_tracing_rn/distr-tracing-rn-3-0.adoc[release notes] for more information.
+====
+
 Operator Lifecycle Manager (OLM) controls the installation, upgrade, and role-based access control (RBAC) of Operators in a cluster. The OLM runs by default in {product-title}. OLM queries for available Operators as well as upgrades for installed Operators.
@@ -22,4 +27,4 @@ If you have not already updated your OpenShift Elasticsearch Operator as describ * xref:../../operators/understanding/olm/olm-understanding-olm.adoc#olm-understanding-olm[Operator Lifecycle Manager concepts and resources] * xref:../../operators/admin/olm-upgrading-operators.adoc#olm-upgrading-operators[Updating installed Operators] -* xref:../../logging/cluster-logging-upgrading.adoc#cluster-logging-upgrading_cluster-logging-upgrading[Updating OpenShift Logging] \ No newline at end of file +* xref:../../logging/cluster-logging-upgrading.adoc#cluster-logging-upgrading_cluster-logging-upgrading[Updating OpenShift Logging] diff --git a/distr_tracing/distr_tracing_otel/_attributes b/distr_tracing/distr_tracing_otel/_attributes deleted file mode 120000 index 20cc1dcb77..0000000000 --- a/distr_tracing/distr_tracing_otel/_attributes +++ /dev/null @@ -1 +0,0 @@ -../../_attributes/ \ No newline at end of file diff --git a/distr_tracing/distr_tracing_otel/distr-tracing-otel-configuring.adoc b/distr_tracing/distr_tracing_otel/distr-tracing-otel-configuring.adoc deleted file mode 100644 index 54a75b29ba..0000000000 --- a/distr_tracing/distr_tracing_otel/distr-tracing-otel-configuring.adoc +++ /dev/null @@ -1,18 +0,0 @@ -:_mod-docs-content-type: ASSEMBLY -[id="distr-tracing-otel-configuring"] -= Configuring and deploying the {OTELShortName} -include::_attributes/common-attributes.adoc[] -:context: distr-tracing-otel-configuring - -toc::[] - -The {OTELName} Operator uses a custom resource definition (CRD) file that defines the architecture and configuration settings to be used when creating and deploying the {OTELShortName} resources. You can install the default configuration or modify the file. - -include::modules/distr-tracing-otel-config-collector.adoc[leveloffset=+1] - -include::modules/distr-tracing-otel-config-send-metrics-monitoring-stack.adoc[leveloffset=+1] - -[role="_additional-resources"] -[id="additional-resources_deploy-otel"] -== Additional resources -* xref:../../monitoring/enabling-monitoring-for-user-defined-projects.adoc#enabling-monitoring-for-user-defined-projects[Enabling monitoring for user-defined projects] diff --git a/distr_tracing/distr_tracing_otel/distr-tracing-otel-installing.adoc b/distr_tracing/distr_tracing_otel/distr-tracing-otel-installing.adoc deleted file mode 100644 index 2ad5faa7e3..0000000000 --- a/distr_tracing/distr_tracing_otel/distr-tracing-otel-installing.adoc +++ /dev/null @@ -1,28 +0,0 @@ -:_mod-docs-content-type: ASSEMBLY -[id="install-distributed-tracing-otel"] -= Installing the {OTELShortName} -include::_attributes/common-attributes.adoc[] -:context: install-distributed-tracing-otel - -toc::[] - -:FeatureName: The {OTELOperator} -include::snippets/technology-preview.adoc[leveloffset=+1] - -Installing the {OTELShortName} involves the following steps: - -. Installing the {OTELOperator}. -. Creating a namespace for an OpenTelemetry Collector instance. -. Creating an `OpenTelemetryCollector` custom resource to deploy the OpenTelemetry Collector instance. 
- -include::modules/distr-tracing-otel-install-web-console.adoc[leveloffset=+1] - -[role="_additional-resources"] -[id="additional-resources_dist-tracing-otel-installing"] -== Additional resources -* xref:../../post_installation_configuration/preparing-for-users.adoc#creating-cluster-admin_post-install-preparing-for-users[Creating a cluster admin] -* link:https://operatorhub.io/[OperatorHub.io] -* xref:../../web_console/web-console.adoc#web-console[Accessing the web console] -* xref:../../operators/admin/olm-adding-operators-to-cluster.adoc#olm-installing-from-operatorhub-using-web-console_olm-adding-operators-to-a-cluster[Installing from OperatorHub using the web console] -* xref:../../operators/user/olm-creating-apps-from-installed-operators.adoc#olm-creating-apps-from-installed-operators[Creating applications from installed Operators] -* xref:../../cli_reference/openshift_cli/getting-started-cli.adoc#getting-started-cli[Getting started with the OpenShift CLI] diff --git a/distr_tracing/distr_tracing_otel/distr-tracing-otel-troubleshooting.adoc b/distr_tracing/distr_tracing_otel/distr-tracing-otel-troubleshooting.adoc deleted file mode 100644 index 1f90fbb8ee..0000000000 --- a/distr_tracing/distr_tracing_otel/distr-tracing-otel-troubleshooting.adoc +++ /dev/null @@ -1,13 +0,0 @@ -:_mod-docs-content-type: ASSEMBLY -[id="distr-tracing-otel-troubleshoot"] -= Troubleshooting the {OTELShortName} -include::_attributes/common-attributes.adoc[] -:context: distr-tracing-otel-troubleshoot - -toc::[] - -The OpenTelemetry Collector offers multiple ways to measure its health as well as investigate data ingestion issues. - -include::modules/distr-tracing-otel-troubleshoot-logs.adoc[leveloffset=+1] -include::modules/distr-tracing-otel-troubleshoot-metrics.adoc[leveloffset=+1] -include::modules/distr-tracing-otel-troubleshoot-logging-exporter.adoc[leveloffset=+1] diff --git a/distr_tracing/distr_tracing_otel/distr-tracing-otel-using.adoc b/distr_tracing/distr_tracing_otel/distr-tracing-otel-using.adoc deleted file mode 100644 index b09501c502..0000000000 --- a/distr_tracing/distr_tracing_otel/distr-tracing-otel-using.adoc +++ /dev/null @@ -1,18 +0,0 @@ -:_mod-docs-content-type: ASSEMBLY -[id="distr-tracing-otel-temp"] -= Using the {OTELShortName} -include::_attributes/common-attributes.adoc[] -:context: distr-tracing-otel-temp - -toc::[] - -include::modules/distr-tracing-otel-forwarding.adoc[leveloffset=+1] - -[id="distr-tracing-otel-send-traces-and-metrics-to-otel-collector_{context}"] -== Sending traces and metrics to the OpenTelemetry Collector - -Sending tracing and metrics to the OpenTelemetry Collector is possible with or without sidecar injection. 
-
-include::modules/distr-tracing-otel-send-traces-and-metrics-to-otel-collector-with-sidecar.adoc[leveloffset=+2]
-
-include::modules/distr-tracing-otel-send-traces-and-metrics-to-otel-collector-without-sidecar.adoc[leveloffset=+2]
diff --git a/distr_tracing/distr_tracing_otel/images b/distr_tracing/distr_tracing_otel/images
deleted file mode 120000
index 847b03ed05..0000000000
--- a/distr_tracing/distr_tracing_otel/images
+++ /dev/null
@@ -1 +0,0 @@
-../../images/
\ No newline at end of file
diff --git a/distr_tracing/distr_tracing_otel/modules b/distr_tracing/distr_tracing_otel/modules
deleted file mode 120000
index 36719b9de7..0000000000
--- a/distr_tracing/distr_tracing_otel/modules
+++ /dev/null
@@ -1 +0,0 @@
-../../modules/
\ No newline at end of file
diff --git a/distr_tracing/distr_tracing_otel/snippets b/distr_tracing/distr_tracing_otel/snippets
deleted file mode 120000
index 5a3f5add14..0000000000
--- a/distr_tracing/distr_tracing_otel/snippets
+++ /dev/null
@@ -1 +0,0 @@
-../../snippets/
\ No newline at end of file
diff --git a/distr_tracing/distr_tracing_rn/distr-tracing-rn-3-0.adoc b/distr_tracing/distr_tracing_rn/distr-tracing-rn-3-0.adoc
new file mode 100644
index 0000000000..63d903536b
--- /dev/null
+++ b/distr_tracing/distr_tracing_rn/distr-tracing-rn-3-0.adoc
@@ -0,0 +1,95 @@
+:_mod-docs-content-type: ASSEMBLY
+include::_attributes/common-attributes.adoc[]
+[id="distributed-tracing-rn-3-0"]
+= Release notes for {DTProductName} 3.0
+:context: distributed-tracing-rn-3-0
+
+toc::[]
+
+include::modules/distr-tracing-product-overview.adoc[leveloffset=+1]
+
+[id="component-versions_distributed-tracing-rn-3-0"]
+== Component versions in {DTProductName} 3.0
+
+[options="header"]
+|===
+|Operator |Component |Version
+|{JaegerName}
+|Jaeger
+|1.51.0
+
+|xref:../../otel/otel-release-notes.adoc[{OTELName}]
+|OpenTelemetry
+|0.89.0
+
+|{TempoName}
+|Tempo
+|2.3.0
+|===
+
+// Jaeger section
+[id="jaeger-release-notes_distributed-tracing-rn-3-0"]
+== {JaegerName}
+
+[id="deprecated-functionality_jaeger-release-notes_distributed-tracing-rn-3-0"]
+=== Deprecated functionality
+
+In {DTProductName} 3.0, Jaeger and Elasticsearch are deprecated, and both are planned to be removed in a future release. Red Hat will provide support and bug fixes for CVEs rated critical and above for these components during the current release lifecycle, but these components will no longer receive feature enhancements.
+
+In {DTProductName} 3.0, the {TempoOperator}, which provides Tempo, and the {OTELOperator}, which provides the OpenTelemetry Collector, are the preferred Operators for distributed tracing collection and storage. All users should adopt the OpenTelemetry and Tempo distributed tracing stack because it is the stack that will be enhanced going forward.
+
+[id="new-features-and-enhancements_jaeger-release-notes_distributed-tracing-rn-3-0"]
+=== New features and enhancements
+
+This update introduces the following enhancements for the {JaegerShortName}:
+
+* Support for the ARM architecture.
+* Support for cluster-wide proxy environments.
+
+[id="bug-fixes_jaeger-release-notes_distributed-tracing-rn-3-0"]
+=== Bug fixes
+
+This update introduces the following bug fixes for the {JaegerShortName}:
+
+* Fixed support for disconnected environments when using the `oc adm catalog mirror` CLI command. 
(link:https://issues.redhat.com/browse/TRACING-3546[TRACING-3546])
+
+[id="known-issues_jaeger-release-notes_distributed-tracing-rn-3-0"]
+=== Known issues
+
+* Currently, Apache Spark is not supported.
+
+ifndef::openshift-rosa[]
+
+* Currently, the streaming deployment by using AMQ/Kafka is not supported on the IBM Z and IBM Power Systems architectures.
+endif::openshift-rosa[]
+
+// Tempo section
+[id="tempo-release-notes_distributed-tracing-rn-3-0"]
+== {TempoName}
+
+[id="new-features-and-enhancements_tempo-release-notes_distributed-tracing-rn-3-0"]
+=== New features and enhancements
+
+This update introduces the following enhancements for the {TempoShortName}:
+
+* Support for the ARM architecture.
+* Support for span request count, duration, and error count (RED) metrics. The metrics can be visualized in the Jaeger console deployed as part of Tempo or in the web console in the *Observe* menu.
+
+[id="bug-fixes_tempo-release-notes_distributed-tracing-rn-3-0"]
+=== Bug fixes
+
+This update introduces the following bug fixes for the {TempoShortName}:
+
+* Fixed support for the custom TLS CA option for connecting to object storage. (link:https://issues.redhat.com/browse/TRACING-3462[TRACING-3462])
+* Fixed support for disconnected environments when using the `oc adm catalog mirror` CLI command. (link:https://issues.redhat.com/browse/TRACING-3523[TRACING-3523])
+* Fixed mTLS when Gateway is not deployed. (link:https://issues.redhat.com/browse/TRACING-3510[TRACING-3510])
+
+[id="known-issues_tempo-release-notes_distributed-tracing-rn-3-0"]
+=== Known issues
+
+* Currently, when used with the {TempoOperator}, the Jaeger UI displays only those services that sent traces within the last 15 minutes. Traces from services that did not send traces within the last 15 minutes are still stored but are not displayed in the Jaeger UI. (link:https://issues.redhat.com/browse/TRACING-3139[TRACING-3139])
+* Currently, the {TempoShortName} fails on the IBM Z (`s390x`) architecture. (link:https://issues.redhat.com/browse/TRACING-3545[TRACING-3545])
+
+include::modules/support.adoc[leveloffset=+1]
+
+include::modules/making-open-source-more-inclusive.adoc[leveloffset=+1]
diff --git a/distr_tracing/distr_tracing_tempo/distr-tracing-tempo-configuring.adoc b/distr_tracing/distr_tracing_tempo/distr-tracing-tempo-configuring.adoc
index dfc9e2111b..841663f9be 100644
--- a/distr_tracing/distr_tracing_tempo/distr-tracing-tempo-configuring.adoc
+++ b/distr_tracing/distr_tracing_tempo/distr-tracing-tempo-configuring.adoc
@@ -21,6 +21,10 @@ include::modules/distr-tracing-tempo-config-storage.adoc[leveloffset=+2]
 
 include::modules/distr-tracing-tempo-config-query-frontend.adoc[leveloffset=+2]
 
+include::modules/distr-tracing-tempo-config-spanmetrics.adoc[leveloffset=+2]
+
+include::modules/distr-tracing-tempo-config-multitenancy.adoc[leveloffset=+2]
+
 [id="setting-up-monitoring-for-tempo"]
 == Setting up monitoring for the {TempoShortName}
 
diff --git a/distr_tracing/distr_tracing_tempo/distr-tracing-tempo-installing.adoc b/distr_tracing/distr_tracing_tempo/distr-tracing-tempo-installing.adoc
index 7b597ad854..a18c77c98c 100644
--- a/distr_tracing/distr_tracing_tempo/distr-tracing-tempo-installing.adoc
+++ b/distr_tracing/distr_tracing_tempo/distr-tracing-tempo-installing.adoc
@@ -6,9 +6,6 @@ include::_attributes/common-attributes.adoc[]
 
 toc::[]
 
-:FeatureName: The {TempoOperator}
-include::snippets/technology-preview.adoc[leveloffset=+1]
-
 Installing the {TempoShortName} involves the following steps:
 
 . Setting up supported object storage.
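The installation flow in the `distr-tracing-tempo-installing.adoc` hunk above culminates in creating a `TempoStack` custom resource that points at the object storage set up in the first step. A minimal sketch of such a resource follows; the `tempo-system` namespace and the `tempo-bucket` secret name are hypothetical, and the secret must already hold the object storage credentials. The `spec` fields match the parameters listed in the `distr-tracing-tempo-config-default` module later in this diff.

[source,yaml]
----
apiVersion: tempo.grafana.com/v1alpha1
kind: TempoStack
metadata:
  name: sample
  namespace: tempo-system    # hypothetical namespace
spec:
  storageSize: 1Gi           # size of the ingester PVCs
  storage:
    secret:
      name: tempo-bucket     # hypothetical secret with object storage credentials
      type: s3               # one of the supported object storage types
----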
diff --git a/distr_tracing/distr_tracing_tempo/distr-tracing-tempo-updating.adoc b/distr_tracing/distr_tracing_tempo/distr-tracing-tempo-updating.adoc
index c6c708d469..05a7b4dd24 100644
--- a/distr_tracing/distr_tracing_tempo/distr-tracing-tempo-updating.adoc
+++ b/distr_tracing/distr_tracing_tempo/distr-tracing-tempo-updating.adoc
@@ -6,8 +6,11 @@ include::_attributes/common-attributes.adoc[]
 
 toc::[]
 
-include::modules/distr-tracing-tempo-update-olm.adoc[leveloffset=+1]
+For version upgrades, the {TempoOperator} uses the Operator Lifecycle Manager (OLM), which controls the installation, upgrade, and role-based access control (RBAC) of Operators in a cluster.
+The OLM runs by default in {product-title}. OLM queries for available Operators as well as upgrades for installed Operators.
+
+When the {TempoOperator} is upgraded to a new version, it scans for the running `TempoStack` instances that it manages and upgrades them to the version that corresponds to the new Operator version.
 
 [role="_additional-resources"]
 [id="additional-resources_dist-tracing-tempo-updating"]
 == Additional resources
diff --git a/modules/distr-tracing-config-default.adoc b/modules/distr-tracing-config-default.adoc
index 6c58dc769e..36f14cb22c 100644
--- a/modules/distr-tracing-config-default.adoc
+++ b/modules/distr-tracing-config-default.adoc
@@ -109,6 +109,7 @@ spec:
 |Configuration options that define the Ingester service.
 |
 |
+
 |===
 
 The following example YAML is the minimum required to create a {JaegerName} deployment using the default settings.
diff --git a/modules/distr-tracing-config-jaeger-collector.adoc b/modules/distr-tracing-config-jaeger-collector.adoc
index 150385389f..2f10da1bdb 100644
--- a/modules/distr-tracing-config-jaeger-collector.adoc
+++ b/modules/distr-tracing-config-jaeger-collector.adoc
@@ -63,4 +63,46 @@ The Collectors are stateless and thus many instances of Jaeger Collector can be
 log-level:
 |Logging level for the Collector.
 |Possible values: `debug`, `info`, `warn`, `error`, `fatal`, `panic`.
+
+|options:
+  otlp:
+    enabled: true
+    grpc:
+      host-port: 4317
+      max-connection-age: 0s
+      max-connection-age-grace: 0s
+      max-message-size: 4194304
+      tls:
+        enabled: false
+        cert: /path/to/cert.crt
+        cipher-suites: "TLS_AES_256_GCM_SHA384,TLS_CHACHA20_POLY1305_SHA256"
+        client-ca: /path/to/cert.ca
+        reload-interval: 0s
+        min-version: 1.2
+        max-version: 1.3
+|To accept OTLP/gRPC, explicitly enable the `otlp` receiver. All the other options are optional.
+
+|options:
+  otlp:
+    enabled: true
+    http:
+      cors:
+        allowed-headers: [<header-name>[, <header-name>]*]
+        allowed-origins: *
+      host-port: 4318
+      max-connection-age: 0s
+      max-connection-age-grace: 0s
+      max-message-size: 4194304
+      read-timeout: 0s
+      read-header-timeout: 2s
+      idle-timeout: 0s
+      tls:
+        enabled: false
+        cert: /path/to/cert.crt
+        cipher-suites: "TLS_AES_256_GCM_SHA384,TLS_CHACHA20_POLY1305_SHA256"
+        client-ca: /path/to/cert.ca
+        reload-interval: 0s
+        min-version: 1.2
+        max-version: 1.3
+|To accept OTLP/HTTP, explicitly enable the `otlp` receiver. All the other options are optional. 
|=== diff --git a/modules/distr-tracing-config-otel-collector.adoc b/modules/distr-tracing-config-otel-collector.adoc deleted file mode 100644 index 20348e7570..0000000000 --- a/modules/distr-tracing-config-otel-collector.adoc +++ /dev/null @@ -1,128 +0,0 @@ -//// -This module included in the following assemblies: --distr_tracing_install/distributed-tracing-deploying-otel.adoc -//// -:_mod-docs-content-type: REFERENCE -[id="distr-tracing-config-otel-collector_{context}"] -= OpenTelemetry Collector configuration options - -:FeatureName: The {OTELName} Operator -include::snippets/technology-preview.adoc[leveloffset=+1] - -The OpenTelemetry Collector consists of three components that access telemetry data: - -* *Receivers* - A receiver, which can be push or pull based, is how data gets into the Collector. Generally, a receiver accepts data in a specified format, translates it into the internal format and passes it to processors and exporters defined in the applicable pipelines. By default, no receivers are configured. One or more receivers must be configured. Receivers may support one or more data sources. - -* *Processors* - (Optional) Processors are run on data between being received and being exported. By default, no processors are enabled. Processors must be enabled for every data source. Not all processors support all data sources. Depending on the data source, it may be recommended that multiple processors be enabled. In addition, it is important to note that the order of processors matters. - -* *Exporters* - An exporter, which can be push or pull based, is how you send data to one or more backends/destinations. By default, no exporters are configured. One or more exporters must be configured. Exporters may support one or more data sources. Exporters may come with default settings, but many require configuration to specify at least the destination and security settings. - -You can define multiple instances of components in a custom resource YAML file. Once configured, these components must be enabled through pipelines defined in the `spec.config.service` section of the YAML file. As a best practice you should only enable the components that you need. - -.sample OpenTelemetry collector custom resource file -[source,yaml] ----- -apiVersion: opentelemetry.io/v1alpha1 -kind: OpenTelemetryCollector -metadata: - name: cluster-collector - namespace: tracing-system -spec: - mode: deployment - config: | - receivers: - otlp: - protocols: - grpc: - http: - processors: - exporters: - jaeger: - endpoint: jaeger-production-collector-headless.tracing-system.svc:14250 - tls: - ca_file: "/var/run/secrets/kubernetes.io/serviceaccount/service-ca.crt" - service: - pipelines: - traces: - receivers: [otlp] - processors: [] - exporters: [jaeger] ----- - -[NOTE] -==== -If a component is configured, but not defined within the `service` section then it is not enabled. -==== - -.Parameters used by the Operator to define the OpenTelemetry Collector -[options="header"] -[cols="l, a, a, a"] -|=== -|Parameter |Description |Values |Default -|receivers: -|A receiver is how data gets into the Collector. By default, no receivers are configured. There must be at least one enabled receiver for a configuration to be considered valid. Receivers are enabled by being added to a pipeline. -|`otlp`, `jaeger` -|None - -|receivers: - otlp: -|The `oltp` and `jaeger` receivers come with default settings, specifying the name of the receiver is enough to configure it. 
-|
-|
-|processors:
-|Processors run on data between being received and being exported. By default, no processors are enabled.
-|
-|None
-
-|exporters:
-|An exporter sends data to one or more backends/destinations. By default, no exporters are configured. There must be at least one enabled exporter for a configuration to be considered valid. Exporters are enabled by being added to a pipeline. Exporters may come with default settings, but many require configuration to specify at least the destination and security settings.
-|`logging`, `jaeger`
-|None
-
-|exporters:
- jaeger:
-  endpoint:
-
-|The `jaeger` exporter’s endpoint must be of the form `<name>-collector-headless.<namespace>.svc`, with the name and namespace of the Jaeger deployment, for a secure connection to be established.
-|
-|
-
-|exporters:
- jaeger:
-  tls:
-   ca_file:
-|Path to the CA certificate. For a client this verifies the server certificate. For a server this verifies client certificates. If empty uses system root CA.
-|
-|
-
-|service:
- pipelines:
-|Components are enabled by adding them to a pipeline under `services.pipeline`.
-|
-|
-
-|service:
- pipelines:
-  traces:
-   receivers:
-|You enable receivers for tracing by adding them under `service.pipelines.traces`.
-|
-|None
-
-|service:
- pipelines:
-  traces:
-   processors:
-|You enable processors for tracing by adding them under `service.pipelines.traces`.
-|
-|None
-
-|service:
- pipelines:
-  traces:
-   exporters:
-|You enable exporters for tracing by adding them under `service.pipelines.traces`.
-|
-|None
-|===
diff --git a/modules/distr-tracing-config-storage.adoc b/modules/distr-tracing-config-storage.adoc
index 54734399a5..8e02995c6a 100644
--- a/modules/distr-tracing-config-storage.adoc
+++ b/modules/distr-tracing-config-storage.adoc
@@ -159,10 +159,10 @@ Minimum deployment = 16Gi*
 |`true`/`false`
 |`true`
 
-|
-3+|*Each Elasticsearch node can operate with a lower memory setting though this is NOT recommended for production deployments. For production use, you should have no less than 16Gi allocated to each pod by default, but preferably allocate as much as you can, up to 64Gi per pod.
 |===
 
+Each Elasticsearch node can operate with a lower memory setting, though this is not recommended for production deployments. For production use, you must have no less than 16 Gi allocated to each pod by default, but preferably allocate as much as you can, up to 64 Gi per pod.
+
 .Production storage example
 [source,yaml]
 ----
diff --git a/modules/distr-tracing-deploy-streaming.adoc b/modules/distr-tracing-deploy-streaming.adoc
index 6c48fe3cb7..341f038b68 100644
--- a/modules/distr-tracing-deploy-streaming.adoc
+++ b/modules/distr-tracing-deploy-streaming.adoc
@@ -71,8 +71,7 @@ spec:
     kafka:
       producer:
         topic: jaeger-spans
-        #Note: If brokers are not defined,AMQStreams 1.4.0+ will self-provision Kafka.
-        brokers: my-cluster-kafka-brokers.kafka:9092
+        brokers: my-cluster-kafka-brokers.kafka:9092 # <1>
     storage:
       type: elasticsearch
     ingester:
@@ -83,6 +82,7 @@ spec:
         brokers: my-cluster-kafka-brokers.kafka:9092
 ----
 
+<1> If the brokers are not defined, AMQ Streams 1.4.0+ self-provisions Kafka.
 //TODO - find out if this storage configuration is correct for OpenShift
 
 . Click *Create* to create the {JaegerShortName} instance.
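The callout added to `distr-tracing-deploy-streaming.adoc` above states that AMQ Streams 1.4.0+ self-provisions Kafka when no brokers are defined. For contrast with the explicit-brokers example in that hunk, a sketch of the self-provisioning variant follows; the instance name is hypothetical, and the AMQ Streams Operator is assumed to be installed.

[source,yaml]
----
apiVersion: jaegertracing.io/v1
kind: Jaeger
metadata:
  name: auto-provision-kafka   # hypothetical instance name
spec:
  strategy: streaming
  collector:
    options:
      kafka:
        producer:
          topic: jaeger-spans
          # brokers intentionally omitted: AMQ Streams 1.4.0+ self-provisions Kafka
  storage:
    type: elasticsearch
----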
diff --git a/modules/distr-tracing-install-otel-operator.adoc b/modules/distr-tracing-install-otel-operator.adoc deleted file mode 100644 index 1caf91d4ad..0000000000 --- a/modules/distr-tracing-install-otel-operator.adoc +++ /dev/null @@ -1,53 +0,0 @@ -//// -This module included in the following assemblies: -- distr_tracing_install/distr-tracing-installing.adoc -//// - -:_mod-docs-content-type: PROCEDURE -[id="distr-tracing-install-otel-operator_{context}"] -= Installing the {OTELName} Operator - -:FeatureName: The {OTELName} Operator -include::snippets/technology-preview.adoc[leveloffset=+1] - -To install {OTELName}, you use the link:https://operatorhub.io/[OperatorHub] to install the {OTELName} Operator. - -By default, the Operator is installed in the `openshift-operators` project. - -.Prerequisites -* You have access to the {product-title} web console. -* You have access to the cluster as a user with the `cluster-admin` role. If you use {product-dedicated}, you must have an account with the `dedicated-admin` role. - -[WARNING] -==== -Do not install Community versions of the Operators. Community Operators are not supported. -==== - -.Procedure - -. Log in to the {product-title} web console as a user with the `cluster-admin` role. If you use {product-dedicated}, you must have an account with the `dedicated-admin` role. - -. Navigate to *Operators* -> *OperatorHub*. - -. Type *distributed tracing data collection* into the filter to locate the {OTELName} Operator. - -. Click the *{OTELName} Operator* provided by Red Hat to display information about the Operator. - -. Click *Install*. - -. On the *Install Operator* page, accept the default *stable* Update channel. This automatically updates your Operator as new versions are released. - -. Accept the default *All namespaces on the cluster (default)*. This installs the Operator in the default `openshift-operators` project and makes the Operator available to all projects in the cluster. - -. Accept the default *Automatic* approval strategy. By accepting the default, when a new version of this Operator is available, Operator Lifecycle Manager (OLM) automatically upgrades the running instance of your Operator without human intervention. If you select *Manual* updates, when a newer version of an Operator is available, OLM creates an update request. As a cluster administrator, you must then manually approve that update request to have the Operator updated to the new version. -+ -[NOTE] -==== -The *Manual* approval strategy requires a user with appropriate credentials to approve the Operator install and subscription process. -==== - -. Click *Install*. - -. Go to *Operators* -> *Installed Operators*. - -. On the *Installed Operators* page, select the `openshift-operators` project. Wait until you see that the {OTELName} Operator shows a status of "Succeeded" before continuing. 
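The module deleted above installed the Operator through the web console with the *stable* channel, the all-namespaces mode, and the *Automatic* approval strategy. In CLI terms, that console flow amounts to a `Subscription` object such as the following sketch; the `opentelemetry-product` package name is an assumption and should be verified against the catalog.

[source,yaml]
----
apiVersion: operators.coreos.com/v1alpha1
kind: Subscription
metadata:
  name: opentelemetry-product        # assumed package name; verify in the catalog
  namespace: openshift-operators     # default target for an all-namespaces install
spec:
  channel: stable                    # update channel from the deleted procedure
  installPlanApproval: Automatic     # approval strategy from the deleted procedure
  name: opentelemetry-product        # assumed package name
  source: redhat-operators
  sourceNamespace: openshift-marketplace
----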
diff --git a/modules/distr-tracing-otel-config-collector.adoc b/modules/distr-tracing-otel-config-collector.adoc deleted file mode 100644 index 2b42e94227..0000000000 --- a/modules/distr-tracing-otel-config-collector.adoc +++ /dev/null @@ -1,553 +0,0 @@ -//// -This module included in the following assemblies: --distr_tracing_otel/distr-tracing-otel-configuring.adoc -//// -:_mod-docs-content-type: REFERENCE -[id="distr-tracing-config-otel-collector_{context}"] -= OpenTelemetry Collector configuration options - -The OpenTelemetry Collector consists of three components that access telemetry data: - -Receivers:: A receiver, which can be push or pull based, is how data gets into the Collector. Generally, a receiver accepts data in a specified format, translates it into the internal format, and passes it to processors and exporters defined in the applicable pipelines. By default, no receivers are configured. One or more receivers must be configured. Receivers may support one or more data sources. - -Processors:: Optional. Processors run through the data between it is received and exported. By default, no processors are enabled. Processors must be enabled for every data source. Not all processors support all data sources. Depending on the data source, multiple processors might be enabled. Note that the order of processors matters. - -Exporters:: An exporter, which can be push or pull based, is how you send data to one or more back ends or destinations. By default, no exporters are configured. One or more exporters must be configured. Exporters can support one or more data sources. Exporters might be used with their default settings, but many exporters require configuration to specify at least the destination and security settings. - -You can define multiple instances of components in a custom resource YAML file. When configured, these components must be enabled through pipelines defined in the `spec.config.service` section of the YAML file. As a best practice, only enable the components that you need. - -.Example of the OpenTelemetry Collector custom resource file -[source,yaml] ----- -apiVersion: opentelemetry.io/v1alpha1 -kind: OpenTelemetryCollector -metadata: - name: cluster-collector - namespace: tracing-system -spec: - mode: deployment - ports: - - name: promexporter - port: 8889 - protocol: TCP - config: | - receivers: - otlp: - protocols: - grpc: - http: - processors: - exporters: - jaeger: - endpoint: jaeger-production-collector-headless.tracing-system.svc:14250 - tls: - ca_file: "/var/run/secrets/kubernetes.io/serviceaccount/service-ca.crt" - prometheus: - endpoint: 0.0.0.0:8889 - resource_to_telemetry_conversion: - enabled: true # by default resource attributes are dropped - service: <1> - pipelines: - traces: - receivers: [otlp] - processors: [] - exporters: [jaeger] - metrics: - receivers: [otlp] - processors: [] - exporters: [prometheus] ----- -<1> If a component is configured but not defined in the `service` section, the component is not enabled. - -.Parameters used by the Operator to define the OpenTelemetry Collector -[options="header"] -[cols="l, a, a, a"] -|=== -|Parameter |Description |Values |Default -|receivers: -|A receiver is how data gets into the Collector. By default, no receivers are configured. There must be at least one enabled receiver for a configuration to be considered valid. Receivers are enabled by being added to a pipeline. -|`otlp`, `jaeger`, `zipkin` -|None - -|processors: -|Processors run through the data between it is received and exported. 
By default, no processors are enabled. -| -|None - -|exporters: -|An exporter sends data to one or more back ends or destinations. By default, no exporters are configured. There must be at least one enabled exporter for a configuration to be considered valid. Exporters are enabled by being added to a pipeline. Exporters might be used with their default settings, but many require configuration to specify at least the destination and security settings. -|`otlp`, `otlphttp`, `jaeger`, `logging`, `prometheus` -|None - -|service: - pipelines: -|Components are enabled by adding them to a pipeline under `services.pipeline`. -| -| - -|service: - pipelines: - traces: - receivers: -|You enable receivers for tracing by adding them under `service.pipelines.traces`. -| -|None - -|service: - pipelines: - traces: - processors: -|You enable processors for tracing by adding them under `service.pipelines.traces`. -| -|None - -|service: - pipelines: - traces: - exporters: -|You enable exporters for tracing by adding them under `service.pipelines.traces`. -| -|None - -|service: - pipelines: - metrics: - receivers: -|You enable receivers for metrics by adding them under `service.pipelines.metrics`. -| -|None - -|service: - pipelines: - metrics: - processors: -|You enable processors for metircs by adding them under `service.pipelines.metrics`. -| -|None - -|service: - pipelines: - metrics: - exporters: -|You enable exporters for metrics by adding them under `service.pipelines.metrics`. -| -|None -|=== - -[id="otel-collector-components_{context}"] -== OpenTelemetry Collector components - -[id="receivers_{context}"] -=== Receivers - -[id="otlp-receiver_{context}"] -==== OTLP Receiver - -The OTLP receiver ingests data using the OpenTelemetry protocol (OTLP). - -* Support level: link:https://access.redhat.com/support/offerings/techpreview[Technology Preview] -* Supported signals: traces, metrics - -.OpenTelemetry Collector custom resource with an enabled OTLP receiver -[source,yaml] ----- - config: | - receivers: - otlp: - protocols: - grpc: - endpoint: 0.0.0.0:4317 <1> - tls: <2> - ca_file: ca.pem - cert_file: cert.pem - key_file: key.pem - client_ca_file: client.pem <3> - reload_interval: 1h <4> - http: - endpoint: 0.0.0.0:4318 <5> - tls: <6> - - service: - pipelines: - traces: - receivers: [otlp] - metrics: - receivers: [otlp] ----- -<1> The OTLP gRPC endpoint. If omitted, the default `+0.0.0.0:4317+` is used. -<2> The server-side TLS configuration. Defines paths to TLS certificates. If omitted, TLS is disabled. -<3> The path to the TLS certificate at which the server verifies a client certificate. This sets the value of `ClientCAs` and `ClientAuth` to `RequireAndVerifyClientCert` in the `TLSConfig`. For more information, see the link:https://godoc.org/crypto/tls#Config[`Config` of the Golang TLS package]. -<4> Specifies the time interval at which the certificate is reloaded. If the value is not set, the certificate is never reloaded. `reload_interval` accepts a string containing valid units of time such as `ns`, `us` (or `µs`), `ms`, `s`, `m`, `h`. -<5> The OTLP HTTP endpoint. The default value is `+0.0.0.0:4318+`. -<6> The server-side TLS configuration. For more information, see `grpc` protocol configuration section. - -[id="jaeger-receiver_{context}"] -==== Jaeger Receiver - -The Jaeger receiver ingests data in Jaeger formats. 
- -* Support level: link:https://access.redhat.com/support/offerings/techpreview[Technology Preview] -* Supported signals: traces - -.OpenTelemetry Collector custom resource with an enabled Jaeger receiver -[source,yaml] ----- - config: | - receivers: - jaeger: - protocols: - grpc: - endpoint: 0.0.0.0:14250 <1> - thrift_http: - endpoint: 0.0.0.0:14268 <2> - thrift_compact: - endpoint: 0.0.0.0:6831 <3> - thrift_binary: - endpoint: 0.0.0.0:6832 <4> - tls: <5> - - service: - pipelines: - traces: - receivers: [jaeger] ----- -<1> The Jaeger gRPC endpoint. If omitted, the default `+0.0.0.0:14250+` is used. -<2> The Jaeger Thrift HTTP endpoint. If omitted, the default `+0.0.0.0:14268+` is used. -<3> The Jaeger Thrift Compact endpoint. If omitted, the default `+0.0.0.0:6831+` is used. -<4> The Jaeger Thrift Binary endpoint. If omitted, the default `+0.0.0.0:6832+` is used. -<5> The TLS server side configuration. See the OTLP receiver configuration section for more details. - -[id="zipkin-receiver_{context}"] -==== Zipkin Receiver - -The Zipkin receiver ingests data in the Zipkin v1 and v2 formats. - -* Support level: link:https://access.redhat.com/support/offerings/techpreview[Technology Preview] -* Supported signals: traces - -.OpenTelemetry Collector custom resource with enabled Zipkin receiver -[source,yaml] ----- - config: | - receivers: - zipkin: - endpoint: 0.0.0.0:9411 <1> - tls: <2> - - service: - pipelines: - traces: - receivers: [zipkin] ----- -<1> The Zipkin HTTP endpoint. If omitted, the default `+0.0.0.0:9411+` is used. -<2> The TLS server side configuration. See the OTLP receiver configuration section for more details. - -[id="processors_{context}"] -=== Processors - - -[id="batch-processor_{context}"] -==== Batch processor - -The batch processor batches the data to reduce the number of outgoing connections needed to transfer the telemetry information. - -* Support level: link:https://access.redhat.com/support/offerings/techpreview[Technology Preview] -* Supported signals: traces, metrics - -.Example of the OpenTelemetry Collector custom resource when using the batch processor -[source,yaml] ----- - config: | - processor: - batch: - timeout: 5s - send_batch_max_size: 10000 - service: - pipelines: - traces: - processors: [batch] - metrics: - processors: [batch] ----- - -.Parameters used by the batch processor -[cols="3",options="header"] -|=== -|Parameter |Description |Default - -| `timeout` -| Sends the batch after a specific time duration, irrespective of its size. -| 200ms - -| `send_batch_size` -| Sends the batch of telemetry data after the specified number of spans or metrics. -| 8192 - -| `send_batch_max_size` -| The maximum allowable size of the batch. Must be equal or greater than `send_batch_size`. -| 0 - -| `metadata_keys` -| When activated, a batcher instance is created for each unique set of values found in the `client.Metadata`. -| [] - -| `metadata_cardinality_limit` -| When the `metadata_keys` are populated, this configuration restricts the number of distinct metadata key-value combinations processed throughout the duration of the process. -| 1000 -|=== - -[id="memorylimiter-processor_{context}"] -==== Memory Limiter processor - -The Memory Limiter processor periodically checks the Collector's memory usage and pauses data processing when the soft memory limit is reached. -The preceding component, which is typically a receiver, is expected to retry sending the same data and may apply a backpressure to the incoming data. 
-When memory usage exceeds the hard limit, the Memory Limiter processor forces garbage collection to run. - -* Support level: General Availability -* Supported signals: traces, metrics, logs - -.Example of the OpenTelemetry Collector custom resource when using the Memory Limiter processor -[source,yaml] ----- - config: | - processor: - memory_limiter: - check_interval: 1s - limit_mib: 4000 - spike_limit_mib: 800 - service: - pipelines: - traces: - processors: [batch] - metrics: - processors: [batch] ----- - -.Parameters used by the Memory Limiter processor -[cols="3",options="header"] -|=== -| Parameter | Description | Default - -| `check_interval` -| Time between memory usage measurements. The optimal value is 1s. For spiky traffic patterns, you can decrease the `check_interval` or increase the `spike_limit_mib`. -| `0s` - -| `limit_mib` -| The hard limit, which is the maximum amount of memory in MiB allocated on the heap. Typically, the total memory usage of the OpenTelemetry Collector is about 50 MiB greater than this value. -| `0` - -| `spike_limit_mib` -| Spike limit, which is the maximum expected spike of memory usage in MiB. The optimal value is approximately 20% of `limit_mib`. To calculate the soft limit, subtract the `spike_limit_mib` from the `limit_mib`. -| 20% of `limit_mib` - -| `limit_percentage` -| Same as the `limit_mib` but expressed as a percentage of the total available memory. The `limit_mib` setting takes precedence over this setting. -| `0` - -| `spike_limit_percentage` -| Same as the `spike_limit_mib` but expressed as a percentage of the total available memory. Intended to be used with the `limit_percentage` setting. -| `0` - -|=== - -[id="resource-detection-processor_{context}"] -==== Resource Detection processor - -The Resource Detection processor is designed to identify host resource details in alignment with OpenTelemetry's resource semantic standards. Using this detected information, it can add or replace the resource values in telemetry data. - -* Support level: link:https://access.redhat.com/support/offerings/techpreview[Technology Preview] -* Supported signals: traces, metrics - -.{product-title} permissions required for the Resource Detection processor -[source,yaml] ----- -kind: ClusterRole -metadata: - name: otel-collector -rules: -- apiGroups: ["config.openshift.io"] - resources: ["infrastructures", "infrastructures/status"] - verbs: ["get", "watch", "list"] ----- - -.OpenTelemetry Collector using the Resource Detection processor -[source,yaml] ----- - config: | - processor: - resourcedetection: - detectors: [openshift] - override: true - service: - pipelines: - traces: - processors: [resourcedetection] - metrics: - processors: [resourcedetection] ----- - -[id="exporters_{context}"] -=== Exporters - -[id="otlp-exporter_{context}"] -==== OTLP exporter - -The OTLP gRPC exporter exports data using the OpenTelemetry protocol (OTLP). 
- -* Support level: link:https://access.redhat.com/support/offerings/techpreview[Technology Preview] -* Supported signals: traces, metrics - -.OpenTelemetry Collector custom resource with an enabled OTLP exporter -[source,yaml] ----- - config: | - exporters: - otlp: - endpoint: tempo-ingester:4317 <1> - tls: <2> - ca_file: ca.pem - cert_file: cert.pem - key_file: key.pem - insecure: false <3> - insecure_skip_verify: false <4> - reload_interval: 1h <5> - server_name_override: <6> - headers: <7> - X-Scope-OrgID: "dev" - service: - pipelines: - traces: - exporters: [otlp] - metrics: - exporters: [otlp] ----- -<1> The OTLP gRPC endpoint. If the `+https://+` scheme is used, then client transport security is enabled and overrides the `insecure` setting in the `tls`. -<2> The client side TLS configuration. Defines paths to TLS certificates. -<3> Disables client transport security when set to `true`. The default value is `false` by default. -<4> Skips verifying the certificate when set to `true`. The default value is `false`. -<5> Specifies the time interval at which the certificate is reloaded. If the value is not set, the certificate is never reloaded. `reload_interval` accepts a string containing valid units of time such as `ns`, `us` (or `µs`), `ms`, `s`, `m`, `h`. -<6> Overrides the virtual host name of authority such as the authority header field in requests. You can use this for testing. -<7> Headers are sent for every request performed during an established connection. - -[id="otlp-http-exporter_{context}"] -==== OTLP HTTP exporter - -The OTLP HTTP exporter exports data using the OpenTelemetry protocol (OTLP). - -* Support level: link:https://access.redhat.com/support/offerings/techpreview[Technology Preview] -* Supported signals: traces, metrics - -.OpenTelemetry Collector custom resource with an enabled OTLP exporter -[source,yaml] ----- - config: | - exporters: - otlphttp: - endpoint: http://tempo-ingester:4318 <1> - tls: <2> - headers: <3> - X-Scope-OrgID: "dev" - - service: - pipelines: - traces: - exporters: [otlphttp] - metrics: - expoters: [otlphttp] ----- -<1> The OTLP HTTP endpoint. If the `+https://+` scheme is used, then client transport security is enabled and overrides the `insecure` setting in the `tls`. -<2> The client side TLS configuration. Defines paths to TLS certificates. -<3> Headers are sent in every HTTP request. - -[id="jaeger-exporter_{context}"] -==== Jaeger exporter - -The Jaeger exporter exports data using the Jaeger proto format through gRPC. - -* Support level: link:https://access.redhat.com/support/offerings/techpreview[Technology Preview] -* Supported signals: traces - -.OpenTelemetry Collector custom resource with enabled Jaeger exporter -[source,yaml] ----- - config: | - exporters: - jaeger: - endpoint: jaeger-all-in-one:14250 <1> - tls: <2> - service: - pipelines: - traces: - exporters: [jaeger] ----- -<1> The Jaeger gRPC endpoint. -<2> The client side TLS configuration. Defines paths to TLS certificates. - -[id="logging-exporter_{context}"] -==== Logging exporter - -The Logging exporter prints data to the standard output. 
- -* Support level: link:https://access.redhat.com/support/offerings/techpreview[Technology Preview] -* Supported signals: traces, metrics - -.OpenTelemetry Collector custom resource with an enabled Logging exporter -[source,yaml] ----- - config: | - exporters: - logging: - verbosity: detailed <1> - service: - pipelines: - traces: - exporters: [logging] - metrics: - exporters: [logging] ----- -<1> Verbosity of the logging export: `detailed` or `normal` or `basic`. When set to `detailed`, pipeline data is verbosely logged. Defaults to `normal`. - -[id="prometheus-exporter_{context}"] -==== Prometheus exporter - -The Prometheus exporter exports data using the Prometheus or OpenMetrics formats. - -* Support level: link:https://access.redhat.com/support/offerings/techpreview[Technology Preview] -* Supported signals: metrics - -.OpenTelemetry Collector custom resource with an enabled Prometheus exporter -[source,yaml] ----- - ports: - - name: promexporter <1> - port: 8889 - protocol: TCP - config: | - exporters: - prometheus: - endpoint: 0.0.0.0:8889 <2> - tls: <3> - ca_file: ca.pem - cert_file: cert.pem - key_file: key.pem - namespace: prefix <4> - const_labels: <5> - label1: value1 - enable_open_metrics: true <6> - resource_to_telemetry_conversion: <7> - enabled: true - metric_expiration: 180m <8> - service: - pipelines: - metrics: - exporters: [prometheus] ----- -<1> Exposes the Prometheus port from the collector pod and service. You can enable scraping of metrics by Prometheus by using the port name in `ServiceMonitor` or `PodMonitor` custom resource. -<2> The network endpoint where the metrics are exposed. -<3> The server-side TLS configuration. Defines paths to TLS certificates. -<4> If set, exports metrics under the provided value. No default. -<5> Key-value pair labels that are applied for every exported metric. No default. -<6> If `true`, metrics are exported using the OpenMetrics format. Exemplars are only exported in the OpenMetrics format and only for histogram and monotonic sum metrics such as `counter`. Disabled by default. -<7> If `enabled` is `true`, all the resource attributes are converted to metric labels by default. Disabled by default. -<8> Defines how long metrics are exposed without updates. The default is `5m`. - diff --git a/modules/distr-tracing-otel-config-send-metrics-monitoring-stack.adoc b/modules/distr-tracing-otel-config-send-metrics-monitoring-stack.adoc deleted file mode 100644 index f58507ad1c..0000000000 --- a/modules/distr-tracing-otel-config-send-metrics-monitoring-stack.adoc +++ /dev/null @@ -1,39 +0,0 @@ -//// -This module is included in the following assemblies: -- distr_tracing_install/distributed-tracing-deploying-otel.adoc -//// -:_mod-docs-content-type: REFERENCE -[id="distr-tracing-send-metrics-monitoring-stack_{context}"] -= Sending metrics to the monitoring stack - -You can configure the monitoring stack to scrape OpenTelemetry Collector metrics endpoints and to remove duplicated labels that the monitoring stack has added during scraping. 
- -.Sample `PodMonitor` custom resource (CR) that configures the monitoring stack to scrape Collector metrics -[source,yaml] ----- -apiVersion: monitoring.coreos.com/v1 -kind: PodMonitor -metadata: - name: otel-collector -spec: - selector: - matchLabels: - app.kubernetes.io/name: otel-collector - podMetricsEndpoints: - - port: metrics <1> - - port: promexporter <2> - relabelings: - - action: labeldrop - regex: pod - - action: labeldrop - regex: container - - action: labeldrop - regex: endpoint - metricRelabelings: - - action: labeldrop - regex: instance - - action: labeldrop - regex: job ----- -<1> The name of the internal metrics port for the OpenTelemetry Collector. This port name is always `metrics`. -<2> The name of the Prometheus exporter port for the OpenTelemetry Collector. This port name is defined in the `.spec.ports` section of the `OpenTelemetryCollector` CR. diff --git a/modules/distr-tracing-otel-troubleshoot-logging-exporter.adoc b/modules/distr-tracing-otel-troubleshoot-logging-exporter.adoc deleted file mode 100644 index b42ac3344e..0000000000 --- a/modules/distr-tracing-otel-troubleshoot-logging-exporter.adoc +++ /dev/null @@ -1,30 +0,0 @@ -// Module included in the following assemblies: -// -// * /distr_tracing/distr_tracing_otel/distr-tracing-otel-troubleshooting.adoc -:_mod-docs-content-type: PROCEDURE -[id="distr-tracing-otel-troubleshoot-logging-exporter_{context}"] -= Logging exporter - -You can configure the logging exporter to export the collected data to the standard output. - -.Procedure - -. Configure the OpenTelemetry Collector custom resource as follows: -+ -[source,yaml] ----- - config: | - exporters: - logging: - verbosity: detailed - service: - pipelines: - traces: - exporters: [logging] - metrics: - exporters: [logging] - logs: - exporters: [logging] ----- - -. Use the `oc logs` command or the OpenShift console to export the logs to the standard output. diff --git a/modules/distr-tracing-otel-troubleshoot-logs.adoc b/modules/distr-tracing-otel-troubleshoot-logs.adoc deleted file mode 100644 index b6d2f85548..0000000000 --- a/modules/distr-tracing-otel-troubleshoot-logs.adoc +++ /dev/null @@ -1,24 +0,0 @@ -// Module included in the following assemblies: -// -// * /distr_tracing/distr_tracing_otel/distr-tracing-otel-troubleshooting.adoc -:_mod-docs-content-type: PROCEDURE -[id="distr-tracing-otel-troubleshoot-logs_{context}"] -= Getting the OpenTelemetry Collector logs - -You can get the logs for the OpenTelemetry Collector as follows. - -.Procedure - -. Set the relevant log level in the OpenTelemetry Collector custom resource (CR): -+ -[source,yaml] ----- - config: | - service: - telemetry: - logs: - level: debug <1> ----- -<1> Collector's log level. Select one of the following values: `info`, `warn`, `error`, or `debug`. Defaults to `info`. - -. Use the `oc logs` command or the OpenShift console to retrieve the logs. 
diff --git a/modules/distr-tracing-product-overview.adoc b/modules/distr-tracing-product-overview.adoc index f8ec767c10..a7768e395e 100644 --- a/modules/distr-tracing-product-overview.adoc +++ b/modules/distr-tracing-product-overview.adoc @@ -10,6 +10,7 @@ // * distr_tracing/distr_tracing_rn/distr-tracing-rn-2-7.adoc // * distr_tracing/distr_tracing_rn/distr-tracing-rn-2-8.adoc // * distr_tracing/distr_tracing_rn/distr-tracing-rn-2-9.adoc +// * distr_tracing/distr_tracing_rn/distr-tracing-rn-3-0.adoc // * distr_tracing_arch/distr-tracing-architecture.adoc // * service_mesh/v2x/ossm-architecture.adoc // * serverless/serverless-tracing.adoc diff --git a/modules/distr-tracing-tempo-config-default.adoc b/modules/distr-tracing-tempo-config-default.adoc index 9cc0584102..489d9f6662 100644 --- a/modules/distr-tracing-tempo-config-default.adoc +++ b/modules/distr-tracing-tempo-config-default.adoc @@ -32,85 +32,86 @@ spec: .Tempo parameters [options="header"] +[cols="l, a, a, a"] |=== |Parameter |Description |Values |Default value -|`apiVersion:` +|apiVersion: |API version to use when creating the object. |`tempotracing.io/v1` |`tempotracing.io/v1` -|`kind:` +|kind: |Defines the kind of Kubernetes object to create. |`tempo` | -|`metadata:` +|metadata: |Data that uniquely identifies the object, including a `name` string, `UID`, and optional `namespace`. | |{product-title} automatically generates the `UID` and completes the `namespace` with the name of the project where the object is created. -|`name:` +|name: |Name for the object. |Name of your TempoStack instance. |`tempo-all-in-one-inmemory` -|`spec:` +|spec: |Specification for the object to be created. |Contains all of the configuration parameters for your TempoStack instance. When a common definition for all Tempo components is required, it is defined under the `spec` node. When the definition relates to an individual component, it is placed under the `spec/template/` node. |N/A -|`resources:` +|resources: |Resources assigned to the TempoStack. | | -|`storageSize:` +|storageSize: |Storage size for ingester PVCs. | | -|`replicationFactor:` +|replicationFactor: |Configuration for the replication factor. | | -|`retention:` +|retention: |Configuration options for retention of traces. | | -|`storage:` +|storage: |Configuration options that define the storage. All storage-related options must be placed under `storage` and not under the `allInOne` or other component options. | | -|`template.distributor:` +|template.distributor: |Configuration options for the Tempo `distributor`. | | -|`template.ingester:` +|template.ingester: |Configuration options for the Tempo `ingester`. | | -|`template.compactor:` +|template.compactor: |Configuration options for the Tempo `compactor`. | | -|`template.querier:` +|template.querier: |Configuration options for the Tempo `querier`. | | -|`template.queryFrontend:` +|template.queryFrontend: |Configuration options for the Tempo `query-frontend`. | | -|`template.gateway:` +|template.gateway: |Configuration options for the Tempo `gateway`. 
 |
 |
@@ -130,7 +131,7 @@ kind: TempoStack
 metadata:
   name: simplest
 spec:
-  storage: <1>
+  storage: # <1>
   secret:
     name: minio
     type: s3
diff --git a/modules/distr-tracing-tempo-config-multitenancy.adoc b/modules/distr-tracing-tempo-config-multitenancy.adoc
new file mode 100644
index 0000000000..4310887e58
--- /dev/null
+++ b/modules/distr-tracing-tempo-config-multitenancy.adoc
@@ -0,0 +1,151 @@
+// Module included in the following assemblies:
+//
+// * distr_tracing_tempo/distr-tracing-tempo-configuring.adoc
+
+:_mod-docs-content-type: REFERENCE
+[id="distr-tracing-tempo-config-multitenancy_{context}"]
+= Multitenancy
+
+Multitenancy with authentication and authorization is provided in the Tempo Gateway service.
+The authentication uses OpenShift OAuth and the Kubernetes `TokenReview` API. The authorization uses the Kubernetes `SubjectAccessReview` API.
+
+.Sample `TempoStack` CR with two tenants, `dev` and `prod`
+[source,yaml]
+----
+apiVersion: tempo.grafana.com/v1alpha1
+kind: TempoStack
+metadata:
+  name: simplest
+spec:
+  tenants:
+    mode: openshift # <1>
+    authentication: # <2>
+      - tenantName: dev # <3>
+        tenantId: "1610b0c3-c509-4592-a256-a1871353dbfa" # <4>
+      - tenantName: prod
+        tenantId: "1610b0c3-c509-4592-a256-a1871353dbfb"
+  template:
+    gateway:
+      enabled: true # <5>
+    queryFrontend:
+      jaegerQuery:
+        enabled: true
+----
+
+<1> Must be set to `openshift`.
+<2> The list of tenants.
+<3> The tenant name. Must be provided in the `X-Scope-OrgId` header when ingesting the data.
+<4> A unique tenant ID.
+<5> Enables a gateway that performs authentication and authorization. The Jaeger UI is exposed at `+http://<gateway>/api/traces/v1/<tenant>/search+`.
+
+The authorization configuration uses the `ClusterRole` and `ClusterRoleBinding` of the Kubernetes Role-Based Access Control (RBAC). By default, no users have read or write permissions.
+
+.Sample of the read RBAC configuration that allows authenticated users to read the trace data of the `dev` and `prod` tenants
+[source,yaml]
+----
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+  name: tempostack-traces-reader
+rules:
+  - apiGroups:
+      - 'tempo.grafana.com'
+    resources: # <1>
+      - dev
+      - prod
+    resourceNames:
+      - traces
+    verbs:
+      - 'get' # <2>
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+  name: tempostack-traces-reader
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: tempostack-traces-reader
+subjects:
+  - kind: Group
+    apiGroup: rbac.authorization.k8s.io
+    name: system:authenticated # <3>
+----
+
+<1> Lists the tenants.
+<2> The `get` value enables the read operation.
+<3> Grants all authenticated users the read permissions for trace data.
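+
+For example, if you save the preceding read RBAC configuration to a file, such as a hypothetical `tempostack-traces-reader.yaml` file, you can apply it by running the following command:
+
+[source,terminal]
+----
+$ oc apply -f tempostack-traces-reader.yaml
+----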
+
+.Sample of the write RBAC configuration that allows the `otel-collector` service account to write the trace data for the `dev` tenant
+[source,yaml]
+----
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: otel-collector # <1>
+  namespace: otel
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+  name: tempostack-traces-write
+rules:
+  - apiGroups:
+      - 'tempo.grafana.com'
+    resources: # <2>
+      - dev
+    resourceNames:
+      - traces
+    verbs:
+      - 'create' # <3>
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+  name: tempostack-traces
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: tempostack-traces-write
+subjects:
+  - kind: ServiceAccount
+    name: otel-collector
+    namespace: otel
+----
+<1> The service account name for the client to use when exporting trace data. The client must send the service account token, `/var/run/secrets/kubernetes.io/serviceaccount/token`, as the bearer token header.
+<2> Lists the tenants.
+<3> The `create` value enables the write operation.
+
+The OpenTelemetry Collector can send trace data to the Tempo instance by using the service account that has the RBAC permissions for writing the data.
+
+.Sample OpenTelemetry CR configuration
+[source,yaml]
+----
+apiVersion: opentelemetry.io/v1alpha1
+kind: OpenTelemetryCollector
+metadata:
+  name: cluster-collector
+  namespace: tracing-system
+spec:
+  mode: deployment
+  serviceAccount: otel-collector
+  config: |
+    extensions:
+      bearertokenauth:
+        filename: "/var/run/secrets/kubernetes.io/serviceaccount/token"
+    exporters:
+      otlp/dev:
+        endpoint: tempo-simplest-gateway.tempo.svc.cluster.local:8090
+        tls:
+          insecure: false
+          ca_file: "/var/run/secrets/kubernetes.io/serviceaccount/service-ca.crt"
+        auth:
+          authenticator: bearertokenauth
+        headers:
+          X-Scope-OrgID: "dev"
+    service:
+      extensions: [bearertokenauth]
+      pipelines:
+        traces:
+          exporters: [otlp/dev]
+----
diff --git a/modules/distr-tracing-tempo-config-spanmetrics.adoc b/modules/distr-tracing-tempo-config-spanmetrics.adoc
new file mode 100644
index 0000000000..538058223a
--- /dev/null
+++ b/modules/distr-tracing-tempo-config-spanmetrics.adoc
@@ -0,0 +1,88 @@
+// Module included in the following assemblies:
+//
+// * distr_tracing_tempo/distr-tracing-tempo-configuring.adoc
+
+:_mod-docs-content-type: REFERENCE
+[id="distr-tracing-tempo-config-spanmetrics_{context}"]
+= Configuration of the monitor tab in Jaeger UI
+
+Trace data contains rich information, and the data is normalized across instrumented languages and frameworks.
+Therefore, additional metrics, such as the request count, duration, and error count (RED metrics), can be extracted from traces.
+The metrics can be visualized in the Jaeger console in the *Monitor* tab.
+
+The metrics are derived from spans in the OpenTelemetry Collector and are scraped from the Collector by the Prometheus instance that is deployed in the user-workload monitoring stack.
+The Jaeger UI queries these metrics from the Prometheus endpoint and visualizes them.
+
+== OpenTelemetry Collector configuration
+
+The OpenTelemetry Collector requires configuration of the `spanmetrics` connector that derives metrics from traces and exports the metrics in the Prometheus format.
+
+.OpenTelemetry Collector custom resource for span RED
+[source,yaml]
+----
+kind: OpenTelemetryCollector
+apiVersion: opentelemetry.io/v1alpha1
+metadata:
+  name: otel
+spec:
+  mode: deployment
+  observability:
+    metrics:
+      enableMetrics: true # <1>
+  config: |
+    connectors:
+      spanmetrics: # <2>
+        metrics_flush_interval: 15s
+
+    receivers:
+      otlp: # <3>
+        protocols:
+          grpc:
+          http:
+
+    exporters:
+      prometheus: # <4>
+        endpoint: 0.0.0.0:8889
+        add_metric_suffixes: false # must be disabled when the Jaeger console Monitor tab is enabled
+        resource_to_telemetry_conversion:
+          enabled: true # by default resource attributes are dropped
+      otlp:
+        # assumption: sends the traces to the TempoStack instance named "simplest"
+        # that is created in the next example
+        endpoint: tempo-simplest-distributor:4317
+        tls:
+          insecure: true
+
+    service:
+      pipelines:
+        traces:
+          receivers: [otlp]
+          exporters: [otlp, spanmetrics] # <5>
+        metrics:
+          receivers: [spanmetrics] # <6>
+          exporters: [prometheus]
+----
+<1> Creates the `ServiceMonitor` custom resource to enable scraping of the Prometheus exporter.
+<2> The Spanmetrics connector receives traces and exports metrics.
+<3> The OTLP receiver to receive spans in the OpenTelemetry protocol.
+<4> The Prometheus exporter is used to export metrics in the Prometheus format.
+<5> The Spanmetrics connector is configured as an exporter in the traces pipeline. The `otlp` exporter forwards the traces to Tempo.
+<6> The Spanmetrics connector is configured as a receiver in the metrics pipeline.
+
+== Tempo configuration
+
+In the `TempoStack` custom resource, you must enable the *Monitor* tab and set the Prometheus endpoint to the Thanos Querier service, which queries the data from the user-workload monitoring stack.
+
+.TempoStack custom resource with the enabled Monitor tab
+[source,yaml]
+----
+kind: TempoStack
+apiVersion: tempo.grafana.com/v1alpha1
+metadata:
+  name: simplest
+spec:
+  template:
+    queryFrontend:
+      jaegerQuery:
+        enabled: true
+        monitorTab:
+          enabled: true # <1>
+          prometheusEndpoint: https://thanos-querier.openshift-monitoring.svc.cluster.local:9091 # <2>
+        ingress:
+          type: route
+----
+<1> Enables the *Monitor* tab in the Jaeger console.
+<2> The service name for Thanos Querier from user-workload monitoring.
diff --git a/modules/distr-tracing-tempo-install-cli.adoc b/modules/distr-tracing-tempo-install-cli.adoc
index c64193d1d9..b80d81f4db 100644
--- a/modules/distr-tracing-tempo-install-cli.adoc
+++ b/modules/distr-tracing-tempo-install-cli.adoc
@@ -45,7 +45,7 @@ metadata:
 EOF
 ----
 
-.. Create an operator group by running the following command:
+.. Create an Operator group by running the following command:
 +
 [source,terminal]
 ----
@@ -86,7 +86,7 @@
 $ oc get csv -n openshift-tempo-operator
 ----
 
-. Create a project of your choice for the *TempoStack* instance that you will create in a subsequent step:
+. Create a project of your choice for the TempoStack instance that you will create in a subsequent step:
 
 ** To create a project from standard input without metadata:
 +
@@ -107,7 +107,7 @@ metadata:
 EOF
 ----
 
-. In the project that you created for the *TempoStack* instance, create a secret for your object storage bucket by running one of the following commands:
+. In the project that you created for the TempoStack instance, create a secret for your object storage bucket by running one of the following commands:
 
 ** To create a secret from a YAML file:
 +
@@ -133,9 +133,12 @@
 include::snippets/distr-tracing-tempo-required-secret-parameters.adoc[]
 
 include::snippets/distr-tracing-tempo-secret-example.adoc[]
 --
 
-. Create a *TempoStack* instance in the project that you created for the *TempoStack* instance.
+. Create a TempoStack instance in the project that you created for the TempoStack instance.
+ -NOTE: You can create multiple *TempoStack* instances in separate projects on the same cluster. +[NOTE] +==== +You can create multiple TempoStack instances in separate projects on the same cluster. +==== + .. Customize the `TempoStack` custom resource (CR): + @@ -150,8 +153,8 @@ spec: storageSize: 1Gi storage: secret: - name: <1> - type: <2> + name: # <1> + type: # <2> template: queryFrontend: jaegerQuery: @@ -233,4 +236,7 @@ $ export TEMPO_URL=$(oc get route -n tempo -o jsonpath .. Log in using your cluster administrator credentials for the web console. + -NOTE: The Tempo console initially shows no trace data following the Tempo console installation. +[NOTE] +==== +The Tempo console initially shows no trace data following the Tempo console installation. +==== diff --git a/modules/distr-tracing-tempo-install-web-console.adoc b/modules/distr-tracing-tempo-install-web-console.adoc index bfaadf741d..485f7472c0 100644 --- a/modules/distr-tracing-tempo-install-web-console.adoc +++ b/modules/distr-tracing-tempo-install-web-console.adoc @@ -50,7 +50,10 @@ include::snippets/distr-tracing-tempo-secret-example.adoc[] . Create a *TempoStack* instance. + -NOTE: You can create multiple *TempoStack* instances in separate projects on the same cluster. +[NOTE] +==== +You can create multiple *TempoStack* instances in separate projects on the same cluster. +==== .. Go to *Operators* -> *Installed Operators*. @@ -69,8 +72,8 @@ spec: storageSize: 1Gi storage: secret: - name: <1> - type: <2> + name: # <1> + type: # <2> template: queryFrontend: jaegerQuery: @@ -132,4 +135,7 @@ The stack deployed in this example is configured to receive Jaeger Thrift over H .. Select *Log In With OpenShift* to use your cluster administrator credentials for the web console. + -NOTE: The Tempo console initially shows no trace data following the Tempo console installation. +[NOTE] +==== +The Tempo console initially shows no trace data following the Tempo console installation. +==== diff --git a/modules/distr-tracing-tempo-update-olm.adoc b/modules/distr-tracing-tempo-update-olm.adoc deleted file mode 100644 index 2278f9c562..0000000000 --- a/modules/distr-tracing-tempo-update-olm.adoc +++ /dev/null @@ -1,13 +0,0 @@ -//Module included in the following assemblies: -// -//* distr_tracing_install/dist-tracing-tempo-updating.adoc - -:_mod-docs-content-type: CONCEPT -[id="distr-tracing-tempo-update-olm_{context}"] -= Automatic updates of the {TempoShortName} - -For version upgrades, the {TempoOperator} uses the Operator Lifecycle Manager (OLM), which controls installation, upgrade, and role-based access control (RBAC) of Operators in a cluster. - -The OLM runs in {product-title} by default. The OLM queries for available Operators as well as upgrades for installed Operators. - -When the {TempoOperator} is upgraded to the new version, it scans for running TempoStack instances that it manages and upgrades them to the version corresponding to the Operator's new version. 
diff --git a/modules/making-open-source-more-inclusive.adoc b/modules/making-open-source-more-inclusive.adoc
index bec578dabe..027e670671 100644
--- a/modules/making-open-source-more-inclusive.adoc
+++ b/modules/making-open-source-more-inclusive.adoc
@@ -10,6 +10,7 @@
 // * distr_tracing/distr_tracing_rn/distr-tracing-rn-2-7.adoc
 // * distr_tracing/distr_tracing_rn/distr-tracing-rn-2-8.adoc
 // * distr_tracing/distr_tracing_rn/distr-tracing-rn-2-9.adoc
+// * distr_tracing/distr_tracing_rn/distr-tracing-rn-3-0.adoc
 
 :_mod-docs-content-type: CONCEPT
diff --git a/modules/otel-config-collector.adoc b/modules/otel-config-collector.adoc
new file mode 100644
index 0000000000..c7ddd26061
--- /dev/null
+++ b/modules/otel-config-collector.adoc
@@ -0,0 +1,1255 @@
+// Module included in the following assemblies:
+//
+// * otel/otel-configuring.adoc
+
+:_mod-docs-content-type: REFERENCE
+[id="otel-collector-config-options_{context}"]
+= OpenTelemetry Collector configuration options
+
+The OpenTelemetry Collector consists of five types of components that access telemetry data:
+
+Receivers:: A receiver, which can be push or pull based, is how data gets into the Collector. Generally, a receiver accepts data in a specified format, translates it into the internal format, and passes it to processors and exporters defined in the applicable pipelines. By default, no receivers are configured. One or more receivers must be configured. Receivers may support one or more data sources.
+
+Processors:: Optional. Processors process the data between when it is received and when it is exported. By default, no processors are enabled. Processors must be enabled for every data source. Not all processors support all data sources. Depending on the data source, multiple processors might be enabled. Note that the order of processors matters.
+
+Exporters:: An exporter, which can be push or pull based, is how you send data to one or more back ends or destinations. By default, no exporters are configured. One or more exporters must be configured. Exporters can support one or more data sources. Exporters might be used with their default settings, but many exporters require configuration to specify at least the destination and security settings.
+
+Connectors:: A connector connects two pipelines. It consumes data as an exporter at the end of one pipeline and emits data as a receiver at the start of another pipeline. It can consume and emit data of the same or different data type. It can generate and emit data to summarize the consumed data, or it can merely replicate or route data.
+
+Extensions:: An extension adds capabilities to the Collector. For example, authentication can be added to the receivers and exporters automatically.
+
+You can define multiple instances of components in a custom resource YAML file. When configured, these components must be enabled through pipelines defined in the `spec.config.service` section of the YAML file. As a best practice, only enable the components that you need.
+
+.Example of the OpenTelemetry Collector custom resource file
+[source,yaml]
+----
+apiVersion: opentelemetry.io/v1alpha1
+kind: OpenTelemetryCollector
+metadata:
+  name: cluster-collector
+  namespace: tracing-system
+spec:
+  mode: deployment
+  observability:
+    metrics:
+      enableMetrics: true
+  config: |
+    receivers:
+      otlp:
+        protocols:
+          grpc:
+          http:
+    processors:
+    exporters:
+      otlp:
+        endpoint: jaeger-production-collector-headless.tracing-system.svc:4317
+        tls:
+          ca_file: "/var/run/secrets/kubernetes.io/serviceaccount/service-ca.crt"
+      prometheus:
+        endpoint: 0.0.0.0:8889
+        resource_to_telemetry_conversion:
+          enabled: true # by default resource attributes are dropped
+    service: # <1>
+      pipelines:
+        traces:
+          receivers: [otlp]
+          processors: []
+          exporters: [otlp]
+        metrics:
+          receivers: [otlp]
+          processors: []
+          exporters: [prometheus]
+----
+<1> If a component is configured but not added to a pipeline in the `service` section, the component is not enabled.
+
+.Parameters used by the Operator to define the OpenTelemetry Collector
+[options="header"]
+[cols="l, a, a, a"]
+|===
+|Parameter |Description |Values |Default
+|receivers:
+|A receiver is how data gets into the Collector. By default, no receivers are configured. There must be at least one enabled receiver for a configuration to be considered valid. Receivers are enabled by being added to a pipeline.
+|`otlp`, `jaeger`, `prometheus`, `zipkin`, `kafka`, `opencensus`
+|None

+|processors:
+|Processors process the data between when it is received and when it is exported. By default, no processors are enabled.
+|`batch`, `memory_limiter`, `resourcedetection`, `attributes`, `span`, `k8sattributes`, `filter`, `routing`
+|None

+|exporters:
+|An exporter sends data to one or more back ends or destinations. By default, no exporters are configured. There must be at least one enabled exporter for a configuration to be considered valid. Exporters are enabled by being added to a pipeline. Exporters might be used with their default settings, but many require configuration to specify at least the destination and security settings.
+|`otlp`, `otlphttp`, `debug`, `prometheus`, `kafka`
+|None

+|connectors:
+|Connectors join pairs of pipelines by consuming data as end-of-pipeline exporters and emitting data as start-of-pipeline receivers. Connectors can summarize, replicate, or route the consumed data.
+|`spanmetrics`
+|None

+|extensions:
+|Optional components for tasks that do not involve processing telemetry data.
+|`bearertokenauth`, `oauth2client`, `jaegerremotesampling`, `pprof`, `health_check`, `memory_ballast`, `zpages`
+|None

+|service:
+  pipelines:
+|Components are enabled by adding them to a pipeline under `service.pipelines`.
+|
+|

+|service:
+  pipelines:
+    traces:
+      receivers:
+|You enable receivers for tracing by adding them under `service.pipelines.traces`.
+|
+|None

+|service:
+  pipelines:
+    traces:
+      processors:
+|You enable processors for tracing by adding them under `service.pipelines.traces`.
+|
+|None

+|service:
+  pipelines:
+    traces:
+      exporters:
+|You enable exporters for tracing by adding them under `service.pipelines.traces`.
+|
+|None

+|service:
+  pipelines:
+    metrics:
+      receivers:
+|You enable receivers for metrics by adding them under `service.pipelines.metrics`.
+|
+|None

+|service:
+  pipelines:
+    metrics:
+      processors:
+|You enable processors for metrics by adding them under `service.pipelines.metrics`.
+|
+|None

+|service:
+  pipelines:
+    metrics:
+      exporters:
+|You enable exporters for metrics by adding them under `service.pipelines.metrics`.
+|
+|None
+|===
+
+[id="otel-collector-components_{context}"]
+== OpenTelemetry Collector components
+
+[id="receivers_{context}"]
+=== Receivers
+
+Receivers get data into the Collector.
+
+[id="otlp-receiver_{context}"]
+==== OTLP receiver
+
+The OTLP receiver ingests traces and metrics using the OpenTelemetry protocol (OTLP).
+
+.OpenTelemetry Collector custom resource with an enabled OTLP receiver
+[source,yaml]
+----
+  config: |
+    receivers:
+      otlp:
+        protocols:
+          grpc:
+            endpoint: 0.0.0.0:4317 # <1>
+            tls: # <2>
+              ca_file: ca.pem
+              cert_file: cert.pem
+              key_file: key.pem
+              client_ca_file: client.pem # <3>
+              reload_interval: 1h # <4>
+          http:
+            endpoint: 0.0.0.0:4318 # <5>
+            tls: # <6>
+
+    service:
+      pipelines:
+        traces:
+          receivers: [otlp]
+        metrics:
+          receivers: [otlp]
+----
+<1> The OTLP gRPC endpoint. If omitted, the default `+0.0.0.0:4317+` is used.
+<2> The server-side TLS configuration. Defines paths to TLS certificates. If omitted, TLS is disabled.
+<3> The path to the TLS certificate at which the server verifies a client certificate. This sets the value of `ClientCAs` and `ClientAuth` to `RequireAndVerifyClientCert` in the `TLSConfig`. For more information, see the link:https://godoc.org/crypto/tls#Config[`Config` of the Golang TLS package].
+<4> Specifies the time interval at which the certificate is reloaded. If the value is not set, the certificate is never reloaded. The `reload_interval` accepts a string containing valid units of time such as `ns`, `us` (or `µs`), `ms`, `s`, `m`, `h`.
+<5> The OTLP HTTP endpoint. The default value is `+0.0.0.0:4318+`.
+<6> The server-side TLS configuration. For more information, see the `grpc` protocol configuration section.
+
+[id="jaeger-receiver_{context}"]
+==== Jaeger receiver
+
+The Jaeger receiver ingests traces in the Jaeger formats.
+
+.OpenTelemetry Collector custom resource with an enabled Jaeger receiver
+[source,yaml]
+----
+  config: |
+    receivers:
+      jaeger:
+        protocols:
+          grpc:
+            endpoint: 0.0.0.0:14250 # <1>
+          thrift_http:
+            endpoint: 0.0.0.0:14268 # <2>
+          thrift_compact:
+            endpoint: 0.0.0.0:6831 # <3>
+          thrift_binary:
+            endpoint: 0.0.0.0:6832 # <4>
+        tls: # <5>
+
+    service:
+      pipelines:
+        traces:
+          receivers: [jaeger]
+----
+<1> The Jaeger gRPC endpoint. If omitted, the default `+0.0.0.0:14250+` is used.
+<2> The Jaeger Thrift HTTP endpoint. If omitted, the default `+0.0.0.0:14268+` is used.
+<3> The Jaeger Thrift Compact endpoint. If omitted, the default `+0.0.0.0:6831+` is used.
+<4> The Jaeger Thrift Binary endpoint. If omitted, the default `+0.0.0.0:6832+` is used.
+<5> The server-side TLS configuration. See the OTLP receiver configuration section for more details.
+
+[id="prometheus-receiver_{context}"]
+==== Prometheus receiver
+
+The Prometheus receiver is currently a link:https://access.redhat.com/support/offerings/techpreview[Technology Preview] feature only.
+
+The Prometheus receiver scrapes the metrics endpoints.
+
+.OpenTelemetry Collector custom resource with an enabled Prometheus receiver
+[source,yaml]
+----
+  config: |
+    receivers:
+      prometheus:
+        config:
+          scrape_configs: # <1>
+          - job_name: 'my-app' # <2>
+            scrape_interval: 5s # <3>
+            static_configs:
+            - targets: ['my-app.example.svc.cluster.local:8888'] # <4>
+    service:
+      pipelines:
+        metrics:
+          receivers: [prometheus]
+----
+<1> Scrape configurations in the Prometheus format.
+<2> The Prometheus job name.
+<3> The interval for scraping the metrics data. Accepts time units. The default value is `1m`.
+<4> The targets at which the metrics are exposed. This example scrapes the metrics from a `my-app` application in the `example` project.
+
+[id="zipkin-receiver_{context}"]
+==== Zipkin receiver
+
+The Zipkin receiver ingests traces in the Zipkin v1 and v2 formats.
+
+.OpenTelemetry Collector custom resource with the enabled Zipkin receiver
+[source,yaml]
+----
+  config: |
+    receivers:
+      zipkin:
+        endpoint: 0.0.0.0:9411 # <1>
+        tls: # <2>
+
+    service:
+      pipelines:
+        traces:
+          receivers: [zipkin]
+----
+<1> The Zipkin HTTP endpoint. If omitted, the default `+0.0.0.0:9411+` is used.
+<2> The server-side TLS configuration. See the OTLP receiver configuration section for more details.
+
+[id="kafka-receiver_{context}"]
+==== Kafka receiver
+
+The Kafka receiver is currently a link:https://access.redhat.com/support/offerings/techpreview[Technology Preview] feature only.
+
+The Kafka receiver receives traces, metrics, and logs from Kafka in the OTLP format.
+
+.OpenTelemetry Collector custom resource with the enabled Kafka receiver
+[source,yaml]
+----
+  config: |
+    receivers:
+      kafka:
+        brokers: ["localhost:9092"] # <1>
+        protocol_version: 2.0.0 # <2>
+        topic: otlp_spans # <3>
+        auth:
+          plain_text: # <4>
+            username: example
+            password: example
+          tls: # <5>
+            ca_file: ca.pem
+            cert_file: cert.pem
+            key_file: key.pem
+            insecure: false # <6>
+            server_name_override: kafka.example.corp # <7>
+    service:
+      pipelines:
+        traces:
+          receivers: [kafka]
+----
+<1> The list of Kafka brokers. The default is `+localhost:9092+`.
+<2> The Kafka protocol version. For example, `+2.0.0+`. This is a required field.
+<3> The name of the Kafka topic to read from. The default is `+otlp_spans+`.
+<4> The plaintext authentication configuration. If omitted, plaintext authentication is disabled.
+<5> The client-side TLS configuration. Defines paths to the TLS certificates. If omitted, TLS authentication is disabled.
+<6> Disables verifying the server's certificate chain and host name. The default is `+false+`.
+<7> The name of the server requested by the client, to support virtual hosting.
+
+[id="opencensus-receiver_{context}"]
+==== OpenCensus receiver
+
+The OpenCensus receiver provides backwards compatibility with the OpenCensus project for easier migration of instrumented codebases. It receives metrics and traces in the OpenCensus format over gRPC or HTTP/JSON.
+
+.OpenTelemetry Collector custom resource with the enabled OpenCensus receiver
+[source,yaml]
+----
+  config: |
+    receivers:
+      opencensus:
+        endpoint: 0.0.0.0:9411 # <1>
+        tls: # <2>
+        cors_allowed_origins: # <3>
+        - https://*.<example>.com
+    service:
+      pipelines:
+        traces:
+          receivers: [opencensus]
+          ...
+----
+<1> The OpenCensus endpoint. If omitted, the default is `+0.0.0.0:55678+`.
+<2> The server-side TLS configuration. See the OTLP receiver configuration section for more details.
+<3> You can also use the HTTP JSON endpoint to optionally configure CORS, which is enabled by specifying a list of allowed CORS origins in this field.
+Wildcards with `+*+` are accepted under the `cors_allowed_origins`.
+To match any origin, enter only `+*+`.
+
+[id="processors_{context}"]
+=== Processors
+
+Processors process the data between when it is received and when it is exported.
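+
+Because the order of processors in a pipeline matters, the following minimal sketch shows a common arrangement rather than a prescribed configuration: the `memory_limiter` processor, which is described in the following sections, runs first in the pipeline, and the `batch` processor runs last.
+
+[source,yaml]
+----
+  config: |
+    service:
+      pipelines:
+        traces:
+          receivers: [otlp]
+          processors: [memory_limiter, batch]
+          exporters: [otlp]
+----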
+
+[id="batch-processor_{context}"]
+==== Batch processor
+
+The Batch processor batches traces and metrics to reduce the number of outgoing connections needed to transfer the telemetry information.
+
+.Example of the OpenTelemetry Collector custom resource when using the Batch processor
+[source,yaml]
+----
+  config: |
+    processors:
+      batch:
+        timeout: 5s
+        send_batch_max_size: 10000
+    service:
+      pipelines:
+        traces:
+          processors: [batch]
+        metrics:
+          processors: [batch]
+----
+
+.Parameters used by the Batch processor
+[options="header"]
+[cols="l, a, a"]
+|===
+|Parameter |Description |Default

+|timeout
+|Sends the batch after a specific time duration and irrespective of the batch size.
+|`200ms`

+|send_batch_size
+|Sends the batch of telemetry data after the specified number of spans or metrics.
+|`8192`

+|send_batch_max_size
+|The maximum allowable size of the batch. Must be equal to or greater than the `send_batch_size`.
+|`0`

+|metadata_keys
+|When activated, a batcher instance is created for each unique set of values found in the `client.Metadata`.
+|`[]`

+|metadata_cardinality_limit
+|When the `metadata_keys` are populated, this configuration restricts the number of distinct metadata key-value combinations processed throughout the duration of the process.
+|`1000`
+|===
+
+[id="memorylimiter-processor_{context}"]
+==== Memory Limiter processor
+
+The Memory Limiter processor periodically checks the Collector's memory usage and pauses data processing when the soft memory limit is reached. This processor supports traces, metrics, and logs. The preceding component, which is typically a receiver, is expected to retry sending the same data and may apply a backpressure to the incoming data. When memory usage exceeds the hard limit, the Memory Limiter processor forces garbage collection to run.
+
+.Example of the OpenTelemetry Collector custom resource when using the Memory Limiter processor
+[source,yaml]
+----
+  config: |
+    processors:
+      memory_limiter:
+        check_interval: 1s
+        limit_mib: 4000
+        spike_limit_mib: 800
+    service:
+      pipelines:
+        traces:
+          processors: [memory_limiter]
+        metrics:
+          processors: [memory_limiter]
+----
+
+.Parameters used by the Memory Limiter processor
+[options="header"]
+[cols="l, a, a"]
+|===
+|Parameter |Description |Default

+|check_interval
+|Time between memory usage measurements. The optimal value is `1s`. For spiky traffic patterns, you can decrease the `check_interval` or increase the `spike_limit_mib`.
+|`0s`

+|limit_mib
+|The hard limit, which is the maximum amount of memory in MiB allocated on the heap. Typically, the total memory usage of the OpenTelemetry Collector is about 50 MiB greater than this value.
+|`0`

+|spike_limit_mib
+|Spike limit, which is the maximum expected spike of memory usage in MiB. The optimal value is approximately 20% of `limit_mib`. To calculate the soft limit, subtract the `spike_limit_mib` from the `limit_mib`.
+|20% of `limit_mib`

+|limit_percentage
+|Same as the `limit_mib` but expressed as a percentage of the total available memory. The `limit_mib` setting takes precedence over this setting.
+|`0`

+|spike_limit_percentage
+|Same as the `spike_limit_mib` but expressed as a percentage of the total available memory. Intended to be used with the `limit_percentage` setting.
+|`0`

+|===
+
+[id="resource-detection-processor_{context}"]
+==== Resource Detection processor
+
+The Resource Detection processor is currently a link:https://access.redhat.com/support/offerings/techpreview[Technology Preview] feature only.
+
+The Resource Detection processor identifies host resource details in alignment with OpenTelemetry's resource semantic standards. Using the detected information, it can add or replace the resource values in telemetry data. This processor supports traces and metrics, and can be used with multiple detectors, such as the Docker metadata detector or the `OTEL_RESOURCE_ATTRIBUTES` environment variable detector.
+
+.{product-title} permissions required for the Resource Detection processor
+[source,yaml]
+----
+kind: ClusterRole
+metadata:
+  name: otel-collector
+rules:
+- apiGroups: ["config.openshift.io"]
+  resources: ["infrastructures", "infrastructures/status"]
+  verbs: ["get", "watch", "list"]
+----
+
+.OpenTelemetry Collector using the Resource Detection processor
+[source,yaml]
+----
+  config: |
+    processors:
+      resourcedetection:
+        detectors: [openshift]
+        override: true
+    service:
+      pipelines:
+        traces:
+          processors: [resourcedetection]
+        metrics:
+          processors: [resourcedetection]
+----
+
+.OpenTelemetry Collector using the Resource Detection processor with an environment variable detector
+[source,yaml]
+----
+  config: |
+    processors:
+      resourcedetection/env:
+        detectors: [env] # <1>
+        timeout: 2s
+        override: false
+----
+<1> Specifies which detector to use. In this example, the environment detector is specified.
+
+[id="attributes-processor_{context}"]
+==== Attributes processor
+
+The Attributes processor is currently a link:https://access.redhat.com/support/offerings/techpreview[Technology Preview] feature only.
+
+The Attributes processor can modify attributes of a span, log, or metric. You can configure this processor to filter and match input data and include or exclude such data for specific actions.
+
+The processor operates on a list of actions, executing them in the order specified in the configuration. The following actions are supported:
+
+Insert:: Inserts a new attribute into the input data when the specified key does not already exist.
+
+Update:: Updates an attribute in the input data if the key already exists.
+
+Upsert:: Combines the insert and update actions: Inserts a new attribute if the key does not exist yet. Updates the attribute if the key already exists.
+
+Delete:: Removes an attribute from the input data.
+
+Hash:: Hashes an existing attribute value as SHA1.
+
+Extract:: Extracts values by using a regular expression rule from the input key to the target keys defined in the rule. If a target key already exists, it is overridden similarly to the Span processor's `to_attributes` setting with the existing attribute as the source.
+
+Convert:: Converts an existing attribute to a specified type.
+
+.OpenTelemetry Collector using the Attributes processor
+[source,yaml]
+----
+  config: |
+    processors:
+      attributes/example:
+        actions:
+          - key: db.table
+            action: delete
+          - key: redacted_span
+            value: true
+            action: upsert
+          - key: copy_key
+            from_attribute: key_original
+            action: update
+          - key: account_id
+            value: 2245
+            action: insert
+          - key: account_password
+            action: delete
+          - key: account_email
+            action: hash
+          - key: http.status_code
+            action: convert
+            converted_type: int
+----
+
+[id="resource-processor_{context}"]
+==== Resource processor
+
+The Resource processor is currently a link:https://access.redhat.com/support/offerings/techpreview[Technology Preview] feature only.
+
+The Resource processor applies changes to the resource attributes. This processor supports traces, metrics, and logs.
+
+.OpenTelemetry Collector using the Resource processor
+[source,yaml]
+----
+  config: |
+    processors:
+      resource:
+        attributes:
+        - key: cloud.availability_zone
+          value: "zone-1"
+          action: upsert
+        - key: k8s.cluster.name
+          from_attribute: k8s-cluster
+          action: insert
+        - key: redundant-attribute
+          action: delete
+----
+
+Attributes represent the actions that are applied to the resource attributes, such as deleting the attribute, inserting the attribute, or upserting the attribute.
+
+[id="span-processor_{context}"]
+==== Span processor
+
+The Span processor is currently a link:https://access.redhat.com/support/offerings/techpreview[Technology Preview] feature only.
+
+The Span processor modifies the span name based on its attributes or extracts the span attributes from the span name. It can also change the span status and include or exclude spans. This processor supports traces.
+
+Span renaming requires specifying attributes for the new name by using the `from_attributes` configuration.
+
+.OpenTelemetry Collector using the Span processor for renaming a span
+[source,yaml]
+----
+  config: |
+    processors:
+      span:
+        name:
+          from_attributes: [<key1>, <key2>, ...] # <1>
+          separator: <value> # <2>
+----
+<1> Defines the keys to form the new span name.
+<2> An optional separator.
+
+You can use the processor to extract attributes from the span name.
+
+.OpenTelemetry Collector using the Span processor for extracting attributes from a span name
+[source,yaml]
+----
+  config: |
+    processors:
+      span/to_attributes:
+        name:
+          to_attributes:
+            rules:
+            - ^\/api\/v1\/document\/(?P<documentId>.*)\/update$ # <1>
+----
+<1> This rule defines how the extraction is to be executed. You can define more rules: for example, in this case, if the regular expression matches the name, a `documentId` attribute is created. In this example, if the input span name is `/api/v1/document/12345678/update`, this results in the `/api/v1/document/{documentId}/update` output span name, and a new `"documentId"="12345678"` attribute is added to the span.
+
+You can also modify the span status.
+
+.OpenTelemetry Collector using the Span processor for status change
+[source,yaml]
+----
+  config: |
+    processors:
+      span/set_status:
+        status:
+          code: Error
+          description: ""
+----
+
+[id="kubernetes-attributes-processor_{context}"]
+==== Kubernetes Attributes processor
+
+The Kubernetes Attributes processor is currently a link:https://access.redhat.com/support/offerings/techpreview[Technology Preview] feature only.
+
+The Kubernetes Attributes processor enables automatic configuration of spans, metrics, and log resource attributes by using the Kubernetes metadata.
+This processor supports traces, metrics, and logs.
+This processor automatically identifies the Kubernetes resources, extracts the metadata from them, and incorporates this extracted metadata as resource attributes into relevant spans, metrics, and logs. It utilizes the Kubernetes API to discover all pods operating within a cluster, maintaining records of their IP addresses, pod UIDs, and other relevant metadata.
+
+.Minimum {product-title} permissions required for the Kubernetes Attributes processor
+[source,yaml]
+----
+kind: ClusterRole
+metadata:
+  name: otel-collector
+rules:
+  - apiGroups: ['']
+    resources: ['pods', 'namespaces']
+    verbs: ['get', 'watch', 'list']
+----
+
+.OpenTelemetry Collector using the Kubernetes Attributes processor
+[source,yaml]
+----
+  config: |
+    processors:
+      k8sattributes:
+        filter:
+          node_from_env_var: KUBE_NODE_NAME
+----
+
+[id="filter-processor_{context}"]
+==== Filter processor
+
+The Filter processor is currently a link:https://access.redhat.com/support/offerings/techpreview[Technology Preview] feature only.
+
+The Filter processor leverages the OpenTelemetry Transformation Language to establish criteria for discarding telemetry data. If any of these conditions are satisfied, the telemetry data are discarded. The conditions can be combined by using the logical OR operator. This processor supports traces, metrics, and logs.
+
+.OpenTelemetry Collector custom resource with an enabled Filter processor
+[source,yaml]
+----
+  config: |
+    processors:
+      filter/ottl:
+        error_mode: ignore # <1>
+        traces:
+          span:
+            - 'attributes["container.name"] == "app_container_1"' # <2>
+            - 'resource.attributes["host.name"] == "localhost"' # <3>
+----
+<1> Defines the error mode. When set to `ignore`, ignores errors returned by conditions. When set to `propagate`, returns the error up the pipeline. An error causes the payload to be dropped from the Collector.
+<2> Filters the spans that have the `container.name == app_container_1` attribute.
+<3> Filters the spans that have the `host.name == localhost` resource attribute.
+
+[id="routing-processor_{context}"]
+==== Routing processor
+
+The Routing processor is currently a link:https://access.redhat.com/support/offerings/techpreview[Technology Preview] feature only.
+
+The Routing processor routes logs, metrics, or traces to specific exporters. This processor can read a header from an incoming HTTP request (gRPC or plain HTTP) or can read a resource attribute, and then directs the trace information to relevant exporters according to the read value.
+
+.OpenTelemetry Collector custom resource with an enabled Routing processor
+[source,yaml]
+----
+  config: |
+    processors:
+      routing:
+        from_attribute: X-Tenant # <1>
+        default_exporters: # <2>
+        - jaeger
+        table: # <3>
+        - value: acme
+          exporters: [jaeger/acme]
+    exporters:
+      jaeger:
+        endpoint: localhost:14250
+      jaeger/acme:
+        endpoint: localhost:24250
+----
+<1> The HTTP header name for the lookup value when performing the route.
+<2> The default exporter when the attribute value is not present in the table in the next section.
+<3> The table that defines which values are to be routed to which exporters.
+
+You can optionally create an `attribute_source` configuration, which defines where to look for the attribute in `from_attribute`. The allowed value is `context` to search the context, which includes the HTTP headers, or `resource` to search the resource attributes.
+
+[id="exporters_{context}"]
+=== Exporters
+
+Exporters send data to one or more back ends or destinations.
+
+[id="otlp-exporter_{context}"]
+==== OTLP exporter
+
+The OTLP gRPC exporter exports traces and metrics using the OpenTelemetry protocol (OTLP).
+
+.OpenTelemetry Collector custom resource with an enabled OTLP exporter
+[source,yaml]
+----
+  config: |
+    exporters:
+      otlp:
+        endpoint: tempo-ingester:4317 # <1>
+        tls: # <2>
+          ca_file: ca.pem
+          cert_file: cert.pem
+          key_file: key.pem
+          insecure: false # <3>
+          insecure_skip_verify: false # <4>
+          reload_interval: 1h # <5>
+          server_name_override: <name> # <6>
+        headers: # <7>
+          X-Scope-OrgID: "dev"
+    service:
+      pipelines:
+        traces:
+          exporters: [otlp]
+        metrics:
+          exporters: [otlp]
+----
+<1> The OTLP gRPC endpoint. If the `+https://+` scheme is used, then client transport security is enabled and overrides the `insecure` setting in the `tls`.
+<2> The client-side TLS configuration. Defines paths to TLS certificates.
+<3> Disables client transport security when set to `true`. The default value is `false`.
+<4> Skips verifying the certificate when set to `true`. The default value is `false`.
+<5> Specifies the time interval at which the certificate is reloaded. If the value is not set, the certificate is never reloaded. The `reload_interval` accepts a string containing valid units of time such as `ns`, `us` (or `µs`), `ms`, `s`, `m`, `h`.
+<6> Overrides the virtual host name of authority such as the authority header field in requests. You can use this for testing.
+<7> Headers are sent for every request performed during an established connection.
+
+[id="otlp-http-exporter_{context}"]
+==== OTLP HTTP exporter
+
+The OTLP HTTP exporter exports traces and metrics using the OpenTelemetry protocol (OTLP).
+
+.OpenTelemetry Collector custom resource with an enabled OTLP HTTP exporter
+[source,yaml]
+----
+  config: |
+    exporters:
+      otlphttp:
+        endpoint: http://tempo-ingester:4318 # <1>
+        tls: # <2>
+        headers: # <3>
+          X-Scope-OrgID: "dev"
+
+    service:
+      pipelines:
+        traces:
+          exporters: [otlphttp]
+        metrics:
+          exporters: [otlphttp]
+----
+<1> The OTLP HTTP endpoint. If the `+https://+` scheme is used, then client transport security is enabled and overrides the `insecure` setting in the `tls`.
+<2> The client-side TLS configuration. Defines paths to TLS certificates.
+<3> Headers are sent in every HTTP request.
+
+[id="debug-exporter_{context}"]
+==== Debug exporter
+
+The Debug exporter prints traces and metrics to the standard output.
+
+.OpenTelemetry Collector custom resource with an enabled Debug exporter
+[source,yaml]
+----
+  config: |
+    exporters:
+      debug:
+        verbosity: detailed # <1>
+    service:
+      pipelines:
+        traces:
+          exporters: [debug]
+        metrics:
+          exporters: [debug]
+----
+<1> Verbosity of the debug export: `detailed`, `normal`, or `basic`. When set to `detailed`, pipeline data is verbosely logged. Defaults to `normal`.
+
+[id="prometheus-exporter_{context}"]
+==== Prometheus exporter
+
+The Prometheus exporter is currently a link:https://access.redhat.com/support/offerings/techpreview[Technology Preview] feature only.
+
+The Prometheus exporter exports metrics in the Prometheus or OpenMetrics formats.
+
+.OpenTelemetry Collector custom resource with an enabled Prometheus exporter
+[source,yaml]
+----
+  ports:
+  - name: promexporter # <1>
+    port: 8889
+    protocol: TCP
+  config: |
+    exporters:
+      prometheus:
+        endpoint: 0.0.0.0:8889 # <2>
+        tls: # <3>
+          ca_file: ca.pem
+          cert_file: cert.pem
+          key_file: key.pem
+        namespace: prefix # <4>
+        const_labels: # <5>
+          label1: value1
+        enable_open_metrics: true # <6>
+        resource_to_telemetry_conversion: # <7>
+          enabled: true
+        metric_expiration: 180m # <8>
+        add_metric_suffixes: false # <9>
+    service:
+      pipelines:
+        metrics:
+          exporters: [prometheus]
+----
+<1> Exposes the Prometheus port from the Collector pod and service. You can enable scraping of metrics by Prometheus by using the port name in the `ServiceMonitor` or `PodMonitor` custom resource.
+<2> The network endpoint where the metrics are exposed.
+<3> The server-side TLS configuration. Defines paths to TLS certificates.
+<4> If set, exports metrics under the provided value. No default.
+<5> Key-value pair labels that are applied for every exported metric. No default.
+<6> If `true`, metrics are exported using the OpenMetrics format. Exemplars are only exported in the OpenMetrics format and only for histogram and monotonic sum metrics such as `counter`. Disabled by default.
+<7> If `enabled` is `true`, all the resource attributes are converted to metric labels by default. Disabled by default.
+<8> Defines how long metrics are exposed without updates. The default is `5m`.
+<9> Adds the metric type and unit suffixes. Must be disabled if the *Monitor* tab in the Jaeger console is enabled. The default is `true`.
+
+[id="kafka-exporter_{context}"]
+==== Kafka exporter
+
+The Kafka exporter is currently a link:https://access.redhat.com/support/offerings/techpreview[Technology Preview] feature only.
+
+The Kafka exporter exports logs, metrics, and traces to Kafka. This exporter uses a synchronous producer that blocks and does not batch messages. It must be used with batch and queued retry processors for higher throughput and resiliency.
+
+.OpenTelemetry Collector custom resource with an enabled Kafka exporter
+[source,yaml]
+----
+  config: |
+    exporters:
+      kafka:
+        brokers: ["localhost:9092"] # <1>
+        protocol_version: 2.0.0 # <2>
+        topic: otlp_spans # <3>
+        auth:
+          plain_text: # <4>
+            username: example
+            password: example
+          tls: # <5>
+            ca_file: ca.pem
+            cert_file: cert.pem
+            key_file: key.pem
+            insecure: false # <6>
+            server_name_override: kafka.example.corp # <7>
+    service:
+      pipelines:
+        traces:
+          exporters: [kafka]
+----
+<1> The list of Kafka brokers. The default is `+localhost:9092+`.
+<2> The Kafka protocol version. For example, `+2.0.0+`. This is a required field.
+<3> The name of the Kafka topic to export to. The following are the defaults: `+otlp_spans+` for traces, `+otlp_metrics+` for metrics, `+otlp_logs+` for logs.
+<4> The plaintext authentication configuration. If omitted, plaintext authentication is disabled.
+<5> The client-side TLS configuration. Defines paths to the TLS certificates. If omitted, TLS authentication is disabled.
+<6> Disables verifying the server's certificate chain and host name. The default is `+false+`.
+<7> The name of the server requested by the client, to support virtual hosting.
+
+[id="connectors_{context}"]
+=== Connectors
+
+Connectors connect two pipelines.
+
+[id="spanmetrics-connector_{context}"]
+==== Spanmetrics connector
+
+The Spanmetrics connector is currently a link:https://access.redhat.com/support/offerings/techpreview[Technology Preview] feature only.
+
+The Spanmetrics connector aggregates Request, Error, and Duration (R.E.D) OpenTelemetry metrics from span data.
+
+.OpenTelemetry Collector custom resource with an enabled Spanmetrics connector
+[source,yaml]
+----
+  config: |
+    connectors:
+      spanmetrics:
+        metrics_flush_interval: 15s # <1>
+    service:
+      pipelines:
+        traces:
+          exporters: [spanmetrics]
+        metrics:
+          receivers: [spanmetrics]
+----
+<1> Defines the flush interval of the generated metrics. Defaults to `15s`.
+
+[id="extensions_{context}"]
+=== Extensions
+
+Extensions add capabilities to the Collector.
+
+[id="bearertokenauth-extension_{context}"]
+==== BearerTokenAuth extension
+
+The BearerTokenAuth extension is currently a link:https://access.redhat.com/support/offerings/techpreview[Technology Preview] feature only.
+
+The BearerTokenAuth extension is an authenticator for receivers and exporters that are based on the HTTP and the gRPC protocol.
+You can use the OpenTelemetry Collector custom resource to configure client authentication and server authentication for the BearerTokenAuth extension on the receiver and exporter side.
+This extension supports traces, metrics, and logs.
+
+.OpenTelemetry Collector custom resource with client and server authentication configured for the BearerTokenAuth extension
+[source,yaml]
+----
+  config: |
+    extensions:
+      bearertokenauth:
+        scheme: "Bearer" # <1>
+        token: "<token>" # <2>
+        filename: "<token_file>" # <3>
+
+    receivers:
+      otlp:
+        protocols:
+          http:
+            auth:
+              authenticator: bearertokenauth # <4>
+    exporters:
+      otlp:
+        auth:
+          authenticator: bearertokenauth # <5>
+
+    service:
+      extensions: [bearertokenauth]
+      pipelines:
+        traces:
+          receivers: [otlp]
+          exporters: [otlp]
+----
+<1> You can configure the BearerTokenAuth extension to send a custom `scheme`. The default is `Bearer`.
+<2> You can add the BearerTokenAuth extension token as metadata to identify a message.
+<3> Path to a file that contains an authorization token that is transmitted with every message.
+<4> You can assign the authenticator configuration to an OTLP receiver.
+<5> You can assign the authenticator configuration to an OTLP exporter.
+
+[id="oauth2client-extension_{context}"]
+==== OAuth2Client extension
+
+The OAuth2Client extension is currently a link:https://access.redhat.com/support/offerings/techpreview[Technology Preview] feature only.
+
+The OAuth2Client extension is an authenticator for exporters that are based on the HTTP and the gRPC protocol.
+Client authentication for the OAuth2Client extension is configured in a separate section in the OpenTelemetry Collector custom resource.
+This extension supports traces, metrics, and logs.
+
+.OpenTelemetry Collector custom resource with client authentication configured for the OAuth2Client extension
+[source,yaml]
+----
+  config: |
+    extensions:
+      oauth2client:
+        client_id: <client_id> # <1>
+        client_secret: <client_secret> # <2>
+        endpoint_params: # <3>
+          audience: <audience>
+        token_url: https://example.com/oauth2/default/v1/token # <4>
+        scopes: ["api.metrics"] # <5>
+        # tls settings for the token client
+        tls: # <6>
+          insecure: true # <7>
+          ca_file: /var/lib/mycert.pem # <8>
+          cert_file: <cert_file> # <9>
+          key_file: <key_file> # <10>
+        timeout: 2s # <11>
+
+    receivers:
+      otlp:
+        protocols:
+          http:
+
+    exporters:
+      otlp:
+        auth:
+          authenticator: oauth2client # <12>
+
+    service:
+      extensions: [oauth2client]
+      pipelines:
+        traces:
+          receivers: [otlp]
+          exporters: [otlp]
+----
+<1> Client identifier, which is provided by the identity provider.
+<2> Confidential key used to authenticate the client to the identity provider.
+<3> Further metadata, in the key-value pair format, which is transferred during authentication. For example, `audience` specifies the intended audience for the access token, indicating the recipient of the token.
+<4> The URL of the OAuth2 token endpoint, where the Collector requests access tokens.
+<5> The scopes define the specific permissions or access levels requested by the client.
+<6> The Transport Layer Security (TLS) settings for the token client, which is used to establish a secure connection when requesting tokens.
+<7> When set to `true`, configures the Collector to use an insecure or non-verified TLS connection to call the configured token endpoint.
+<8> The path to a Certificate Authority (CA) file that is used to verify the server's certificate during the TLS handshake.
+<9> The path to the client certificate file that the client must use to authenticate itself to the OAuth2 server if required.
+<10> The path to the client's private key file that is used with the client certificate if needed for authentication.
+<11> Sets a timeout for the token client's request.
+<12> You can assign the authenticator configuration to an OTLP exporter.
+
+
+[id="jaegerremotesampling-extension_{context}"]
+==== Jaeger Remote Sampling extension
+
+The Jaeger Remote Sampling extension is currently a link:https://access.redhat.com/support/offerings/techpreview[Technology Preview] feature only.
+
+The Jaeger Remote Sampling extension enables serving sampling strategies following Jaeger's remote sampling API. You can configure this extension to proxy requests to a backing remote sampling server, such as a Jaeger collector down the pipeline, or to a static JSON file from the local file system.
+
+.OpenTelemetry Collector custom resource with a configured Jaeger Remote Sampling extension
+[source,yaml]
+----
+  config: |
+    extensions:
+      jaegerremotesampling:
+        source:
+          reload_interval: 30s # <1>
+          remote:
+            endpoint: jaeger-collector:14250 # <2>
+          file: /etc/otelcol/sampling_strategies.json # <3>
+
+    receivers:
+      otlp:
+        protocols:
+          http:
+
+    exporters:
+      otlp:
+
+    service:
+      extensions: [jaegerremotesampling]
+      pipelines:
+        traces:
+          receivers: [otlp]
+          exporters: [otlp]
+----
+<1> The time interval at which the sampling configuration is updated.
+<2> The endpoint for reaching the Jaeger remote sampling strategy provider.
+<3> The path to a local file that contains a sampling strategy configuration in the JSON format.
+ +.Example of a Jaeger Remote Sampling strategy file +[source,json] +---- +{ + "service_strategies": [ + { + "service": "foo", + "type": "probabilistic", + "param": 0.8, + "operation_strategies": [ + { + "operation": "op1", + "type": "probabilistic", + "param": 0.2 + }, + { + "operation": "op2", + "type": "probabilistic", + "param": 0.4 + } + ] + }, + { + "service": "bar", + "type": "ratelimiting", + "param": 5 + } + ], + "default_strategy": { + "type": "probabilistic", + "param": 0.5, + "operation_strategies": [ + { + "operation": "/health", + "type": "probabilistic", + "param": 0.0 + }, + { + "operation": "/metrics", + "type": "probabilistic", + "param": 0.0 + } + ] + } +} +---- + + + +[id="pprof-extension_{context}"] +==== Performance Profiler extension + +The Performance Profiler extension is currently a link:https://access.redhat.com/support/offerings/techpreview[Technology Preview] feature only. + +The Performance Profiler extension enables the Go `net/http/pprof` endpoint. This is typically used by developers to collect performance profiles and investigate issues with the service. + +.OpenTelemetry Collector custom resource with the configured Performance Profiler extension +[source,yaml] +---- + config: | + extensions: + pprof: + endpoint: localhost:1777 # <1> + block_profile_fraction: 0 # <2> + mutex_profile_fraction: 0 # <3> + save_to_file: test.pprof # <4> + + receivers: + otlp: + protocols: + http: + + exporters: + otlp: + + service: + extensions: [pprof] + pipelines: + traces: + receivers: [otlp] + exporters: [otlp] +---- +<1> The endpoint at which this extension listens. Use `localhost:` to make it available only locally or `":"` to make it available on all network interfaces. The default value is `localhost:1777`. +<2> Sets a fraction of blocking events to be profiled. To disable profiling, set this to `0` or a negative integer. See the link:https://golang.org/pkg/runtime/#SetBlockProfileRate[documentation] for the `runtime` package. The default value is `0`. +<3> Set a fraction of mutex contention events to be profiled. To disable profiling, set this to `0` or a negative integer. See the link:https://golang.org/pkg/runtime/#SetMutexProfileFraction[documentation] for the `runtime` package. The default value is `0`. +<4> The name of the file in which the CPU profile is to be saved. Profiling starts when the Collector starts. Profiling is saved to the file when the Collector is terminated. + +[id="healthcheck-extension_{context}"] +==== Health Check extension + +The Health Check extension is currently a link:https://access.redhat.com/support/offerings/techpreview[Technology Preview] feature only. + +The Health Check extension provides an HTTP URL for checking the status of the OpenTelemetry Collector. You can use this extension as a liveness and readiness probe on OpenShift. + +.OpenTelemetry Collector custom resource with the configured Health Check extension +[source,yaml] +---- + config: | + extensions: + health_check: + endpoint: "0.0.0.0:13133" # <1> + tls: # <2> + ca_file: "/path/to/ca.crt" + cert_file: "/path/to/cert.crt" + key_file: "/path/to/key.key" + path: "/health/status" # <3> + check_collector_pipeline: # <4> + enabled: true # <5> + interval: "5m" # <6> + exporter_failure_threshold: 5 # <7> + + receivers: + otlp: + protocols: + http: + + exporters: + otlp: + + service: + extensions: [health_check] + pipelines: + traces: + receivers: [otlp] + exporters: [otlp] +---- +<1> The target IP address for publishing the health check status. The default is `0.0.0.0:13133`. 
+<2> The TLS server-side configuration. Defines paths to TLS certificates. If omitted, the TLS is disabled. +<3> The path for the health check server. The default is `/`. +<4> Settings for the Collector pipeline health check. +<5> Enables the Collector pipeline health check. The default is `false`. +<6> The time interval for checking the number of failures. The default is `5m`. +<7> The threshold of a number of failures until which a container is still marked as healthy. The default is `5`. + +[id="memory-ballast-extension_{context}"] +==== Memory Ballast extension + +The Memory Ballast extension is currently a link:https://access.redhat.com/support/offerings/techpreview[Technology Preview] feature only. + +The Memory Ballast extension enables applications to configure memory ballast for the process. + +.OpenTelemetry Collector custom resource with the configured Memory Ballast extension +[source,yaml] +---- + config: | + extensions: + memory_ballast: + size_mib: 64 # <1> + size_in_percentage: 20 # <2> + + receivers: + otlp: + protocols: + http: + + exporters: + otlp: + + service: + extensions: [memory_ballast] + pipelines: + traces: + receivers: [otlp] + exporters: [otlp] +---- +<1> Sets the memory ballast size in MiB. Takes priority over the `size_in_percentage` if both are specified. +<2> Sets the memory ballast as a percentage, `1`-`100`, of the total memory. Supports containerized and physical host environments. + + +[id="zpages-extension_{context}"] +==== zPages extension + +The zPages extension is currently a link:https://access.redhat.com/support/offerings/techpreview[Technology Preview] feature only. + +The zPages extension provides an HTTP endpoint for extensions that serve zPages. At the endpoint, this extension serves live data for debugging instrumented components. All core exporters and receivers provide some zPages instrumentation. + +zPages are useful for in-process diagnostics without having to depend on a back end to examine traces or metrics. + +.OpenTelemetry Collector custom resource with the configured zPages extension +[source,yaml] +---- + config: | + extensions: + zpages: + endpoint: "localhost:55679" # <1> + + receivers: + otlp: + protocols: + http: + exporters: + otlp: + + service: + extensions: [zpages] + pipelines: + traces: + receivers: [otlp] + exporters: [otlp] +---- + +<1> Specifies the HTTP endpoint that serves zPages. Use `localhost:` to make it available only locally, or `":"` to make it available on all network interfaces. The default is `localhost:55679`. diff --git a/modules/otel-config-instrumentation.adoc b/modules/otel-config-instrumentation.adoc new file mode 100644 index 0000000000..4f94f83ec0 --- /dev/null +++ b/modules/otel-config-instrumentation.adoc @@ -0,0 +1,336 @@ +// Module included in the following assemblies: +// +// * otel/otel-instrumentation.adoc + +:_mod-docs-content-type: REFERENCE +[id="otel-instrumentation-config_{context}"] += OpenTelemetry instrumentation configuration options + +The {OTELName} can inject and configure the OpenTelemetry auto-instrumentation libraries into your workloads. Currently, the project supports injection of the instrumentation libraries from Go, Java, Node.js, Python, .NET, and the Apache HTTP Server (`httpd`). + +Auto-instrumentation in OpenTelemetry refers to the capability where the framework automatically instruments an application without manual code changes. 
This enables developers and administrators to get observability into their applications with minimal effort and changes to the existing codebase.
+
+[IMPORTANT]
+====
+The {OTELName} Operator only supports the injection mechanism of the instrumentation libraries but does not provide support for the instrumentation libraries themselves or for upstream images. Customers can build their own instrumentation images or use community images.
+====
+
+== Instrumentation options
+
+Instrumentation options are specified in the `Instrumentation` custom resource.
+
+.Sample `Instrumentation` custom resource file
+[source,yaml]
+----
+apiVersion: opentelemetry.io/v1alpha1
+kind: Instrumentation
+metadata:
+  name: java-instrumentation
+spec:
+  env:
+    - name: OTEL_EXPORTER_OTLP_TIMEOUT
+      value: "20"
+  exporter:
+    endpoint: http://production-collector.observability.svc.cluster.local:4317
+  propagators:
+    - w3c
+  sampler:
+    type: parentbased_traceidratio
+    argument: "0.25"
+  java:
+    env:
+      - name: OTEL_JAVAAGENT_DEBUG
+        value: "true"
+----
+
+//[cols=",,",options="header",]
+
+.Parameters used by the Operator to define the Instrumentation
+[options="header"]
+[cols="l, a, a"]
+|===
+|Parameter |Description |Values
+
+|env
+|Common environment variables to define across all the instrumentations.
+|
+
+|exporter
+|Exporter configuration.
+|
+
+|propagators
+|The inter-process context propagation configuration.
+|`tracecontext`, `baggage`, `b3`, `b3multi`, `jaeger`, `ottrace`, `none`
+
+|resource
+|Resource attributes configuration.
+|
+
+|sampler
+|Sampling configuration.
+|
+
+|apacheHttpd
+|Configuration for the Apache HTTP Server instrumentation.
+|
+
+|dotnet
+|Configuration for the .NET instrumentation.
+|
+
+|go
+|Configuration for the Go instrumentation.
+|
+
+|java
+|Configuration for the Java instrumentation.
+|
+
+|nodejs
+|Configuration for the Node.js instrumentation.
+|
+
+|python
+|Configuration for the Python instrumentation.
+|
+
+|===
+
+== Using the instrumentation CR with Service Mesh
+
+When using the instrumentation custom resource (CR) with {SMProductName}, you must use the `b3multi` propagator.
+
+=== Configuration of the Apache HTTP Server auto-instrumentation
+
+.Parameters for the `+.spec.apacheHttpd+` field
+[options="header"]
+[cols="l, a, a"]
+|===
+|Name |Description |Default
+
+|attrs
+|Attributes specific to the Apache HTTP Server.
+|
+
+|configPath
+|Location of the Apache HTTP Server configuration.
+|/usr/local/apache2/conf
+
+|env
+|Environment variables specific to the Apache HTTP Server.
+|
+
+|image
+|Container image with the Apache SDK and auto-instrumentation.
+|
+
+|resourceRequirements
+|The compute resource requirements.
+|
+
+|version
+|Apache HTTP Server version.
+|2.4
+
+|===
+
+.The `PodSpec` annotation to enable injection
+[source,yaml]
+----
+instrumentation.opentelemetry.io/inject-apache-httpd: "true"
+----
+
+=== Configuration of the .NET auto-instrumentation
+
+[options="header"]
+[cols="l, a"]
+|===
+|Name |Description
+
+|env
+|Environment variables specific to .NET.
+
+|image
+|Container image with the .NET SDK and auto-instrumentation.
+
+|resourceRequirements
+|The compute resource requirements.
+
+|===
+
+For the .NET auto-instrumentation, you must set the required `OTEL_EXPORTER_OTLP_ENDPOINT` environment variable if the exporter endpoint is set to port `4317`. The .NET auto-instrumentation uses `http/proto` by default, so the telemetry data must be sent to port `4318` instead.
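+
+For example, the following minimal sketch sets the variable on the workload container. The `Deployment` name and image are hypothetical, and the Collector address reuses the service name from the sample `Instrumentation` resource above:
+
+[source,yaml]
+----
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: dotnet-app # hypothetical workload name
+spec:
+  selector:
+    matchLabels:
+      app: dotnet-app
+  template:
+    metadata:
+      labels:
+        app: dotnet-app
+      annotations:
+        instrumentation.opentelemetry.io/inject-dotnet: "true"
+    spec:
+      containers:
+      - name: app
+        image: dotnet-app:latest # hypothetical image
+        env:
+        # Point the injected .NET instrumentation at the OTLP http/proto port 4318
+        # instead of the gRPC port 4317.
+        - name: OTEL_EXPORTER_OTLP_ENDPOINT
+          value: http://production-collector.observability.svc.cluster.local:4318
+----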
+
+.The `PodSpec` annotation to enable injection
+[source,yaml]
+----
+instrumentation.opentelemetry.io/inject-dotnet: "true"
+----
+
+=== Configuration of the Go auto-instrumentation
+
+[options="header"]
+[cols="l, a"]
+|===
+|Name |Description
+
+|env
+|Environment variables specific to Go.
+
+|image
+|Container image with the Go SDK and auto-instrumentation.
+
+|resourceRequirements
+|The compute resource requirements.
+
+|===
+
+.The `PodSpec` annotations to enable injection
+[source,yaml]
+----
+instrumentation.opentelemetry.io/inject-go: "true"
+instrumentation.opentelemetry.io/otel-go-auto-target-exe: "/path/to/container/executable"
+----
+
+The `+instrumentation.opentelemetry.io/otel-go-auto-target-exe+` annotation sets the value for the required `OTEL_GO_AUTO_TARGET_EXE` environment variable.
+
+.Additional permissions required for the Go auto-instrumentation in the OpenShift cluster
+[source,yaml]
+----
+apiVersion: security.openshift.io/v1
+kind: SecurityContextConstraints
+metadata:
+  name: otel-go-instrumentation-scc
+allowHostDirVolumePlugin: true
+allowPrivilegeEscalation: true
+allowPrivilegedContainer: true
+allowedCapabilities:
+- "SYS_PTRACE"
+fsGroup:
+  type: RunAsAny
+runAsUser:
+  type: RunAsAny
+seLinuxContext:
+  type: RunAsAny
+seccompProfiles:
+- '*'
+supplementalGroups:
+  type: RunAsAny
+----
+
+[TIP]
+====
+The CLI command for applying the permissions for the Go auto-instrumentation in the OpenShift cluster is as follows:
+[source,terminal]
+----
+$ oc adm policy add-scc-to-user otel-go-instrumentation-scc -z
+----
+====
+
+=== Configuration of the Java auto-instrumentation
+
+[options="header"]
+[cols="l, a"]
+|===
+|Name |Description
+
+|env
+|Environment variables specific to Java.
+
+|image
+|Container image with the Java SDK and auto-instrumentation.
+
+|resourceRequirements
+|The compute resource requirements.
+
+|===
+
+.The `PodSpec` annotation to enable injection
+[source,yaml]
+----
+instrumentation.opentelemetry.io/inject-java: "true"
+----
+
+=== Configuration of the Node.js auto-instrumentation
+
+[options="header"]
+[cols="l, a"]
+|===
+|Name |Description
+
+|env
+|Environment variables specific to Node.js.
+
+|image
+|Container image with the Node.js SDK and auto-instrumentation.
+
+|resourceRequirements
+|The compute resource requirements.
+
+|===
+
+.The `PodSpec` annotation to enable injection
+[source,yaml]
+----
+instrumentation.opentelemetry.io/inject-nodejs: "true"
+----
+
+=== Configuration of the Python auto-instrumentation
+
+[options="header"]
+[cols="l, a"]
+|===
+|Name |Description
+
+|env
+|Environment variables specific to Python.
+
+|image
+|Container image with the Python SDK and auto-instrumentation.
+
+|resourceRequirements
+|The compute resource requirements.
+
+|===
+
+For the Python auto-instrumentation, you must set the `OTEL_EXPORTER_OTLP_ENDPOINT` environment variable if the exporter endpoint is set to port `4317`. The Python auto-instrumentation uses `http/proto` by default, so the telemetry data must be sent to port `4318` instead.
+
+.The `PodSpec` annotation to enable injection
+[source,yaml]
+----
+instrumentation.opentelemetry.io/inject-python: "true"
+----
+
+=== Configuration of the OpenTelemetry SDK variables
+
+The OpenTelemetry SDK variables in your pod are configurable by using the following annotation:
+
+[source,yaml]
+----
+instrumentation.opentelemetry.io/inject-sdk: "true"
+----
+
+Note that all the annotations accept the following values:
+
+`true`:: Injects the `+Instrumentation+` resource from the namespace.
+ +`false`:: Does not inject any instrumentation. + +`instrumentation-name`:: The name of the instrumentation resource to inject from the current namespace. + +`other-namespace/instrumentation-name`:: The name of the instrumentation resource to inject from another namespace. + +=== Multi-container pods + +The instrumentation is run on the first container that is available by default according to the pod specification. In some cases, you can also specify target containers for injection. + +.Pod annotation +[source,yaml] +---- +instrumentation.opentelemetry.io/container-names: "," +---- + +[NOTE] +==== +The Go auto-instrumentation does not support multi-container auto-instrumentation injection. +==== diff --git a/modules/otel-config-multicluster.adoc b/modules/otel-config-multicluster.adoc new file mode 100644 index 0000000000..2ad215b125 --- /dev/null +++ b/modules/otel-config-multicluster.adoc @@ -0,0 +1,160 @@ +// Module included in the following assemblies: +// +// * otel/otel-configuring.adoc + +:_mod-docs-content-type: PROCEDURE +[id="gathering-observability-data-from-different-clusters_{context}"] += Gathering the observability data from different clusters with the OpenTelemetry Collector + +For a multicluster configuration, you can create one OpenTelemetry +Collector instance in each one of the remote clusters and forward all the telemetry +data to one OpenTelemetry Collector instance. + +.Prerequisites + +* The {OTELOperator} is installed. +* The {TempoOperator} is installed. +* A TempoStack is deployed on the cluster. + +.Procedure + +. Create a service account for the OpenTelemetry Collector. ++ +.Example ServiceAccount +[source,yaml] +---- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: otel-collector-deployment +---- + +. Create a cluster role for the service account. ++ +.Example ClusterRole +[source,yaml] +---- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: otel-collector +rules: + # <1> + # <2> +- apiGroups: ["", "config.openshift.io"] + resources: ["pods", "namespaces", "infrastructures", "infrastructures/status"] + verbs: ["get", "watch", "list"] +---- +<1> The `k8sattributesprocessor` requires permissions for pods and namespace resources. +<2> The `resourcedetectionprocessor` requires permissions for infrastructures and status. + +. Bind the cluster role to the service account. ++ +.Example ClusterRoleBinding +[source,yaml] +---- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: otel-collector +subjects: +- kind: ServiceAccount + name: otel-collector-deployment + namespace: otel-collector- +roleRef: + kind: ClusterRole + name: otel-collector + apiGroup: rbac.authorization.k8s.io +---- + +. Create the YAML file to define the `OpenTelemetryCollector` custom resource (CR) in the edge clusters. 
+
+.Example `OpenTelemetryCollector` custom resource for the edge clusters
+[source,yaml]
+----
+apiVersion: opentelemetry.io/v1alpha1
+kind: OpenTelemetryCollector
+metadata:
+  name: otel
+  namespace: otel-collector-
+spec:
+  mode: daemonset
+  serviceAccount: otel-collector-deployment
+  config: |
+    receivers:
+      jaeger:
+        protocols:
+          grpc:
+          thrift_binary:
+          thrift_compact:
+          thrift_http:
+      opencensus:
+      otlp:
+        protocols:
+          grpc:
+          http:
+      zipkin:
+    processors:
+      batch:
+      k8sattributes:
+      memory_limiter:
+        check_interval: 1s
+        limit_percentage: 50
+        spike_limit_percentage: 30
+      resourcedetection:
+        detectors: [openshift]
+    exporters:
+      otlphttp:
+        endpoint: https://observability-cluster.com:443 # <1>
+        compression: gzip
+        tls:
+          insecure: false
+          cert_file: "/path/to/client-cert.pem"
+          key_file: "/path/to/client-key.pem"
+          ca_file: "/path/to/ca.pem"
+
+    service:
+      pipelines:
+        traces:
+          receivers: [jaeger, opencensus, otlp, zipkin]
+          processors: [memory_limiter, k8sattributes, resourcedetection, batch]
+          exporters: [otlphttp]
+----
+<1> The Collector exporter is configured to export OTLP HTTP and points to the OpenTelemetry Collector in the central cluster.
+
+. Create the YAML file to define the `OpenTelemetryCollector` custom resource (CR) in the central cluster.
++
+.Example `OpenTelemetryCollector` custom resource for the central cluster
+[source,yaml]
+----
+apiVersion: opentelemetry.io/v1alpha1
+kind: OpenTelemetryCollector
+metadata:
+  name: otlp-receiver
+  namespace: observability
+spec:
+  mode: "deployment"
+  ingress:
+    type: route
+    hostname: "observability-cluster.com"
+    route:
+      termination: "edge"
+  config: |
+    receivers:
+      otlp:
+        protocols:
+          http:
+    exporters:
+      logging:
+      otlp:
+        endpoint: "tempo--distributor:4317" # <1>
+        tls:
+          insecure: true
+    service:
+      pipelines:
+        traces:
+          receivers: [otlp]
+          processors: []
+          exporters: [otlp]
+----
+<1> The Collector exporter is configured to export OTLP and points to the already created Tempo distributor endpoint, which in this example is `"tempo-simplest-distributor:4317"`.
diff --git a/modules/otel-config-send-metrics-monitoring-stack.adoc b/modules/otel-config-send-metrics-monitoring-stack.adoc
new file mode 100644
index 0000000000..5671d0f695
--- /dev/null
+++ b/modules/otel-config-send-metrics-monitoring-stack.adoc
@@ -0,0 +1,67 @@
+// Module included in the following assemblies:
+//
+// * otel/deploying-otel.adoc
+
+:_mod-docs-content-type: REFERENCE
+[id="configuration-for-sending-metrics-to-the-monitoring-stack_{context}"]
+= Configuration for sending metrics to the monitoring stack
+
+The OpenTelemetry Collector custom resource (CR) can be configured to create a Prometheus `ServiceMonitor` CR for scraping the Collector's pipeline metrics and the enabled Prometheus exporters.
+
+.Example of the OpenTelemetry Collector custom resource with the Prometheus exporter
+[source,yaml]
+----
+spec:
+  mode: deployment
+  observability:
+    metrics:
+      enableMetrics: true # <1>
+  config: |
+    exporters:
+      prometheus:
+        endpoint: 0.0.0.0:8889
+        resource_to_telemetry_conversion:
+          enabled: true # by default resource attributes are dropped
+    service:
+      telemetry:
+        metrics:
+          address: ":8888"
+      pipelines:
+        metrics:
+          receivers: [otlp]
+          exporters: [prometheus]
+----
+<1> Configures the Operator to create the Prometheus `ServiceMonitor` CR to scrape the Collector's internal metrics endpoint and Prometheus exporter metric endpoints. The metrics are stored in the OpenShift monitoring stack.
+
+Alternatively, a manually created Prometheus `PodMonitor` can provide finer control, for example, removing duplicated labels added during Prometheus scraping.
+
+.Example of the `PodMonitor` custom resource that configures the monitoring stack to scrape the Collector metrics
+[source,yaml]
+----
+apiVersion: monitoring.coreos.com/v1
+kind: PodMonitor
+metadata:
+  name: otel-collector
+spec:
+  selector:
+    matchLabels:
+      app.kubernetes.io/name: -collector # <1>
+  podMetricsEndpoints:
+  - port: metrics # <2>
+  - port: promexporter # <3>
+    relabelings:
+    - action: labeldrop
+      regex: pod
+    - action: labeldrop
+      regex: container
+    - action: labeldrop
+      regex: endpoint
+    metricRelabelings:
+    - action: labeldrop
+      regex: instance
+    - action: labeldrop
+      regex: job
+----
+<1> The name of the OpenTelemetry Collector custom resource.
+<2> The name of the internal metrics port for the OpenTelemetry Collector. This port name is always `metrics`.
+<3> The name of the Prometheus exporter port for the OpenTelemetry Collector.
diff --git a/modules/otel-configuring-otelcol-metrics.adoc b/modules/otel-configuring-otelcol-metrics.adoc
new file mode 100644
index 0000000000..8667def29f
--- /dev/null
+++ b/modules/otel-configuring-otelcol-metrics.adoc
@@ -0,0 +1,35 @@
+// Module included in the following assemblies:
+//
+// * otel/otel-configuring.adoc
+
+:_mod-docs-content-type: PROCEDURE
+[id="configuring-otelcol-metrics_{context}"]
+= Configuring the OpenTelemetry Collector metrics
+
+You can enable metrics and alerts of OpenTelemetry Collector instances.
+
+.Prerequisites
+
+* Monitoring for user-defined projects is enabled in the cluster.
+
+.Procedure
+
+* To enable metrics of an OpenTelemetry Collector instance, set the `spec.observability.metrics.enableMetrics` field to `true`:
++
+[source,yaml]
+----
+apiVersion: opentelemetry.io/v1alpha1
+kind: OpenTelemetryCollector
+metadata:
+  name: 
+spec:
+  observability:
+    metrics:
+      enableMetrics: true
+----
+
+.Verification
+
+You can use the *Administrator* view of the web console to verify successful configuration:
+
+* Go to *Observe* -> *Targets*, filter by *Source: User*, and check that the *ServiceMonitors* in the `opentelemetry-collector-` format have the *Up* status.
diff --git a/modules/distr-tracing-otel-forwarding.adoc b/modules/otel-forwarding.adoc
similarity index 94%
rename from modules/distr-tracing-otel-forwarding.adoc
rename to modules/otel-forwarding.adoc
index a4077bb91e..a106f5810d 100644
--- a/modules/distr-tracing-otel-forwarding.adoc
+++ b/modules/otel-forwarding.adoc
@@ -1,9 +1,9 @@
 // Module included in the following assemblies:
 //
-// * /distr_tracing/distr_tracing_otel/distr-tracing-otel-using.adoc
+// * otel/otel-using.adoc
 
 :_mod-docs-content-type: PROCEDURE
-[id="distr-tracing-otel-forwarding_{context}"]
+[id="forwarding-traces_{context}"]
 = Forwarding traces to a TempoStack by using the OpenTelemetry Collector
 
 To configure forwarding traces to a TempoStack, you can deploy and configure the OpenTelemetry Collector. You can deploy the OpenTelemetry Collector in the deployment mode by using the specified processors, receivers, and exporters. For other modes, see the OpenTelemetry Collector documentation linked in _Additional resources_.
@@ -37,8 +37,8 @@ kind: ClusterRole metadata: name: otel-collector rules: - <1> - <2> + # <1> + # <2> - apiGroups: ["", "config.openshift.io"] resources: ["pods", "namespaces", "infrastructures", "infrastructures/status"] verbs: ["get", "watch", "list"] @@ -102,13 +102,13 @@ spec: detectors: [openshift] exporters: otlp: - endpoint: "tempo-simplest-distributor:4317" <1> + endpoint: "tempo-simplest-distributor:4317" # <1> tls: insecure: true service: pipelines: traces: - receivers: [jaeger, opencensus, otlp, zipkin] <2> + receivers: [jaeger, opencensus, otlp, zipkin] # <2> processors: [memory_limiter, k8sattributes, resourcedetection, batch] exporters: [otlp] ---- diff --git a/modules/otel-install-cli.adoc b/modules/otel-install-cli.adoc new file mode 100644 index 0000000000..79d6ad5c23 --- /dev/null +++ b/modules/otel-install-cli.adoc @@ -0,0 +1,179 @@ +// Module included in the following assemblies: +// +//* otel/otel-installing.adoc + +:_content-type: PROCEDURE +[id="installing-otel-by-using-the-cli_{context}"] += Installing the {OTELShortName} by using the CLI + +You can install the {OTELShortName} from the command line. + +.Prerequisites + +* An active {oc-first} session by a cluster administrator with the `cluster-admin` role. ++ +[TIP] +==== +* Ensure that your {oc-first} version is up to date and matches your {product-title} version. + +* Run `oc login`: ++ +[source,terminal] +---- +$ oc login --username= +---- +==== + +.Procedure + +. Install the {OTELOperator}: + +.. Create a project for the {OTELOperator} by running the following command: ++ +[source,terminal] +---- +$ oc apply -f - << EOF +apiVersion: project.openshift.io/v1 +kind: Project +metadata: + labels: + kubernetes.io/metadata.name: openshift-opentelemetry-operator + openshift.io/cluster-monitoring: "true" + name: openshift-opentelemetry-operator +EOF +---- + +.. Create an Operator group by running the following command: ++ +[source,terminal] +---- +$ oc apply -f - << EOF +apiVersion: operators.coreos.com/v1 +kind: OperatorGroup +metadata: + name: openshift-opentelemetry-operator + namespace: openshift-opentelemetry-operator +spec: + upgradeStrategy: Default +EOF +---- + +.. Create a subscription by running the following command: ++ +[source,terminal] +---- +$ oc apply -f - << EOF +apiVersion: operators.coreos.com/v1alpha1 +kind: Subscription +metadata: + name: opentelemetry-product + namespace: openshift-opentelemetry-operator +spec: + channel: stable + installPlanApproval: Automatic + name: opentelemetry-product + source: redhat-operators + sourceNamespace: openshift-marketplace +EOF +---- + +.. Check the Operator status by running the following command: ++ +[source,terminal] +---- +$ oc get csv -n openshift-opentelemetry-operator +---- + +. Create a project of your choice for the OpenTelemetry Collector instance that you will create in a subsequent step: + +** To create a project without metadata, run the following command: ++ +[source,terminal] +---- +$ oc new-project +---- + +** To create a project with metadata, run the following command: ++ +[source,terminal] +---- +$ oc apply -f - << EOF +apiVersion: project.openshift.io/v1 +kind: Project +metadata: + name: +EOF +---- + +. Create an OpenTelemetry Collector instance in the project that you created for it. ++ +[NOTE] +==== +You can create multiple OpenTelemetry Collector instances in separate projects on the same cluster. +==== ++ +.. 
Customize the `OpenTelemetryCollector` custom resource (CR) with the OTLP, Jaeger, and Zipkin receivers and the debug exporter:
++
+[source,yaml]
+----
+apiVersion: opentelemetry.io/v1alpha1
+kind: OpenTelemetryCollector
+metadata:
+  name: otel
+  namespace: 
+spec:
+  mode: deployment
+  config: |
+    receivers:
+      otlp:
+        protocols:
+          grpc:
+          http:
+      jaeger:
+        protocols:
+          grpc:
+          thrift_binary:
+          thrift_compact:
+          thrift_http:
+      zipkin:
+    processors:
+      batch:
+      memory_limiter:
+        check_interval: 1s
+        limit_percentage: 50
+        spike_limit_percentage: 30
+    exporters:
+      debug:
+    service:
+      pipelines:
+        traces:
+          receivers: [otlp,jaeger,zipkin]
+          processors: [memory_limiter,batch]
+          exporters: [debug]
+----
+
+.. Apply the customized CR by running the following command:
++
+[source,terminal]
+----
+$ oc apply -f - << EOF
+
+EOF
+----
+
+
+.Verification
+
+. Verify that the `status.phase` of the OpenTelemetry Collector pod is `Running` and the `conditions` are `type: Ready` by running the following command:
++
+[source,terminal]
+----
+$ oc get pod -l app.kubernetes.io/managed-by=opentelemetry-operator,app.kubernetes.io/instance=. -o yaml
+----
+
+. Get the OpenTelemetry Collector service by running the following command:
++
+[source,terminal]
+----
+$ oc get service -l app.kubernetes.io/managed-by=opentelemetry-operator,app.kubernetes.io/instance=.
+----
diff --git a/modules/distr-tracing-otel-install-web-console.adoc b/modules/otel-install-web-console.adoc
similarity index 63%
rename from modules/distr-tracing-otel-install-web-console.adoc
rename to modules/otel-install-web-console.adoc
index ad71553123..f6654ac688 100644
--- a/modules/distr-tracing-otel-install-web-console.adoc
+++ b/modules/otel-install-web-console.adoc
@@ -1,10 +1,9 @@
-////
-This module included in the following assemblies:
-- distr_tracing_otel/distr-tracing-otel-installing.adoc
-////
+// Module included in the following assemblies:
+//
+// * otel/otel-installing.adoc
 
 :_mod-docs-content-type: PROCEDURE
-[id="distr-tracing-install-otel-operator_{context}"]
+[id="installing-otel-by-using-the-web-console_{context}"]
 = Installing the {OTELShortName} from the web console
 
 You can install the {OTELShortName} from the *Administrator* view of the web console.
@@ -15,20 +14,6 @@ You can install the {OTELShortName} from the *Administrator* view of the web con
 
 * For {product-dedicated}, you must be logged in using an account with the `dedicated-admin` role.
 
-* An active {oc-first} session by a cluster administrator with the `cluster-admin` role.
-+
-[TIP]
-====
-* Ensure that your {oc-first} version is up to date and matches your {product-title} version.
-
-* Run `oc login`:
-+
-[source,terminal]
-----
-$ oc login --username=
-----
-====
-
 .Procedure
 
 . Install the {OTELOperator}:
@@ -55,9 +40,9 @@ This installs the Operator with the default presets:
 
 .. Go to *Operators* -> *Installed Operators*.
 
-.. Select *OpenTelemetry Collector* -> *Create OpenTelemetryCollector* -> *YAML view*.
+.. Select *OpenTelemetry Collector* -> *Create OpenTelemetry Collector* -> *YAML view*.
 
-.. In the *YAML view*, customize the `OpenTelemetryCollector` custom resource (CR) with the OTLP, Jaeger, Zipkin receiver, and logging exporter.
+.. In the *YAML view*, customize the `OpenTelemetryCollector` custom resource (CR) with the OTLP, Jaeger, Zipkin receivers and the debug exporter.
+ [source,yaml] ---- @@ -88,29 +73,21 @@ spec: limit_percentage: 50 spike_limit_percentage: 30 exporters: - logging: + debug: service: pipelines: traces: receivers: [otlp,jaeger,zipkin] processors: [memory_limiter,batch] - exporters: [logging] + exporters: [debug] ---- .. Select *Create*. .Verification -. Verify that the `status.phase` of the OpenTelemetry Collector pod is `Running` and the `conditions` are `type: Ready` by running the following command: -+ -[source,terminal] ----- -$ oc get pod -l app.kubernetes.io/managed-by=opentelemetry-operator,app.kubernetes.io/instance=. -o yaml ----- +. Use the *Project:* dropdown list to select the project of the *OpenTelemetry Collector* instance. -. Get the OpenTelemetry Collector service by running the following command: -+ -[source,terminal] ----- -$ oc get service -l app.kubernetes.io/managed-by=opentelemetry-operator,app.kubernetes.io/instance=. ----- +. Go to *Operators* -> *Installed Operators* to verify that the *Status* of the *OpenTelemetry Collector* instance is *Condition: Ready*. + +. Go to *Workloads* -> *Pods* to verify that all the component pods of the *OpenTelemetry Collector* instance are running. diff --git a/modules/distr-tracing-otel-migrating-from-jaeger-with-sidecars.adoc b/modules/otel-migrating-from-jaeger-with-sidecars.adoc similarity index 94% rename from modules/distr-tracing-otel-migrating-from-jaeger-with-sidecars.adoc rename to modules/otel-migrating-from-jaeger-with-sidecars.adoc index 5c390097c5..8900fe7f35 100644 --- a/modules/distr-tracing-otel-migrating-from-jaeger-with-sidecars.adoc +++ b/modules/otel-migrating-from-jaeger-with-sidecars.adoc @@ -1,9 +1,9 @@ // Module included in the following assemblies: // -// * distr-tracing-otel-migrating.adoc +// * otel-migrating.adoc :_mod-docs-content-type: PROCEDURE -[id="distr-tracing-otel-migrating-from-jaeger-with-sidecars_{context}"] +[id="migrating-to-otel-from-jaeger-with-sidecars_{context}"] = Migrating from the {JaegerShortName} to the {OTELShortName} with sidecars The {OTELShortName} Operator supports sidecar injection into deployment workloads, so you can migrate from a {JaegerShortName} sidecar to a {OTELShortName} sidecar. @@ -45,7 +45,7 @@ spec: timeout: 2s exporters: otlp: - endpoint: "tempo--gateway:8090" <1> + endpoint: "tempo--gateway:8090" # <1> tls: insecure: true service: @@ -76,7 +76,7 @@ kind: ClusterRole metadata: name: otel-collector-sidecar rules: - <1> + # <1> - apiGroups: ["config.openshift.io"] resources: ["infrastructures", "infrastructures/status"] verbs: ["get", "watch", "list"] diff --git a/modules/distr-tracing-otel-migrating-from-jaeger-without-sidecars.adoc b/modules/otel-migrating-from-jaeger-without-sidecars.adoc similarity index 90% rename from modules/distr-tracing-otel-migrating-from-jaeger-without-sidecars.adoc rename to modules/otel-migrating-from-jaeger-without-sidecars.adoc index 6411eb28fb..7f74f3d02f 100644 --- a/modules/distr-tracing-otel-migrating-from-jaeger-without-sidecars.adoc +++ b/modules/otel-migrating-from-jaeger-without-sidecars.adoc @@ -1,9 +1,9 @@ // Module included in the following assemblies: // -// * distr-tracing-otel-migrating.adoc +// * otel/otel-migrating.adoc :_mod-docs-content-type: PROCEDURE -[id="distr-tracing-otel-migrating-from-jaeger-without-sidecars_{context}"] +[id="migrating-to-otel-from-jaeger-without-sidecars_{context}"] = Migrating from the {JaegerShortName} to the {OTELShortName} without sidecars You can migrate from the {JaegerShortName} to the {OTELShortName} without sidecar deployment. 
@@ -47,8 +47,8 @@ kind: ClusterRole metadata: name: otel-collector rules: - <1> - <2> + # <1> + # <2> - apiGroups: ["", "config.openshift.io"] resources: ["pods", "namespaces", "infrastructures", "infrastructures/status"] verbs: ["get", "watch", "list"] @@ -76,7 +76,10 @@ roleRef: . Create the OpenTelemetry Collector instance. + -NOTE: This collector will export traces to a TempoStack instance. You must create your TempoStack instance by using the Red Hat Tempo Operator and place here the correct endpoint. +[NOTE] +==== +This collector will export traces to a TempoStack instance. You must create your TempoStack instance by using the Red Hat Tempo Operator and place here the correct endpoint. +==== + [source,yaml] ---- @@ -125,6 +128,6 @@ spec: .Example of exporting traces by using the `jaegerexporter` with Golang [source,golang] ---- -exp, err := jaeger.New(jaeger.WithCollectorEndpoint(jaeger.WithEndpoint(url))) <1> +exp, err := jaeger.New(jaeger.WithCollectorEndpoint(jaeger.WithEndpoint(url))) # <1> ---- <1> The URL points to the OpenTelemetry Collector API endpoint. diff --git a/modules/distr-tracing-otel-remove-cli.adoc b/modules/otel-remove-cli.adoc similarity index 61% rename from modules/distr-tracing-otel-remove-cli.adoc rename to modules/otel-remove-cli.adoc index 64ae5e873a..0da6916b8a 100644 --- a/modules/distr-tracing-otel-remove-cli.adoc +++ b/modules/otel-remove-cli.adoc @@ -1,12 +1,12 @@ //Module included in the following assemblies: // -//* distr_tracing_install/dist-tracing-otel-removing.adoc +// * otel/otel-removing.adoc :_mod-docs-content-type: PROCEDURE -[id="distr-tracing-removing-otel-instance-cli_{context}"] -= Removing a {OTELShortName} instance by using the CLI +[id="removing-otel-instance-cli_{context}"] += Removing an OpenTelemetry Collector instance by using the CLI -You can remove a {OTELShortName} instance on the command line. +You can remove an OpenTelemetry Collector instance on the command line. .Prerequisites @@ -26,14 +26,14 @@ $ oc login --username= .Procedure -. Get the name of the {OTELShortName} instance by running the following command: +. Get the name of the OpenTelemetry Collector instance by running the following command: + [source,terminal] ---- $ oc get deployments -n ---- -. Remove the {OTELShortName} instance by running the following command: +. Remove the OpenTelemetry Collector instance by running the following command: + [source,terminal] ---- @@ -44,7 +44,7 @@ $ oc delete opentelemetrycollectors -n -gateway:8090" <1> + endpoint: "tempo--gateway:8090" # <1> tls: insecure: true service: @@ -116,4 +116,4 @@ spec: . Create your deployment using the `otel-collector-sidecar` service account. -. Add the `sidecar.opentelemetry.io/inject: "true"` annotation to your `Deployment` object. This will inject all the needed environment variables to send data from your workloads to the OpenTelemetryCollector instance. +. Add the `sidecar.opentelemetry.io/inject: "true"` annotation to your `Deployment` object. This will inject all the needed environment variables to send data from your workloads to the OpenTelemetry Collector instance. 
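+
+For example, the following minimal `Deployment` sketch shows the annotation on the pod template together with the `otel-collector-sidecar` service account; the application name and image are hypothetical:
+
+[source,yaml]
+----
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: my-app # hypothetical workload name
+spec:
+  selector:
+    matchLabels:
+      app: my-app
+  template:
+    metadata:
+      labels:
+        app: my-app
+      annotations:
+        sidecar.opentelemetry.io/inject: "true" # triggers injection of the sidecar Collector
+    spec:
+      serviceAccountName: otel-collector-sidecar
+      containers:
+      - name: app
+        image: my-app:latest # hypothetical image
+----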
diff --git a/modules/distr-tracing-otel-send-traces-and-metrics-to-otel-collector-without-sidecar.adoc b/modules/otel-send-traces-and-metrics-to-otel-collector-without-sidecar.adoc similarity index 77% rename from modules/distr-tracing-otel-send-traces-and-metrics-to-otel-collector-without-sidecar.adoc rename to modules/otel-send-traces-and-metrics-to-otel-collector-without-sidecar.adoc index 1cfd69affd..1f27c913b1 100644 --- a/modules/distr-tracing-otel-send-traces-and-metrics-to-otel-collector-without-sidecar.adoc +++ b/modules/otel-send-traces-and-metrics-to-otel-collector-without-sidecar.adoc @@ -1,16 +1,16 @@ // Module included in the following assemblies: // -// * /distr_tracing/distr_tracing_otel/distr-tracing-otel-using.adoc +// * otel/otel-using.adoc :_mod-docs-content-type: PROCEDURE -[id="distr-tracing-otel-send-traces-and-metrics-to-otel-collector-without-sidecar_{context}"] +[id="sending-traces-and-metrics-to-otel-collector-without-sidecar_{context}"] = Sending traces and metrics to the OpenTelemetry Collector without sidecar injection -You can set up sending telemetry data to an OpenTelemetryCollector instance without sidecar injection, which involves manually setting several environment variables. +You can set up sending telemetry data to an OpenTelemetry Collector instance without sidecar injection, which involves manually setting several environment variables. .Prerequisites -* The {TempoName} is installed and a TempoStack instance is deployed. +* The {TempoName} is installed, and a TempoStack instance is deployed. * You have access to the cluster through the web console or the {oc-first}: @@ -22,7 +22,7 @@ You can set up sending telemetry data to an OpenTelemetryCollector instance with .Procedure -. Create a project for the OpenTelemtry Collector. +. Create a project for an OpenTelemetry Collector instance. + [source,yaml] ---- @@ -43,7 +43,7 @@ metadata: namespace: observability ---- -. Grant permissions to the service account for the `k8sattributes` and `resourcedetection` processors. +. Grant the permissions to the service account for the `k8sattributes` and `resourcedetection` processors. + [source,yaml] ---- @@ -70,7 +70,7 @@ roleRef: apiGroup: rbac.authorization.k8s.io ---- -. Deploy the OpenTelemetryCollector instance. +. Deploy the OpenTelemetry Collector instance with the `OpenTelemetryCollector` custom resource. + [source,yaml] ---- @@ -107,7 +107,7 @@ spec: detectors: [openshift] exporters: otlp: - endpoint: "tempo--distributor:4317" <1> + endpoint: "tempo--distributor:4317" # <1> tls: insecure: true service: @@ -119,7 +119,7 @@ spec: ---- <1> This points to the Gateway of the TempoStack instance deployed by using the `` {TempoOperator}. -. Set the following environment variables in the container with your instrumented application: +. Set the environment variables in the container with your instrumented application. + [options="header"] [cols="l, a, a"] @@ -146,10 +146,10 @@ spec: |`grpc` |OTEL_EXPORTER_OTLP_TIMEOUT -|Maximum time the OTLP exporter will wait for each batch export. +|Maximum time interval for the OTLP exporter to wait for each batch export. |`10s` |OTEL_EXPORTER_OTLP_INSECURE -|Disables client transport security for gRPC requests; an HTTPS schema overrides it. +|Disables client transport security for gRPC requests. An HTTPS schema overrides it. 
|`False` |=== diff --git a/modules/otel-troubleshoot-collector-logs.adoc b/modules/otel-troubleshoot-collector-logs.adoc new file mode 100644 index 0000000000..ff5a8741fa --- /dev/null +++ b/modules/otel-troubleshoot-collector-logs.adoc @@ -0,0 +1,25 @@ +// Module included in the following assemblies: +// +// * otel/otel-troubleshooting.adoc + +:_mod-docs-content-type: PROCEDURE +[id="getting-otel-collector-logs_{context}"] += Getting the OpenTelemetry Collector logs + +You can get the logs for the OpenTelemetry Collector as follows. + +.Procedure + +. Set the relevant log level in the `OpenTelemetryCollector` custom resource (CR): ++ +[source,yaml] +---- + config: | + service: + telemetry: + logs: + level: debug # <1> +---- +<1> Collector's log level. Supported values include `info`, `warn`, `error`, or `debug`. Defaults to `info`. + +. Use the `oc logs` command or the web console to retrieve the logs. diff --git a/modules/otel-troubleshoot-logging-exporter-stdout.adoc b/modules/otel-troubleshoot-logging-exporter-stdout.adoc new file mode 100644 index 0000000000..ca2b004675 --- /dev/null +++ b/modules/otel-troubleshoot-logging-exporter-stdout.adoc @@ -0,0 +1,31 @@ +// Module included in the following assemblies: +// +// * otel/otel-troubleshooting.adoc + +:_mod-docs-content-type: PROCEDURE +[id="debug-exporter-to-stdout_{context}"] += Debug exporter + +You can configure the debug exporter to export the collected data to the standard output. + +.Procedure + +. Configure the `OpenTelemetryCollector` custom resource as follows: ++ +[source,yaml] +---- + config: | + exporters: + debug: + verbosity: detailed + service: + pipelines: + traces: + exporters: [debug] + metrics: + exporters: [debug] + logs: + exporters: [debug] +---- + +. Use the `oc logs` command or the web console to export the logs to the standard output. diff --git a/modules/distr-tracing-otel-troubleshoot-metrics.adoc b/modules/otel-troubleshoot-metrics.adoc similarity index 71% rename from modules/distr-tracing-otel-troubleshoot-metrics.adoc rename to modules/otel-troubleshoot-metrics.adoc index 49a53633e1..6001d0e812 100644 --- a/modules/distr-tracing-otel-troubleshoot-metrics.adoc +++ b/modules/otel-troubleshoot-metrics.adoc @@ -1,22 +1,25 @@ // Module included in the following assemblies: // -// * /distr_tracing/distr_tracing_otel/distr-tracing-otel-troubleshooting.adoc +// * otel/otel-troubleshooting.adoc + :_mod-docs-content-type: PROCEDURE -[id="distr-tracing-otel-troubleshoot-metrics_{context}"] +[id="exposing-metrics_{context}"] = Exposing the metrics The OpenTelemetry Collector exposes the metrics about the data volumes it has processed. The following metrics are for spans, although similar metrics are exposed for metrics and logs signals: `otelcol_receiver_accepted_spans`:: The number of spans successfully pushed into the pipeline. + `otelcol_receiver_refused_spans`:: The number of spans that could not be pushed into the pipeline. -`otelcol_exporter_sent_spans`:: The number of spans successfully sent to destination. +`otelcol_exporter_sent_spans`:: The number of spans successfully sent to the destination. + `otelcol_exporter_enqueue_failed_spans`:: The number of spans failed to be added to the sending queue. -The operator creates a `-collector-monitoring` telemetry service that you can use to scrape the metrics endpoint. +The operator creates a `-collector-monitoring` telemetry service that you can use to scrape the metrics endpoint. .Procedure -. 
Enable the telemetry service by adding the following lines in the OpenTelemetry Collector custom resource: +. Enable the telemetry service by adding the following lines in the `OpenTelemetryCollector` custom resource: + [source,yaml] @@ -25,16 +28,17 @@ The operator creates a `-collector-monitoring` telemetry service that y service: telemetry: metrics: - address: ":8888" <1> + address: ":8888" # <1> ---- -<1> The address on which internal collector metrics are exposed. Defaults to `:8888`. +<1> The address at which the internal collector metrics are exposed. Defaults to `:8888`. + // TODO Operator 0.82.0 has spec.observability.metrics.enableMetrics config that creates ServiceMonitors for users -. Retrieve the metrics by running the following command, which uses the port forwarding collector pod: +. Retrieve the metrics by running the following command, which uses the port-forwarding Collector pod: + [source,terminal] ---- -$ oc port-forward +$ oc port-forward ---- . Access the metrics endpoint at `+http://localhost:8888/metrics+`. diff --git a/modules/overriding-default-node-ip-selection-logic.adoc b/modules/overriding-default-node-ip-selection-logic.adoc index 254eb67af9..de0085e275 100644 --- a/modules/overriding-default-node-ip-selection-logic.adoc +++ b/modules/overriding-default-node-ip-selection-logic.adoc @@ -61,7 +61,7 @@ spec: storage: files: - contents: - source: data:text/plain;charset=utf-8;base64, <1> + source: data:text/plain;charset=utf-8;base64, # <1> mode: 0644 overwrite: true path: /etc/default/nodeip-configuration @@ -85,7 +85,7 @@ spec: storage: files: - contents: - source: data:text/plain;charset=utf-8;base64, <1> + source: data:text/plain;charset=utf-8;base64, # <1> mode: 0644 overwrite: true path: /etc/default/nodeip-configuration diff --git a/modules/support.adoc b/modules/support.adoc index 8fe569b6bc..956074d0c0 100644 --- a/modules/support.adoc +++ b/modules/support.adoc @@ -17,6 +17,7 @@ // * distr_tracing/distr_tracing_rn/distr-tracing-rn-2-7.adoc // * distr_tracing/distr_tracing_rn/distr-tracing-rn-2-8.adoc // * distr_tracing/distr_tracing_rn/distr-tracing-rn-2-9.adoc +// * distr_tracing/distr_tracing_rn/distr-tracing-rn-3-0.adoc // * microshift_support/microshift-getting-support.adoc [id="support_{context}"] diff --git a/otel/_attributes b/otel/_attributes new file mode 120000 index 0000000000..f27fd275ea --- /dev/null +++ b/otel/_attributes @@ -0,0 +1 @@ +../_attributes/ \ No newline at end of file diff --git a/otel/images b/otel/images new file mode 120000 index 0000000000..e4c5bd02a1 --- /dev/null +++ b/otel/images @@ -0,0 +1 @@ +../images/ \ No newline at end of file diff --git a/otel/modules b/otel/modules new file mode 120000 index 0000000000..43aab75b53 --- /dev/null +++ b/otel/modules @@ -0,0 +1 @@ +../modules/ \ No newline at end of file diff --git a/otel/otel-configuring.adoc b/otel/otel-configuring.adoc new file mode 100644 index 0000000000..8350694ba3 --- /dev/null +++ b/otel/otel-configuring.adoc @@ -0,0 +1,27 @@ +:_mod-docs-content-type: ASSEMBLY +[id="otel-configuring"] += Configuring and deploying the {OTELShortName} +include::_attributes/common-attributes.adoc[] +:context: otel-configuring + +toc::[] + +The {OTELName} Operator uses a custom resource definition (CRD) file that defines the architecture and configuration settings to be used when creating and deploying the {OTELShortName} resources. You can install the default configuration or modify the file. 
+
+include::modules/otel-config-collector.adoc[leveloffset=+1]
+include::modules/otel-config-multicluster.adoc[leveloffset=+1]
+include::modules/otel-config-send-metrics-monitoring-stack.adoc[leveloffset=+1]
+
+[id="setting-up-monitoring-for-otel"]
+== Setting up monitoring for the {OTELShortName}
+
+The {OTELOperator} supports monitoring and alerting of each OpenTelemetry Collector instance and exposes upgrade and operational metrics about the Operator itself.
+
+include::modules/otel-configuring-otelcol-metrics.adoc[leveloffset=+2]
+
+// modules/otel-configuring-oteloperator-metrics.adoc[leveloffset=+2]
+
+[role="_additional-resources"]
+[id="additional-resources_deploy-otel"]
+== Additional resources
+* xref:../monitoring/enabling-monitoring-for-user-defined-projects.adoc#enabling-monitoring-for-user-defined-projects[Enabling monitoring for user-defined projects]
diff --git a/otel/otel-installing.adoc b/otel/otel-installing.adoc
new file mode 100644
index 0000000000..a0ba70096a
--- /dev/null
+++ b/otel/otel-installing.adoc
@@ -0,0 +1,27 @@
+:_mod-docs-content-type: ASSEMBLY
+[id="install-otel"]
+= Installing the {OTELShortName}
+include::_attributes/common-attributes.adoc[]
+:context: install-otel
+
+toc::[]
+
+Installing the {OTELShortName} involves the following steps:
+
+. Installing the {OTELOperator}.
+. Creating a namespace for an OpenTelemetry Collector instance.
+. Creating an `OpenTelemetryCollector` custom resource to deploy the OpenTelemetry Collector instance.
+
+include::modules/otel-install-web-console.adoc[leveloffset=+1]
+
+include::modules/otel-install-cli.adoc[leveloffset=+1]
+
+[role="_additional-resources"]
+[id="additional-resources_otel-installing"]
+== Additional resources
+* xref:../post_installation_configuration/preparing-for-users.adoc#creating-cluster-admin_post-install-preparing-for-users[Creating a cluster admin]
+* link:https://operatorhub.io/[OperatorHub.io]
+* xref:../web_console/web-console.adoc#web-console[Accessing the web console]
+* xref:../operators/admin/olm-adding-operators-to-cluster.adoc#olm-installing-from-operatorhub-using-web-console_olm-adding-operators-to-a-cluster[Installing from OperatorHub using the web console]
+* xref:../operators/user/olm-creating-apps-from-installed-operators.adoc#olm-creating-apps-from-installed-operators[Creating applications from installed Operators]
+* xref:../cli_reference/openshift_cli/getting-started-cli.adoc#getting-started-cli[Getting started with the OpenShift CLI]
diff --git a/otel/otel-instrumentation.adoc b/otel/otel-instrumentation.adoc
new file mode 100644
index 0000000000..13eadd2bf8
--- /dev/null
+++ b/otel/otel-instrumentation.adoc
@@ -0,0 +1,14 @@
+:_mod-docs-content-type: ASSEMBLY
+[id="otel-instrumentation"]
+= Configuring and deploying the OpenTelemetry instrumentation injection
+include::_attributes/common-attributes.adoc[]
+:context: otel-instrumentation
+
+toc::[]
+
+:FeatureName: OpenTelemetry instrumentation injection
+include::snippets/technology-preview.adoc[leveloffset=+1]
+
+The {OTELName} Operator uses a custom resource definition (CRD) file that defines the configuration of the instrumentation.
+
+include::modules/otel-config-instrumentation.adoc[leveloffset=+1]
diff --git a/distr_tracing/distr_tracing_otel/distr-tracing-otel-migrating.adoc b/otel/otel-migrating.adoc
similarity index 65%
rename from distr_tracing/distr_tracing_otel/distr-tracing-otel-migrating.adoc
rename to otel/otel-migrating.adoc
index 0a638b5985..75bed9b697 100644
--- a/distr_tracing/distr_tracing_otel/distr-tracing-otel-migrating.adoc
+++ b/otel/otel-migrating.adoc
@@ -6,12 +6,12 @@ include::_attributes/common-attributes.adoc[]
 
 toc::[]
 
-If you are already using {JaegerName} for your applications, you can migrate to the {OTELName}, which is based on the link:https://opentelemetry.io/[OpenTelemetry] open-source project.
+If you are already using the {JaegerName} for your applications, you can migrate to the {OTELName}, which is based on the link:https://opentelemetry.io/[OpenTelemetry] open-source project.
 
 The {OTELShortName} provides a set of APIs, libraries, agents, and instrumentation to facilitate observability in distributed systems. The OpenTelemetry Collector in the {OTELShortName} can ingest the Jaeger protocol, so you do not need to change the SDKs in your applications.
 
 Migration from the {JaegerShortName} to the {OTELShortName} requires configuring the OpenTelemetry Collector and your applications to report traces seamlessly. You can migrate sidecar and sidecarless deployments.
 
-include::modules/distr-tracing-otel-migrating-from-jaeger-with-sidecars.adoc[leveloffset=+1]
+include::modules/otel-migrating-from-jaeger-with-sidecars.adoc[leveloffset=+1]
 
-include::modules/distr-tracing-otel-migrating-from-jaeger-without-sidecars.adoc[leveloffset=+1]
+include::modules/otel-migrating-from-jaeger-without-sidecars.adoc[leveloffset=+1]
diff --git a/otel/otel-release-notes.adoc b/otel/otel-release-notes.adoc
new file mode 100644
index 0000000000..b0c207c410
--- /dev/null
+++ b/otel/otel-release-notes.adoc
@@ -0,0 +1,136 @@
+:_mod-docs-content-type: ASSEMBLY
+include::_attributes/common-attributes.adoc[]
+[id="otel-release-notes"]
+= Release notes for {OTELName}
+:context: otel-release-notes
+
+toc::[]
+
+[id="otel-product-overview"]
+== {OTELName} overview
+
+{OTELName} is based on the open source link:https://opentelemetry.io/[OpenTelemetry project], which aims to provide unified, standardized, and vendor-neutral telemetry data collection for cloud-native software. The {OTELName} product provides support for deploying and managing the OpenTelemetry Collector and for simplifying workload instrumentation.
+
+The link:https://opentelemetry.io/docs/collector/[OpenTelemetry Collector] can receive, process, and forward telemetry data in multiple formats, making it the ideal component for telemetry processing and interoperability between telemetry systems. The Collector provides a unified solution for collecting and processing metrics, traces, and logs.
+
+The OpenTelemetry Collector has a number of features, including the following:
+
+Data collection and processing hub:: It acts as a central component that gathers telemetry data, such as metrics and traces, from various sources. This data can be created from instrumented applications and infrastructure.
+
+Customizable telemetry data pipeline:: The OpenTelemetry Collector is designed to be customizable. It supports various processors, exporters, and receivers.
+
+Auto-instrumentation features:: Automatic instrumentation simplifies the process of adding observability to applications. Developers do not need to manually instrument their code for basic telemetry data.
+
+Here are some of the use cases for the OpenTelemetry Collector:
+
+Centralized data collection:: In a microservices architecture, the Collector can be deployed to aggregate data from multiple services.
+
+Data enrichment and processing:: Before forwarding data to analysis tools, the Collector can enrich, filter, and process this data.
+
+Multi-backend receiving and exporting:: The Collector can receive data from, and send data to, multiple monitoring and analysis platforms simultaneously.
+
+[id="otel-3-0-rn"]
+== {OTELName} {DTProductVersion}
+
+{OTELName} {DTProductVersion} is based on link:https://opentelemetry.io/[OpenTelemetry] {OTELVersion}.
+
+[id="new-features-and-enhancements_otel-3-0-rn"]
+=== New features and enhancements
+
+This update introduces the following enhancements:
+
+* The *OpenShift distributed tracing data collection Operator* is renamed to the *{OTELOperator}*.
+* Support for the ARM architecture.
+* Support for the Prometheus receiver for metrics collection.
+* Support for the Kafka receiver and exporter for sending traces and metrics to Kafka.
+* Support for cluster-wide proxy environments.
+* The {OTELOperator} creates the Prometheus `ServiceMonitor` custom resource if the Prometheus exporter is enabled.
+* The Operator enables the `Instrumentation` custom resource that allows injecting upstream OpenTelemetry auto-instrumentation libraries.
+
+[id="removal-notice_otel-3-0-rn"]
+=== Removal notice
+
+* In {OTELName} {DTProductVersion}, the Jaeger exporter has been removed. Bug fixes and support are provided only through the end of the 2.9 lifecycle. As an alternative to the Jaeger exporter for sending data to the Jaeger collector, you can use the OTLP exporter instead.
+
+[id="bug-fixes_otel-3-0-rn"]
+=== Bug fixes
+
+This update introduces the following bug fixes:
+
+* Fixed support for disconnected environments when using the `oc adm catalog mirror` CLI command.
+
+[id="known-issues_otel-3-0-rn"]
+=== Known issues
+
+Currently, the cluster monitoring of the {OTELOperator} is disabled due to a bug (link:https://issues.redhat.com/browse/TRACING-3761[TRACING-3761]). The bug prevents cluster monitoring from scraping metrics from the {OTELOperator} because the `openshift.io/cluster-monitoring=true` label, which is required for cluster monitoring and the service monitor object, is missing.
+
+.Workaround
+
+You can enable the cluster monitoring as follows:
+
+. Add the following label to the Operator namespace: `oc label namespace openshift-opentelemetry-operator openshift.io/cluster-monitoring=true`
+
+. 
Create a service monitor, role, and role binding: ++ +[source,yaml] +---- +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: opentelemetry-operator-controller-manager-metrics-service + namespace: openshift-opentelemetry-operator +spec: + endpoints: + - bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token + path: /metrics + port: https + scheme: https + tlsConfig: + insecureSkipVerify: true + selector: + matchLabels: + app.kubernetes.io/name: opentelemetry-operator + control-plane: controller-manager +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: otel-operator-prometheus + namespace: openshift-opentelemetry-operator + annotations: + include.release.openshift.io/self-managed-high-availability: "true" + include.release.openshift.io/single-node-developer: "true" +rules: +- apiGroups: + - "" + resources: + - services + - endpoints + - pods + verbs: + - get + - list + - watch +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: otel-operator-prometheus + namespace: openshift-opentelemetry-operator + annotations: + include.release.openshift.io/self-managed-high-availability: "true" + include.release.openshift.io/single-node-developer: "true" +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: otel-operator-prometheus +subjects: +- kind: ServiceAccount + name: prometheus-k8s + namespace: openshift-monitoring +---- + +include::modules/support.adoc[leveloffset=+1] + +include::modules/making-open-source-more-inclusive.adoc[leveloffset=+1] diff --git a/distr_tracing/distr_tracing_otel/distr-tracing-otel-removing.adoc b/otel/otel-removing.adoc similarity index 55% rename from distr_tracing/distr_tracing_otel/distr-tracing-otel-removing.adoc rename to otel/otel-removing.adoc index b8a9cfa848..18b650898b 100644 --- a/distr_tracing/distr_tracing_otel/distr-tracing-otel-removing.adoc +++ b/otel/otel-removing.adoc @@ -12,13 +12,13 @@ The steps for removing the {OTELShortName} from an {product-title} cluster are a . Remove any OpenTelemetryCollector instances. . Remove the {OTELOperator}. 
-include::modules/distr-tracing-otel-remove-web-console.adoc[leveloffset=+1]
+include::modules/otel-remove-web-console.adoc[leveloffset=+1]
 
-include::modules/distr-tracing-otel-remove-cli.adoc[leveloffset=+1]
+include::modules/otel-remove-cli.adoc[leveloffset=+1]
 
 [role="_additional-resources"]
 [id="additional-resources_dist-tracing-otel-removing"]
 == Additional resources
 
-* xref:../../operators/admin/olm-deleting-operators-from-cluster.adoc#olm-deleting-operators-from-a-cluster[Deleting Operators from a cluster]
-* xref:../../cli_reference/openshift_cli/getting-started-cli.adoc#getting-started-cli[Getting started with the OpenShift CLI]
\ No newline at end of file
+* xref:../operators/admin/olm-deleting-operators-from-cluster.adoc#olm-deleting-operators-from-a-cluster[Deleting Operators from a cluster]
+* xref:../cli_reference/openshift_cli/getting-started-cli.adoc#getting-started-cli[Getting started with the OpenShift CLI]
diff --git a/otel/otel-troubleshooting.adoc b/otel/otel-troubleshooting.adoc
new file mode 100644
index 0000000000..cbdb41fb14
--- /dev/null
+++ b/otel/otel-troubleshooting.adoc
@@ -0,0 +1,13 @@
+:_mod-docs-content-type: ASSEMBLY
+[id="otel-troubleshoot"]
+= Troubleshooting the {OTELShortName}
+include::_attributes/common-attributes.adoc[]
+:context: otel-troubleshoot
+
+toc::[]
+
+The OpenTelemetry Collector offers multiple ways to measure its health and to investigate data ingestion issues.
+
+include::modules/otel-troubleshoot-collector-logs.adoc[leveloffset=+1]
+include::modules/otel-troubleshoot-metrics.adoc[leveloffset=+1]
+include::modules/otel-troubleshoot-logging-exporter-stdout.adoc[leveloffset=+1]
diff --git a/otel/otel-updating.adoc b/otel/otel-updating.adoc
new file mode 100644
index 0000000000..98b55d8316
--- /dev/null
+++ b/otel/otel-updating.adoc
@@ -0,0 +1,20 @@
+:_mod-docs-content-type: ASSEMBLY
+[id="dist-tracing-otel-updating"]
+= Updating the {OTELShortName}
+include::_attributes/common-attributes.adoc[]
+:context: dist-tracing-otel-updating
+
+toc::[]
+
+For version upgrades, the {OTELOperator} uses the Operator Lifecycle Manager (OLM), which controls installation, upgrade, and role-based access control (RBAC) of Operators in a cluster.
+
+OLM runs by default in {product-title}. OLM queries for available Operators and for upgrades of installed Operators.
+
+When the {OTELOperator} is upgraded to a new version, it scans for running OpenTelemetry Collector instances that it manages and upgrades them to the version that corresponds to the new Operator version.
+
+[role="_additional-resources"]
+[id="additional-resources_dist-tracing-otel-updating"]
+== Additional resources
+
+* xref:../operators/understanding/olm/olm-understanding-olm.adoc#olm-understanding-olm[Operator Lifecycle Manager concepts and resources]
+* xref:../operators/admin/olm-upgrading-operators.adoc#olm-upgrading-operators[Updating installed Operators]
diff --git a/otel/otel-using.adoc b/otel/otel-using.adoc
new file mode 100644
index 0000000000..61287ca72d
--- /dev/null
+++ b/otel/otel-using.adoc
@@ -0,0 +1,20 @@
+:_mod-docs-content-type: ASSEMBLY
+[id="otel-temp"]
+= Using the {OTELShortName}
+include::_attributes/common-attributes.adoc[]
+:context: otel-temp
+
+toc::[]
+
+You can set up and use the {OTELShortName} to send traces to the OpenTelemetry Collector or the TempoStack.
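+
+For example, the following minimal sketch of an `OpenTelemetryCollector` custom resource receives OTLP traces and forwards them to a `TempoStack` instance. The instance name, namespace, and the `tempo-simplest-distributor:4317` endpoint are illustrative assumptions; substitute the values from your environment:
+
+[source,yaml]
+----
+apiVersion: opentelemetry.io/v1alpha1
+kind: OpenTelemetryCollector
+metadata:
+  name: otel # illustrative instance name
+  namespace: observability # illustrative namespace
+spec:
+  mode: deployment
+  config: |
+    receivers:
+      otlp:
+        protocols:
+          grpc:
+          http:
+    exporters:
+      otlp:
+        endpoint: tempo-simplest-distributor:4317 # assumed TempoStack distributor service
+        tls:
+          insecure: true
+    service:
+      pipelines:
+        traces:
+          receivers: [otlp]
+          exporters: [otlp]
+----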
+
+include::modules/otel-forwarding.adoc[leveloffset=+1]
+
+[id="otel-send-traces-and-metrics-to-otel-collector_{context}"]
+== Sending traces and metrics to the OpenTelemetry Collector
+
+Sending traces and metrics to the OpenTelemetry Collector is possible with or without sidecar injection.
+
+include::modules/otel-send-traces-and-metrics-to-otel-collector-with-sidecar.adoc[leveloffset=+2]
+
+include::modules/otel-send-traces-and-metrics-to-otel-collector-without-sidecar.adoc[leveloffset=+2]
diff --git a/otel/snippets b/otel/snippets
new file mode 120000
index 0000000000..9d58b92e50
--- /dev/null
+++ b/otel/snippets
@@ -0,0 +1 @@
+../snippets/
\ No newline at end of file