mirror of
https://github.com/openshift/openshift-docs.git
synced 2026-02-05 21:46:22 +01:00
Distributed tracing 3.0 release docs
This commit is contained in:
committed by
openshift-cherrypick-robot
parent
d7292263a4
commit
e82e3cdc33
@@ -116,18 +116,18 @@ endif::[]
|
||||
//distributed tracing
|
||||
:DTProductName: Red Hat OpenShift distributed tracing platform
|
||||
:DTShortName: distributed tracing platform
|
||||
:DTProductVersion: 2.9
|
||||
:DTProductVersion: 3.0
|
||||
:JaegerName: Red Hat OpenShift distributed tracing platform (Jaeger)
|
||||
:JaegerShortName: distributed tracing platform (Jaeger)
|
||||
:JaegerVersion: 1.47.0
|
||||
:OTELName: Red Hat OpenShift distributed tracing data collection
|
||||
:OTELShortName: distributed tracing data collection
|
||||
:OTELOperator: Red Hat OpenShift distributed tracing data collection Operator
|
||||
:OTELVersion: 0.81.0
|
||||
:JaegerVersion: 1.51.0
|
||||
:OTELName: Red Hat build of OpenTelemetry
|
||||
:OTELShortName: Red Hat build of OpenTelemetry
|
||||
:OTELOperator: Red Hat build of OpenTelemetry Operator
|
||||
:OTELVersion: 0.89.0
|
||||
:TempoName: Red Hat OpenShift distributed tracing platform (Tempo)
|
||||
:TempoShortName: distributed tracing platform (Tempo)
|
||||
:TempoOperator: Tempo Operator
|
||||
:TempoVersion: 2.1.1
|
||||
:TempoVersion: 2.3.0
|
||||
//logging
|
||||
:logging-title: logging subsystem for Red Hat OpenShift
|
||||
:logging-title-uc: Logging subsystem for Red Hat OpenShift
|
||||
|
||||
@@ -2649,6 +2649,90 @@ Topics:
|
||||
- Name: Configuring the Cluster Observability Operator to monitor a service
|
||||
File: configuring-the-cluster-observability-operator-to-monitor-a-service
|
||||
---
|
||||
Name: Distributed tracing
|
||||
Dir: distr_tracing
|
||||
Distros: openshift-enterprise
|
||||
Topics:
|
||||
- Name: Distributed tracing release notes
|
||||
Dir: distr_tracing_rn
|
||||
Topics:
|
||||
- Name: "3.0"
|
||||
File: distr-tracing-rn-3-0
|
||||
- Name: "2.9.2"
|
||||
File: distr-tracing-rn-2-9-2
|
||||
- Name: "2.9.1"
|
||||
File: distr-tracing-rn-2-9-1
|
||||
- Name: "2.9"
|
||||
File: distr-tracing-rn-2-9
|
||||
- Name: "2.8"
|
||||
File: distr-tracing-rn-2-8
|
||||
- Name: "2.7"
|
||||
File: distr-tracing-rn-2-7
|
||||
- Name: "2.6"
|
||||
File: distr-tracing-rn-2-6
|
||||
- Name: "2.5"
|
||||
File: distr-tracing-rn-2-5
|
||||
- Name: "2.4"
|
||||
File: distr-tracing-rn-2-4
|
||||
- Name: "2.3"
|
||||
File: distr-tracing-rn-2-3
|
||||
- Name: "2.2"
|
||||
File: distr-tracing-rn-2-2
|
||||
- Name: "2.1"
|
||||
File: distr-tracing-rn-2-1
|
||||
- Name: "2.0"
|
||||
File: distr-tracing-rn-2-0
|
||||
- Name: Distributed tracing architecture
|
||||
Dir: distr_tracing_arch
|
||||
Topics:
|
||||
- Name: Distributed tracing architecture
|
||||
File: distr-tracing-architecture
|
||||
- Name: Distributed tracing platform (Jaeger)
|
||||
Dir: distr_tracing_jaeger
|
||||
Topics:
|
||||
- Name: Installation
|
||||
File: distr-tracing-jaeger-installing
|
||||
- Name: Configuration
|
||||
File: distr-tracing-jaeger-configuring
|
||||
- Name: Updating
|
||||
File: distr-tracing-jaeger-updating
|
||||
- Name: Removal
|
||||
File: distr-tracing-jaeger-removing
|
||||
- Name: Distributed tracing platform (Tempo)
|
||||
Dir: distr_tracing_tempo
|
||||
Topics:
|
||||
- Name: Installation
|
||||
File: distr-tracing-tempo-installing
|
||||
- Name: Configuration
|
||||
File: distr-tracing-tempo-configuring
|
||||
- Name: Updating
|
||||
File: distr-tracing-tempo-updating
|
||||
- Name: Removal
|
||||
File: distr-tracing-tempo-removing
|
||||
---
|
||||
Name: Red Hat build of OpenTelemetry
|
||||
Dir: otel
|
||||
Distros: openshift-enterprise
|
||||
Topics:
|
||||
- Name: Release notes
|
||||
File: otel-release-notes
|
||||
- Name: Installation
|
||||
File: otel-installing
|
||||
- Name: Collector configuration
|
||||
File: otel-configuring
|
||||
- Name: Instrumentation
|
||||
File: otel-instrumentation
|
||||
- Name: Use
|
||||
File: otel-using
|
||||
- Name: Troubleshooting
|
||||
File: otel-troubleshooting
|
||||
- Name: Migration
|
||||
File: otel-migrating
|
||||
- Name: Updating
|
||||
File: otel-updating
|
||||
- Name: Removal
|
||||
File: otel-removing
|
||||
---
|
||||
Name: Network Observability
|
||||
Dir: network_observability
|
||||
Distros: openshift-enterprise,openshift-origin
|
||||
@@ -3633,80 +3717,6 @@ Topics:
|
||||
- Name: Removing Service Mesh
|
||||
File: removing-ossm
|
||||
---
|
||||
Name: Distributed tracing
|
||||
Dir: distr_tracing
|
||||
Distros: openshift-enterprise
|
||||
Topics:
|
||||
- Name: Distributed tracing release notes
|
||||
Dir: distr_tracing_rn
|
||||
Topics:
|
||||
- Name: "2.9.2"
|
||||
File: distr-tracing-rn-2-9-2
|
||||
- Name: "2.9.1"
|
||||
File: distr-tracing-rn-2-9-1
|
||||
- Name: "2.9"
|
||||
File: distr-tracing-rn-2-9
|
||||
- Name: "2.8"
|
||||
File: distr-tracing-rn-2-8
|
||||
- Name: "2.7"
|
||||
File: distr-tracing-rn-2-7
|
||||
- Name: "2.6"
|
||||
File: distr-tracing-rn-2-6
|
||||
- Name: "2.5"
|
||||
File: distr-tracing-rn-2-5
|
||||
- Name: "2.4"
|
||||
File: distr-tracing-rn-2-4
|
||||
- Name: "2.3"
|
||||
File: distr-tracing-rn-2-3
|
||||
- Name: "2.2"
|
||||
File: distr-tracing-rn-2-2
|
||||
- Name: "2.1"
|
||||
File: distr-tracing-rn-2-1
|
||||
- Name: "2.0"
|
||||
File: distr-tracing-rn-2-0
|
||||
- Name: Distributed tracing architecture
|
||||
Dir: distr_tracing_arch
|
||||
Topics:
|
||||
- Name: Distributed tracing architecture
|
||||
File: distr-tracing-architecture
|
||||
- Name: Distributed tracing platform (Jaeger)
|
||||
Dir: distr_tracing_jaeger
|
||||
Topics:
|
||||
- Name: Installation
|
||||
File: distr-tracing-jaeger-installing
|
||||
- Name: Configuration
|
||||
File: distr-tracing-jaeger-configuring
|
||||
- Name: Updating
|
||||
File: distr-tracing-jaeger-updating
|
||||
- Name: Removal
|
||||
File: distr-tracing-jaeger-removing
|
||||
- Name: Distributed tracing platform (Tempo)
|
||||
Dir: distr_tracing_tempo
|
||||
Topics:
|
||||
- Name: Installation
|
||||
File: distr-tracing-tempo-installing
|
||||
- Name: Configuration
|
||||
File: distr-tracing-tempo-configuring
|
||||
- Name: Updating
|
||||
File: distr-tracing-tempo-updating
|
||||
- Name: Removal
|
||||
File: distr-tracing-tempo-removing
|
||||
- Name: Distributed tracing data collection (OpenTelemetry)
|
||||
Dir: distr_tracing_otel
|
||||
Topics:
|
||||
- Name: Installation
|
||||
File: distr-tracing-otel-installing
|
||||
- Name: Configuration
|
||||
File: distr-tracing-otel-configuring
|
||||
- Name: Use
|
||||
File: distr-tracing-otel-using
|
||||
- Name: Troubleshooting
|
||||
File: distr-tracing-otel-troubleshooting
|
||||
- Name: Migration
|
||||
File: distr-tracing-otel-migrating
|
||||
- Name: Removal
|
||||
File: distr-tracing-otel-removing
|
||||
---
|
||||
Name: Virtualization
|
||||
Dir: virt
|
||||
Distros: openshift-enterprise,openshift-origin
|
||||
|
||||
@@ -1,128 +0,0 @@
|
||||
////
|
||||
This module included in the following assemblies:
|
||||
- distr_tracing_install/distr-tracing-deploying.adoc
|
||||
////
|
||||
|
||||
:_mod-docs-content-type: PROCEDURE
|
||||
[id="distr-tracing-deploy-otel-collector_{context}"]
|
||||
= Deploying distributed tracing data collection
|
||||
|
||||
The custom resource definition (CRD) defines the configuration used when you deploy an instance of {OTELName}.
|
||||
|
||||
.Prerequisites
|
||||
|
||||
* The {OTELName} Operator has been installed.
|
||||
//* You have reviewed the instructions for how to customize the deployment.
|
||||
* You have access to the cluster as a user with the `cluster-admin` role.
|
||||
|
||||
.Procedure
|
||||
|
||||
. Log in to the OpenShift web console as a user with the `cluster-admin` role.
|
||||
|
||||
. Create a new project, for example `tracing-system`.
|
||||
+
|
||||
[NOTE]
|
||||
====
|
||||
If you are installing distributed tracing as part of Service Mesh, the {DTShortName} resources must be installed in the same namespace as the `ServiceMeshControlPlane` resource, for example `istio-system`.
|
||||
====
|
||||
+
|
||||
.. Navigate to *Home* -> *Projects*.
|
||||
|
||||
.. Click *Create Project*.
|
||||
|
||||
.. Enter `tracing-system` in the *Name* field.
|
||||
|
||||
.. Click *Create*.
|
||||
|
||||
. Navigate to *Operators* -> *Installed Operators*.
|
||||
|
||||
. If necessary, select `tracing-system` from the *Project* menu. You might have to wait a few moments for the Operators to be copied to the new project.
|
||||
|
||||
. Click the *{OTELName} Operator*. On the *Details* tab, under *Provided APIs*, the Operator provides a single link.
|
||||
|
||||
. Under *OpenTelemetryCollector*, click *Create Instance*.
|
||||
|
||||
. On the *Create OpenTelemetry Collector* page, to install using the defaults, click *Create* to create the {OTELShortName} instance.
|
||||
|
||||
. On the *OpenTelemetryCollectors* page, click the name of the {OTELShortName} instance, for example, `opentelemetrycollector-sample`.
|
||||
|
||||
. On the *Details* page, click the *Resources* tab. Wait until the pod has a status of "Running" before continuing.
|
||||
|
||||
[id="distr-tracing-deploy-otel-collector-cli_{context}"]
|
||||
= Deploying {OTELShortName} from the CLI
|
||||
|
||||
Follow this procedure to create an instance of {OTELShortName} from the command line.
|
||||
|
||||
.Prerequisites
|
||||
|
||||
* The {OTELName} Operator has been installed and verified.
|
||||
+
|
||||
//* You have reviewed the instructions for how to customize the deployment.
|
||||
+
|
||||
* You have access to the OpenShift CLI (`oc`) that matches your {product-title} version.
|
||||
* You have access to the cluster as a user with the `cluster-admin` role.
|
||||
|
||||
.Procedure
|
||||
|
||||
. Log in to the {product-title} CLI as a user with the `cluster-admin` role.
|
||||
+
|
||||
[source,terminal]
|
||||
----
|
||||
$ oc login https://<HOSTNAME>:8443
|
||||
----
|
||||
|
||||
. Create a new project named `tracing-system`.
|
||||
+
|
||||
[source,terminal]
|
||||
----
|
||||
$ oc new-project tracing-system
|
||||
----
|
||||
|
||||
. Create a custom resource file named `jopentelemetrycollector-sample.yaml` that contains the following text:
|
||||
+
|
||||
.Example opentelemetrycollector.yaml
|
||||
[source,yaml]
|
||||
----
|
||||
apiVersion: opentelemetry.io/v1alpha1
|
||||
kind: OpenTelemetryCollector
|
||||
metadata:
|
||||
name: opentelemetrycollector-sample
|
||||
namespace: openshift-operators
|
||||
spec:
|
||||
image: >-
|
||||
registry.redhat.io/rhosdt/opentelemetry-collector-rhel8@sha256:61934ea5793c55900d09893e8f8b1f2dbd2e712faba8e97684e744691b29f25e
|
||||
config: |
|
||||
receivers:
|
||||
jaeger:
|
||||
protocols:
|
||||
grpc:
|
||||
exporters:
|
||||
logging:
|
||||
service:
|
||||
pipelines:
|
||||
traces:
|
||||
receivers: [jaeger]
|
||||
exporters: [logging]
|
||||
----
|
||||
|
||||
. Run the following command to deploy {JaegerShortName}:
|
||||
+
|
||||
[source,terminal]
|
||||
----
|
||||
$ oc create -n tracing-system -f opentelemetrycollector.yaml
|
||||
----
|
||||
|
||||
. Run the following command to watch the progress of the pods during the installation process:
|
||||
+
|
||||
[source,terminal]
|
||||
----
|
||||
$ oc get pods -n tracing-system -w
|
||||
----
|
||||
+
|
||||
After the installation process has completed, you should see output similar to the following example:
|
||||
+
|
||||
[source,terminal]
|
||||
----
|
||||
NAME READY STATUS RESTARTS AGE
|
||||
opentelemetrycollector-cdff7897b-qhfdx 2/2 Running 0 24s
|
||||
----
|
||||
@@ -6,6 +6,11 @@ include::_attributes/common-attributes.adoc[]
|
||||
|
||||
toc::[]
|
||||
|
||||
[WARNING]
|
||||
====
|
||||
Jaeger is deprecated in Red Hat OpenShift distributed tracing 3.0. See the xref:../distr_tracing_rn/distr-tracing-rn-3-0.adoc[release notes] for more information.
|
||||
====
|
||||
|
||||
You can install {DTProductName} on {product-title} in either of two ways:
|
||||
|
||||
* You can install {DTProductName} as part of {SMProductName}. Distributed tracing is included by default in the Service Mesh installation. To install {DTProductName} as part of a service mesh, follow the xref:../../service_mesh/v2x/preparing-ossm-installation.adoc#preparing-ossm-installation[Red Hat Service Mesh Installation] instructions. You must install {DTProductName} in the same namespace as your service mesh, that is, the `ServiceMeshControlPlane` and the {DTProductName} resources must be in the same namespace.
|
||||
|
||||
@@ -6,6 +6,11 @@ include::_attributes/common-attributes.adoc[]
|
||||
|
||||
toc::[]
|
||||
|
||||
[WARNING]
|
||||
====
|
||||
Jaeger is deprecated in Red Hat OpenShift distributed tracing 3.0. See the xref:../distr_tracing_rn/distr-tracing-rn-3-0.adoc[release notes] for more information.
|
||||
====
|
||||
|
||||
Operator Lifecycle Manager (OLM) controls the installation, upgrade, and role-based access control (RBAC) of Operators in a cluster. The OLM runs by default in {product-title}.
|
||||
OLM queries for available Operators as well as upgrades for installed Operators.
|
||||
|
||||
@@ -22,4 +27,4 @@ If you have not already updated your OpenShift Elasticsearch Operator as describ
|
||||
|
||||
* xref:../../operators/understanding/olm/olm-understanding-olm.adoc#olm-understanding-olm[Operator Lifecycle Manager concepts and resources]
|
||||
* xref:../../operators/admin/olm-upgrading-operators.adoc#olm-upgrading-operators[Updating installed Operators]
|
||||
* xref:../../logging/cluster-logging-upgrading.adoc#cluster-logging-upgrading_cluster-logging-upgrading[Updating OpenShift Logging]
|
||||
* xref:../../logging/cluster-logging-upgrading.adoc#cluster-logging-upgrading_cluster-logging-upgrading[Updating OpenShift Logging]
|
||||
|
||||
@@ -1 +0,0 @@
|
||||
../../_attributes/
|
||||
@@ -1,18 +0,0 @@
|
||||
:_mod-docs-content-type: ASSEMBLY
|
||||
[id="distr-tracing-otel-configuring"]
|
||||
= Configuring and deploying the {OTELShortName}
|
||||
include::_attributes/common-attributes.adoc[]
|
||||
:context: distr-tracing-otel-configuring
|
||||
|
||||
toc::[]
|
||||
|
||||
The {OTELName} Operator uses a custom resource definition (CRD) file that defines the architecture and configuration settings to be used when creating and deploying the {OTELShortName} resources. You can install the default configuration or modify the file.
|
||||
|
||||
include::modules/distr-tracing-otel-config-collector.adoc[leveloffset=+1]
|
||||
|
||||
include::modules/distr-tracing-otel-config-send-metrics-monitoring-stack.adoc[leveloffset=+1]
|
||||
|
||||
[role="_additional-resources"]
|
||||
[id="additional-resources_deploy-otel"]
|
||||
== Additional resources
|
||||
* xref:../../monitoring/enabling-monitoring-for-user-defined-projects.adoc#enabling-monitoring-for-user-defined-projects[Enabling monitoring for user-defined projects]
|
||||
@@ -1,28 +0,0 @@
|
||||
:_mod-docs-content-type: ASSEMBLY
|
||||
[id="install-distributed-tracing-otel"]
|
||||
= Installing the {OTELShortName}
|
||||
include::_attributes/common-attributes.adoc[]
|
||||
:context: install-distributed-tracing-otel
|
||||
|
||||
toc::[]
|
||||
|
||||
:FeatureName: The {OTELOperator}
|
||||
include::snippets/technology-preview.adoc[leveloffset=+1]
|
||||
|
||||
Installing the {OTELShortName} involves the following steps:
|
||||
|
||||
. Installing the {OTELOperator}.
|
||||
. Creating a namespace for an OpenTelemetry Collector instance.
|
||||
. Creating an `OpenTelemetryCollector` custom resource to deploy the OpenTelemetry Collector instance.
|
||||
|
||||
include::modules/distr-tracing-otel-install-web-console.adoc[leveloffset=+1]
|
||||
|
||||
[role="_additional-resources"]
|
||||
[id="additional-resources_dist-tracing-otel-installing"]
|
||||
== Additional resources
|
||||
* xref:../../post_installation_configuration/preparing-for-users.adoc#creating-cluster-admin_post-install-preparing-for-users[Creating a cluster admin]
|
||||
* link:https://operatorhub.io/[OperatorHub.io]
|
||||
* xref:../../web_console/web-console.adoc#web-console[Accessing the web console]
|
||||
* xref:../../operators/admin/olm-adding-operators-to-cluster.adoc#olm-installing-from-operatorhub-using-web-console_olm-adding-operators-to-a-cluster[Installing from OperatorHub using the web console]
|
||||
* xref:../../operators/user/olm-creating-apps-from-installed-operators.adoc#olm-creating-apps-from-installed-operators[Creating applications from installed Operators]
|
||||
* xref:../../cli_reference/openshift_cli/getting-started-cli.adoc#getting-started-cli[Getting started with the OpenShift CLI]
|
||||
@@ -1,13 +0,0 @@
|
||||
:_mod-docs-content-type: ASSEMBLY
|
||||
[id="distr-tracing-otel-troubleshoot"]
|
||||
= Troubleshooting the {OTELShortName}
|
||||
include::_attributes/common-attributes.adoc[]
|
||||
:context: distr-tracing-otel-troubleshoot
|
||||
|
||||
toc::[]
|
||||
|
||||
The OpenTelemetry Collector offers multiple ways to measure its health as well as investigate data ingestion issues.
|
||||
|
||||
include::modules/distr-tracing-otel-troubleshoot-logs.adoc[leveloffset=+1]
|
||||
include::modules/distr-tracing-otel-troubleshoot-metrics.adoc[leveloffset=+1]
|
||||
include::modules/distr-tracing-otel-troubleshoot-logging-exporter.adoc[leveloffset=+1]
|
||||
@@ -1,18 +0,0 @@
|
||||
:_mod-docs-content-type: ASSEMBLY
|
||||
[id="distr-tracing-otel-temp"]
|
||||
= Using the {OTELShortName}
|
||||
include::_attributes/common-attributes.adoc[]
|
||||
:context: distr-tracing-otel-temp
|
||||
|
||||
toc::[]
|
||||
|
||||
include::modules/distr-tracing-otel-forwarding.adoc[leveloffset=+1]
|
||||
|
||||
[id="distr-tracing-otel-send-traces-and-metrics-to-otel-collector_{context}"]
|
||||
== Sending traces and metrics to the OpenTelemetry Collector
|
||||
|
||||
Sending tracing and metrics to the OpenTelemetry Collector is possible with or without sidecar injection.
|
||||
|
||||
include::modules/distr-tracing-otel-send-traces-and-metrics-to-otel-collector-with-sidecar.adoc[leveloffset=+2]
|
||||
|
||||
include::modules/distr-tracing-otel-send-traces-and-metrics-to-otel-collector-without-sidecar.adoc[leveloffset=+2]
|
||||
@@ -1 +0,0 @@
|
||||
../../images/
|
||||
@@ -1 +0,0 @@
|
||||
../../modules/
|
||||
@@ -1 +0,0 @@
|
||||
../../snippets/
|
||||
95
distr_tracing/distr_tracing_rn/distr-tracing-rn-3-0.adoc
Normal file
95
distr_tracing/distr_tracing_rn/distr-tracing-rn-3-0.adoc
Normal file
@@ -0,0 +1,95 @@
|
||||
:_mod-docs-content-type: ASSEMBLY
|
||||
include::_attributes/common-attributes.adoc[]
|
||||
[id="distributed-tracing-rn-3-0"]
|
||||
= Release notes for {DTProductName} 3.0
|
||||
:context: distributed-tracing-rn-3-0
|
||||
|
||||
toc::[]
|
||||
|
||||
include::modules/distr-tracing-product-overview.adoc[leveloffset=+1]
|
||||
|
||||
[id="component-versions_distributed-tracing-rn-3-0"]
|
||||
== Component versions in the {DTProductName} 3.0
|
||||
|
||||
[options="header"]
|
||||
|===
|
||||
|Operator |Component |Version
|
||||
|{JaegerName}
|
||||
|Jaeger
|
||||
|1.51.0
|
||||
|
||||
|xref:../../otel/otel-release-notes.adoc[{OTELName}]
|
||||
|OpenTelemetry
|
||||
|0.89.0
|
||||
|
||||
|{TempoName}
|
||||
|Tempo
|
||||
|2.3.0
|
||||
|===
|
||||
|
||||
// Jaeger section
|
||||
[id="jaeger-release-notes_distributed-tracing-rn-3-0"]
|
||||
== {JaegerName}
|
||||
|
||||
[id="deprecated-functionality_jaeger-release-notes_distributed-tracing-rn-3-0"]
|
||||
=== Deprecated functionality
|
||||
|
||||
In Red Hat OpenShift distributed tracing 3.0, Jaeger and Elasticsearch are deprecated, and both are planned to be removed in a future release. Red Hat will provide critical and above CVE bug fixes and support for these components during the current release lifecycle, but these components will no longer receive feature enhancements.
|
||||
|
||||
In Red Hat OpenShift distributed tracing 3.0, Tempo provided by the {TempoOperator} and the OpenTelemetry collector provided by the Red Hat build of OpenTelemetry are the preferred Operators for distributed tracing collection and storage. The OpenTelemetry and Tempo distributed tracing stack is to be adopted by all users because this will be the stack that will be enhanced going forward.
|
||||
|
||||
[id="new-features-and-enhancements_jaeger-release-notes_distributed-tracing-rn-3-0"]
|
||||
=== New features and enhancements
|
||||
|
||||
This update introduces the following enhancements for the {JaegerShortName}:
|
||||
|
||||
* Support for the ARM architecture.
|
||||
* Support for cluster-wide proxy environments.
|
||||
|
||||
[id="bug-fixes_jaeger-release-notes_distributed-tracing-rn-3-0"]
|
||||
=== Bug fixes
|
||||
|
||||
This update introduces the following bug fixes for the {JaegerShortName}:
|
||||
|
||||
* Fixed support for disconnected environments when using the `oc adm catalog mirror` CLI command. (link:https://issues.redhat.com/browse/TRACING-3546[TRACING-3546])
|
||||
|
||||
[id="known-issues_jaeger-release-notes_distributed-tracing-rn-3-0"]
|
||||
=== Known issues
|
||||
|
||||
* Currently, Apache Spark is not supported.
|
||||
|
||||
ifndef::openshift-rosa[]
|
||||
|
||||
* Currently, the streaming deployment via AMQ/Kafka is not supported on the IBM Z and IBM Power Systems architectures.
|
||||
endif::openshift-rosa[]
|
||||
|
||||
// Tempo section
|
||||
[id="tempo-release-notes_distributed-tracing-rn-3-0"]
|
||||
== {TempoName}
|
||||
|
||||
[id="new-features-and-enhancements_tempo-release-notes_distributed-tracing-rn-3-0"]
|
||||
=== New features and enhancements
|
||||
|
||||
This update introduces the following enhancements for the {TempoShortName}:
|
||||
|
||||
* Support for the ARM architecture.
|
||||
* Support for span request count, duration, and error count (RED) metrics. The metrics can be visualized in the Jaeger console deployed as part of Tempo or in the web console in the *Observe* menu.
|
||||
|
||||
[id="bug-fixes_tempo-release-notes_distributed-tracing-rn-3-0"]
|
||||
=== Bug fixes
|
||||
|
||||
This update introduces the following bug fixes for the {TempoShortName}:
|
||||
|
||||
* Fixed support for the custom TLS CA option for connecting to object storage. (link:https://issues.redhat.com/browse/TRACING-3462[TRACING-3462])
|
||||
* Fixed support for disconnected environments when using the `oc adm catalog mirror` CLI command. (link:https://issues.redhat.com/browse/TRACING-3523[TRACING-3523])
|
||||
* Fixed mTLS when Gateway is not deployed. (link:https://issues.redhat.com/browse/TRACING-3510[TRACING-3510])
|
||||
|
||||
[id="known-issues_tempo-release-notes_distributed-tracing-rn-3-0"]
|
||||
=== Known issues
|
||||
|
||||
* Currently, when used with the {TempoOperator}, the Jaeger UI only displays services that have sent traces in the last 15 minutes. For services that did not send traces in the last 15 minutes, traces are still stored but not displayed in the Jaeger UI. (link:https://issues.redhat.com/browse/TRACING-3139[TRACING-3139])
|
||||
* Currently, the {TempoShortName} fails on the IBM Z (`s390x`) architecture. (link:https://issues.redhat.com/browse/TRACING-3545[TRACING-3545])
|
||||
|
||||
include::modules/support.adoc[leveloffset=+1]
|
||||
|
||||
include::modules/making-open-source-more-inclusive.adoc[leveloffset=+1]
|
||||
@@ -21,6 +21,10 @@ include::modules/distr-tracing-tempo-config-storage.adoc[leveloffset=+2]
|
||||
|
||||
include::modules/distr-tracing-tempo-config-query-frontend.adoc[leveloffset=+2]
|
||||
|
||||
include::modules/distr-tracing-tempo-config-spanmetrics.adoc[leveloffset=+2]
|
||||
|
||||
include::modules/distr-tracing-tempo-config-multitenancy.adoc[leveloffset=+2]
|
||||
|
||||
[id="setting-up-monitoring-for-tempo"]
|
||||
== Setting up monitoring for the {TempoShortName}
|
||||
|
||||
|
||||
@@ -6,9 +6,6 @@ include::_attributes/common-attributes.adoc[]
|
||||
|
||||
toc::[]
|
||||
|
||||
:FeatureName: The {TempoOperator}
|
||||
include::snippets/technology-preview.adoc[leveloffset=+1]
|
||||
|
||||
Installing the {TempoShortName} involves the following steps:
|
||||
|
||||
. Setting up supported object storage.
|
||||
|
||||
@@ -6,8 +6,11 @@ include::_attributes/common-attributes.adoc[]
|
||||
|
||||
toc::[]
|
||||
|
||||
include::modules/distr-tracing-tempo-update-olm.adoc[leveloffset=+1]
|
||||
For version upgrades, the {TempoOperator} uses the Operator Lifecycle Manager (OLM), which controls installation, upgrade, and role-based access control (RBAC) of Operators in a cluster.
|
||||
|
||||
The OLM runs in the {product-title} by default. The OLM queries for available Operators as well as upgrades for installed Operators.
|
||||
|
||||
When the {TempoOperator} is upgraded to the new version, it scans for running TempoStack instances that it manages and upgrades them to the version corresponding to the Operator's new version.
|
||||
[role="_additional-resources"]
|
||||
[id="additional-resources_dist-tracing-tempo-updating"]
|
||||
== Additional resources
|
||||
|
||||
@@ -109,6 +109,7 @@ spec:
|
||||
|Configuration options that define the Ingester service.
|
||||
|
|
||||
|
|
||||
|
||||
|===
|
||||
|
||||
The following example YAML is the minimum required to create a {JaegerName} deployment using the default settings.
|
||||
|
||||
@@ -63,4 +63,46 @@ The Collectors are stateless and thus many instances of Jaeger Collector can be
|
||||
log-level:
|
||||
|Logging level for the Collector.
|
||||
|Possible values: `debug`, `info`, `warn`, `error`, `fatal`, `panic`.
|
||||
|
||||
|options:
|
||||
otlp:
|
||||
enabled: true
|
||||
grpc:
|
||||
host-port: 4317
|
||||
max-connection-age: 0s
|
||||
max-connection-age-grace: 0s
|
||||
max-message-size: 4194304
|
||||
tls:
|
||||
enabled: false
|
||||
cert: /path/to/cert.crt
|
||||
cipher-suites: "TLS_AES_256_GCM_SHA384,TLS_CHACHA20_POLY1305_SHA256"
|
||||
client-ca: /path/to/cert.ca
|
||||
reload-interval: 0s
|
||||
min-version: 1.2
|
||||
max-version: 1.3
|
||||
|To accept OTLP/gRPC, explicitly enable the `otlp`. All the other options are optional.
|
||||
|
||||
|options:
|
||||
otlp:
|
||||
enabled: true
|
||||
http:
|
||||
cors:
|
||||
allowed-headers: [<header-name>[, <header-name>]*]
|
||||
allowed-origins: *
|
||||
host-port: 4318
|
||||
max-connection-age: 0s
|
||||
max-connection-age-grace: 0s
|
||||
max-message-size: 4194304
|
||||
read-timeout: 0s
|
||||
read-header-timeout: 2s
|
||||
idle-timeout: 0s
|
||||
tls:
|
||||
enabled: false
|
||||
cert: /path/to/cert.crt
|
||||
cipher-suites: "TLS_AES_256_GCM_SHA384,TLS_CHACHA20_POLY1305_SHA256"
|
||||
client-ca: /path/to/cert.ca
|
||||
reload-interval: 0s
|
||||
min-version: 1.2
|
||||
max-version: 1.3
|
||||
|To accept OTLP/HTTP, explicitly enable the `otlp`. All the other options are optional.
|
||||
|===
|
||||
|
||||
@@ -1,128 +0,0 @@
|
||||
////
|
||||
This module included in the following assemblies:
|
||||
-distr_tracing_install/distributed-tracing-deploying-otel.adoc
|
||||
////
|
||||
:_mod-docs-content-type: REFERENCE
|
||||
[id="distr-tracing-config-otel-collector_{context}"]
|
||||
= OpenTelemetry Collector configuration options
|
||||
|
||||
:FeatureName: The {OTELName} Operator
|
||||
include::snippets/technology-preview.adoc[leveloffset=+1]
|
||||
|
||||
The OpenTelemetry Collector consists of three components that access telemetry data:
|
||||
|
||||
* *Receivers* - A receiver, which can be push or pull based, is how data gets into the Collector. Generally, a receiver accepts data in a specified format, translates it into the internal format and passes it to processors and exporters defined in the applicable pipelines. By default, no receivers are configured. One or more receivers must be configured. Receivers may support one or more data sources.
|
||||
|
||||
* *Processors* - (Optional) Processors are run on data between being received and being exported. By default, no processors are enabled. Processors must be enabled for every data source. Not all processors support all data sources. Depending on the data source, it may be recommended that multiple processors be enabled. In addition, it is important to note that the order of processors matters.
|
||||
|
||||
* *Exporters* - An exporter, which can be push or pull based, is how you send data to one or more backends/destinations. By default, no exporters are configured. One or more exporters must be configured. Exporters may support one or more data sources. Exporters may come with default settings, but many require configuration to specify at least the destination and security settings.
|
||||
|
||||
You can define multiple instances of components in a custom resource YAML file. Once configured, these components must be enabled through pipelines defined in the `spec.config.service` section of the YAML file. As a best practice you should only enable the components that you need.
|
||||
|
||||
.sample OpenTelemetry collector custom resource file
|
||||
[source,yaml]
|
||||
----
|
||||
apiVersion: opentelemetry.io/v1alpha1
|
||||
kind: OpenTelemetryCollector
|
||||
metadata:
|
||||
name: cluster-collector
|
||||
namespace: tracing-system
|
||||
spec:
|
||||
mode: deployment
|
||||
config: |
|
||||
receivers:
|
||||
otlp:
|
||||
protocols:
|
||||
grpc:
|
||||
http:
|
||||
processors:
|
||||
exporters:
|
||||
jaeger:
|
||||
endpoint: jaeger-production-collector-headless.tracing-system.svc:14250
|
||||
tls:
|
||||
ca_file: "/var/run/secrets/kubernetes.io/serviceaccount/service-ca.crt"
|
||||
service:
|
||||
pipelines:
|
||||
traces:
|
||||
receivers: [otlp]
|
||||
processors: []
|
||||
exporters: [jaeger]
|
||||
----
|
||||
|
||||
[NOTE]
|
||||
====
|
||||
If a component is configured, but not defined within the `service` section then it is not enabled.
|
||||
====
|
||||
|
||||
.Parameters used by the Operator to define the OpenTelemetry Collector
|
||||
[options="header"]
|
||||
[cols="l, a, a, a"]
|
||||
|===
|
||||
|Parameter |Description |Values |Default
|
||||
|receivers:
|
||||
|A receiver is how data gets into the Collector. By default, no receivers are configured. There must be at least one enabled receiver for a configuration to be considered valid. Receivers are enabled by being added to a pipeline.
|
||||
|`otlp`, `jaeger`
|
||||
|None
|
||||
|
||||
|receivers:
|
||||
otlp:
|
||||
|The `oltp` and `jaeger` receivers come with default settings, specifying the name of the receiver is enough to configure it.
|
||||
|
|
||||
|
|
||||
|
||||
|processors:
|
||||
|Processors run on data between being received and being exported. By default, no processors are enabled.
|
||||
|
|
||||
|None
|
||||
|
||||
|exporters:
|
||||
|An exporter sends data to one or more backends/destinations. By default, no exporters are configured. There must be at least one enabled exporter for a configuration to be considered valid. Exporters are enabled by being added to a pipeline. Exporters may come with default settings, but many require configuration to specify at least the destination and security settings.
|
||||
|`logging`, `jaeger`
|
||||
|None
|
||||
|
||||
|exporters:
|
||||
jaeger:
|
||||
endpoint:
|
||||
|
||||
|The `jaeger` exporter’s endpoint must be of the form `<name>-collector-headless.<namespace>.svc`, with the name and namespace of the Jaeger deployment, for a secure connection to be established.
|
||||
|
|
||||
|
|
||||
|
||||
|exporters:
|
||||
jaeger:
|
||||
tls:
|
||||
ca_file:
|
||||
|Path to the CA certificate. For a client this verifies the server certificate. For a server this verifies client certificates. If empty uses system root CA.
|
||||
|
|
||||
|
|
||||
|
||||
|service:
|
||||
pipelines:
|
||||
|Components are enabled by adding them to a pipeline under `services.pipeline`.
|
||||
|
|
||||
|
|
||||
|
||||
|service:
|
||||
pipelines:
|
||||
traces:
|
||||
receivers:
|
||||
|You enable receivers for tracing by adding them under `service.pipelines.traces`.
|
||||
|
|
||||
|None
|
||||
|
||||
|service:
|
||||
pipelines:
|
||||
traces:
|
||||
processors:
|
||||
|You enable processors for tracing by adding them under `service.pipelines.traces`.
|
||||
|
|
||||
|None
|
||||
|
||||
|service:
|
||||
pipelines:
|
||||
traces:
|
||||
exporters:
|
||||
|You enable exporters for tracing by adding them under `service.pipelines.traces`.
|
||||
|
|
||||
|None
|
||||
|===
|
||||
@@ -159,10 +159,10 @@ Minimum deployment = 16Gi*
|
||||
|`true`/`false`
|
||||
|`true`
|
||||
|
||||
|
|
||||
3+|*Each Elasticsearch node can operate with a lower memory setting though this is NOT recommended for production deployments. For production use, you should have no less than 16Gi allocated to each pod by default, but preferably allocate as much as you can, up to 64Gi per pod.
|
||||
|===
|
||||
|
||||
Each Elasticsearch node can operate with a lower memory setting though this is NOT recommended for production deployments. For production use, you must have no less than 16 Gi allocated to each pod by default, but preferably allocate as much as you can, up to 64 Gi per pod.
|
||||
|
||||
.Production storage example
|
||||
[source,yaml]
|
||||
----
|
||||
|
||||
@@ -71,8 +71,7 @@ spec:
|
||||
kafka:
|
||||
producer:
|
||||
topic: jaeger-spans
|
||||
#Note: If brokers are not defined,AMQStreams 1.4.0+ will self-provision Kafka.
|
||||
brokers: my-cluster-kafka-brokers.kafka:9092
|
||||
brokers: my-cluster-kafka-brokers.kafka:9092 # <1>
|
||||
storage:
|
||||
type: elasticsearch
|
||||
ingester:
|
||||
@@ -83,6 +82,7 @@ spec:
|
||||
brokers: my-cluster-kafka-brokers.kafka:9092
|
||||
|
||||
----
|
||||
<1> If the brokers are not defined, AMQ Streams 1.4.0+ self-provisions Kafka.
|
||||
//TODO - find out if this storage configuration is correct for OpenShift
|
||||
|
||||
. Click *Create* to create the {JaegerShortName} instance.
|
||||
|
||||
@@ -1,53 +0,0 @@
|
||||
////
|
||||
This module included in the following assemblies:
|
||||
- distr_tracing_install/distr-tracing-installing.adoc
|
||||
////
|
||||
|
||||
:_mod-docs-content-type: PROCEDURE
|
||||
[id="distr-tracing-install-otel-operator_{context}"]
|
||||
= Installing the {OTELName} Operator
|
||||
|
||||
:FeatureName: The {OTELName} Operator
|
||||
include::snippets/technology-preview.adoc[leveloffset=+1]
|
||||
|
||||
To install {OTELName}, you use the link:https://operatorhub.io/[OperatorHub] to install the {OTELName} Operator.
|
||||
|
||||
By default, the Operator is installed in the `openshift-operators` project.
|
||||
|
||||
.Prerequisites
|
||||
* You have access to the {product-title} web console.
|
||||
* You have access to the cluster as a user with the `cluster-admin` role. If you use {product-dedicated}, you must have an account with the `dedicated-admin` role.
|
||||
|
||||
[WARNING]
|
||||
====
|
||||
Do not install Community versions of the Operators. Community Operators are not supported.
|
||||
====
|
||||
|
||||
.Procedure
|
||||
|
||||
. Log in to the {product-title} web console as a user with the `cluster-admin` role. If you use {product-dedicated}, you must have an account with the `dedicated-admin` role.
|
||||
|
||||
. Navigate to *Operators* -> *OperatorHub*.
|
||||
|
||||
. Type *distributed tracing data collection* into the filter to locate the {OTELName} Operator.
|
||||
|
||||
. Click the *{OTELName} Operator* provided by Red Hat to display information about the Operator.
|
||||
|
||||
. Click *Install*.
|
||||
|
||||
. On the *Install Operator* page, accept the default *stable* Update channel. This automatically updates your Operator as new versions are released.
|
||||
|
||||
. Accept the default *All namespaces on the cluster (default)*. This installs the Operator in the default `openshift-operators` project and makes the Operator available to all projects in the cluster.
|
||||
|
||||
. Accept the default *Automatic* approval strategy. By accepting the default, when a new version of this Operator is available, Operator Lifecycle Manager (OLM) automatically upgrades the running instance of your Operator without human intervention. If you select *Manual* updates, when a newer version of an Operator is available, OLM creates an update request. As a cluster administrator, you must then manually approve that update request to have the Operator updated to the new version.
|
||||
+
|
||||
[NOTE]
|
||||
====
|
||||
The *Manual* approval strategy requires a user with appropriate credentials to approve the Operator install and subscription process.
|
||||
====
|
||||
|
||||
. Click *Install*.
|
||||
|
||||
. Go to *Operators* -> *Installed Operators*.
|
||||
|
||||
. On the *Installed Operators* page, select the `openshift-operators` project. Wait until you see that the {OTELName} Operator shows a status of "Succeeded" before continuing.
|
||||
@@ -1,553 +0,0 @@
|
||||
////
|
||||
This module included in the following assemblies:
|
||||
-distr_tracing_otel/distr-tracing-otel-configuring.adoc
|
||||
////
|
||||
:_mod-docs-content-type: REFERENCE
|
||||
[id="distr-tracing-config-otel-collector_{context}"]
|
||||
= OpenTelemetry Collector configuration options
|
||||
|
||||
The OpenTelemetry Collector consists of three components that access telemetry data:
|
||||
|
||||
Receivers:: A receiver, which can be push or pull based, is how data gets into the Collector. Generally, a receiver accepts data in a specified format, translates it into the internal format, and passes it to processors and exporters defined in the applicable pipelines. By default, no receivers are configured. One or more receivers must be configured. Receivers may support one or more data sources.
|
||||
|
||||
Processors:: Optional. Processors process the data between the time it is received and the time it is exported. By default, no processors are enabled. Processors must be enabled for every data source. Not all processors support all data sources. Depending on the data source, multiple processors might be enabled. Note that the order of processors matters.
|
||||
|
||||
Exporters:: An exporter, which can be push or pull based, is how you send data to one or more back ends or destinations. By default, no exporters are configured. One or more exporters must be configured. Exporters can support one or more data sources. Exporters might be used with their default settings, but many exporters require configuration to specify at least the destination and security settings.
|
||||
|
||||
You can define multiple instances of components in a custom resource YAML file. When configured, these components must be enabled through pipelines defined in the `spec.config.service` section of the YAML file. As a best practice, only enable the components that you need.
|
||||
|
||||
.Example of the OpenTelemetry Collector custom resource file
|
||||
[source,yaml]
|
||||
----
|
||||
apiVersion: opentelemetry.io/v1alpha1
|
||||
kind: OpenTelemetryCollector
|
||||
metadata:
|
||||
name: cluster-collector
|
||||
namespace: tracing-system
|
||||
spec:
|
||||
mode: deployment
|
||||
ports:
|
||||
- name: promexporter
|
||||
port: 8889
|
||||
protocol: TCP
|
||||
config: |
|
||||
receivers:
|
||||
otlp:
|
||||
protocols:
|
||||
grpc:
|
||||
http:
|
||||
processors:
|
||||
exporters:
|
||||
jaeger:
|
||||
endpoint: jaeger-production-collector-headless.tracing-system.svc:14250
|
||||
tls:
|
||||
ca_file: "/var/run/secrets/kubernetes.io/serviceaccount/service-ca.crt"
|
||||
prometheus:
|
||||
endpoint: 0.0.0.0:8889
|
||||
resource_to_telemetry_conversion:
|
||||
enabled: true # by default resource attributes are dropped
|
||||
service: <1>
|
||||
pipelines:
|
||||
traces:
|
||||
receivers: [otlp]
|
||||
processors: []
|
||||
exporters: [jaeger]
|
||||
metrics:
|
||||
receivers: [otlp]
|
||||
processors: []
|
||||
exporters: [prometheus]
|
||||
----
|
||||
<1> If a component is configured but not defined in the `service` section, the component is not enabled.
|
||||
|
||||
.Parameters used by the Operator to define the OpenTelemetry Collector
|
||||
[options="header"]
|
||||
[cols="l, a, a, a"]
|
||||
|===
|
||||
|Parameter |Description |Values |Default
|
||||
|receivers:
|
||||
|A receiver is how data gets into the Collector. By default, no receivers are configured. There must be at least one enabled receiver for a configuration to be considered valid. Receivers are enabled by being added to a pipeline.
|
||||
|`otlp`, `jaeger`, `zipkin`
|
||||
|None
|
||||
|
||||
|processors:
|
||||
|Processors process the data between the time it is received and the time it is exported. By default, no processors are enabled.
|
||||
|
|
||||
|None
|
||||
|
||||
|exporters:
|
||||
|An exporter sends data to one or more back ends or destinations. By default, no exporters are configured. There must be at least one enabled exporter for a configuration to be considered valid. Exporters are enabled by being added to a pipeline. Exporters might be used with their default settings, but many require configuration to specify at least the destination and security settings.
|
||||
|`otlp`, `otlphttp`, `jaeger`, `logging`, `prometheus`
|
||||
|None
|
||||
|
||||
|service:
|
||||
pipelines:
|
||||
|Components are enabled by adding them to a pipeline under `service.pipelines`.
|
||||
|
|
||||
|
|
||||
|
||||
|service:
|
||||
pipelines:
|
||||
traces:
|
||||
receivers:
|
||||
|You enable receivers for tracing by adding them under `service.pipelines.traces`.
|
||||
|
|
||||
|None
|
||||
|
||||
|service:
|
||||
pipelines:
|
||||
traces:
|
||||
processors:
|
||||
|You enable processors for tracing by adding them under `service.pipelines.traces`.
|
||||
|
|
||||
|None
|
||||
|
||||
|service:
|
||||
pipelines:
|
||||
traces:
|
||||
exporters:
|
||||
|You enable exporters for tracing by adding them under `service.pipelines.traces`.
|
||||
|
|
||||
|None
|
||||
|
||||
|service:
|
||||
pipelines:
|
||||
metrics:
|
||||
receivers:
|
||||
|You enable receivers for metrics by adding them under `service.pipelines.metrics`.
|
||||
|
|
||||
|None
|
||||
|
||||
|service:
|
||||
pipelines:
|
||||
metrics:
|
||||
processors:
|
||||
|You enable processors for metrics by adding them under `service.pipelines.metrics`.
|
||||
|
|
||||
|None
|
||||
|
||||
|service:
|
||||
pipelines:
|
||||
metrics:
|
||||
exporters:
|
||||
|You enable exporters for metrics by adding them under `service.pipelines.metrics`.
|
||||
|
|
||||
|None
|
||||
|===
|
||||
|
||||
[id="otel-collector-components_{context}"]
|
||||
== OpenTelemetry Collector components
|
||||
|
||||
[id="receivers_{context}"]
|
||||
=== Receivers
|
||||
|
||||
[id="otlp-receiver_{context}"]
|
||||
==== OTLP Receiver
|
||||
|
||||
The OTLP receiver ingests data using the OpenTelemetry protocol (OTLP).
|
||||
|
||||
* Support level: link:https://access.redhat.com/support/offerings/techpreview[Technology Preview]
|
||||
* Supported signals: traces, metrics
|
||||
|
||||
.OpenTelemetry Collector custom resource with an enabled OTLP receiver
|
||||
[source,yaml]
|
||||
----
|
||||
config: |
|
||||
receivers:
|
||||
otlp:
|
||||
protocols:
|
||||
grpc:
|
||||
endpoint: 0.0.0.0:4317 <1>
|
||||
tls: <2>
|
||||
ca_file: ca.pem
|
||||
cert_file: cert.pem
|
||||
key_file: key.pem
|
||||
client_ca_file: client.pem <3>
|
||||
reload_interval: 1h <4>
|
||||
http:
|
||||
endpoint: 0.0.0.0:4318 <5>
|
||||
tls: <6>
|
||||
|
||||
service:
|
||||
pipelines:
|
||||
traces:
|
||||
receivers: [otlp]
|
||||
metrics:
|
||||
receivers: [otlp]
|
||||
----
|
||||
<1> The OTLP gRPC endpoint. If omitted, the default `+0.0.0.0:4317+` is used.
|
||||
<2> The server-side TLS configuration. Defines paths to TLS certificates. If omitted, TLS is disabled.
|
||||
<3> The path to the TLS certificate at which the server verifies a client certificate. This sets the value of `ClientCAs` and `ClientAuth` to `RequireAndVerifyClientCert` in the `TLSConfig`. For more information, see the link:https://godoc.org/crypto/tls#Config[`Config` of the Golang TLS package].
|
||||
<4> Specifies the time interval at which the certificate is reloaded. If the value is not set, the certificate is never reloaded. `reload_interval` accepts a string containing valid units of time such as `ns`, `us` (or `µs`), `ms`, `s`, `m`, `h`.
|
||||
<5> The OTLP HTTP endpoint. The default value is `+0.0.0.0:4318+`.
|
||||
<6> The server-side TLS configuration. For more information, see `grpc` protocol configuration section.
|
||||
|
||||
[id="jaeger-receiver_{context}"]
|
||||
==== Jaeger Receiver
|
||||
|
||||
The Jaeger receiver ingests data in Jaeger formats.
|
||||
|
||||
* Support level: link:https://access.redhat.com/support/offerings/techpreview[Technology Preview]
|
||||
* Supported signals: traces
|
||||
|
||||
.OpenTelemetry Collector custom resource with an enabled Jaeger receiver
|
||||
[source,yaml]
|
||||
----
|
||||
config: |
|
||||
receivers:
|
||||
jaeger:
|
||||
protocols:
|
||||
grpc:
|
||||
endpoint: 0.0.0.0:14250 <1>
|
||||
thrift_http:
|
||||
endpoint: 0.0.0.0:14268 <2>
|
||||
thrift_compact:
|
||||
endpoint: 0.0.0.0:6831 <3>
|
||||
thrift_binary:
|
||||
endpoint: 0.0.0.0:6832 <4>
|
||||
tls: <5>
|
||||
|
||||
service:
|
||||
pipelines:
|
||||
traces:
|
||||
receivers: [jaeger]
|
||||
----
|
||||
<1> The Jaeger gRPC endpoint. If omitted, the default `+0.0.0.0:14250+` is used.
|
||||
<2> The Jaeger Thrift HTTP endpoint. If omitted, the default `+0.0.0.0:14268+` is used.
|
||||
<3> The Jaeger Thrift Compact endpoint. If omitted, the default `+0.0.0.0:6831+` is used.
|
||||
<4> The Jaeger Thrift Binary endpoint. If omitted, the default `+0.0.0.0:6832+` is used.
|
||||
<5> The TLS server side configuration. See the OTLP receiver configuration section for more details.
|
||||
|
||||
[id="zipkin-receiver_{context}"]
|
||||
==== Zipkin Receiver
|
||||
|
||||
The Zipkin receiver ingests data in the Zipkin v1 and v2 formats.
|
||||
|
||||
* Support level: link:https://access.redhat.com/support/offerings/techpreview[Technology Preview]
|
||||
* Supported signals: traces
|
||||
|
||||
.OpenTelemetry Collector custom resource with enabled Zipkin receiver
|
||||
[source,yaml]
|
||||
----
|
||||
config: |
|
||||
receivers:
|
||||
zipkin:
|
||||
endpoint: 0.0.0.0:9411 <1>
|
||||
tls: <2>
|
||||
|
||||
service:
|
||||
pipelines:
|
||||
traces:
|
||||
receivers: [zipkin]
|
||||
----
|
||||
<1> The Zipkin HTTP endpoint. If omitted, the default `+0.0.0.0:9411+` is used.
|
||||
<2> The TLS server side configuration. See the OTLP receiver configuration section for more details.
|
||||
|
||||
[id="processors_{context}"]
|
||||
=== Processors
|
||||
|
||||
|
||||
[id="batch-processor_{context}"]
|
||||
==== Batch processor
|
||||
|
||||
The batch processor batches the data to reduce the number of outgoing connections needed to transfer the telemetry information.
|
||||
|
||||
* Support level: link:https://access.redhat.com/support/offerings/techpreview[Technology Preview]
|
||||
* Supported signals: traces, metrics
|
||||
|
||||
.Example of the OpenTelemetry Collector custom resource when using the batch processor
|
||||
[source,yaml]
|
||||
----
|
||||
config: |
|
||||
processor:
|
||||
batch:
|
||||
timeout: 5s
|
||||
send_batch_max_size: 10000
|
||||
service:
|
||||
pipelines:
|
||||
traces:
|
||||
processors: [batch]
|
||||
metrics:
|
||||
processors: [batch]
|
||||
----
|
||||
|
||||
.Parameters used by the batch processor
|
||||
[cols="3",options="header"]
|
||||
|===
|
||||
|Parameter |Description |Default
|
||||
|
||||
| `timeout`
|
||||
| Sends the batch after a specific time duration, irrespective of its size.
|
||||
| 200ms
|
||||
|
||||
| `send_batch_size`
|
||||
| Sends the batch of telemetry data after the specified number of spans or metrics.
|
||||
| 8192
|
||||
|
||||
| `send_batch_max_size`
|
||||
| The maximum allowable size of the batch. Must be equal or greater than `send_batch_size`.
|
||||
| 0
|
||||
|
||||
| `metadata_keys`
|
||||
| When activated, a batcher instance is created for each unique set of values found in the `client.Metadata`.
|
||||
| []
|
||||
|
||||
| `metadata_cardinality_limit`
|
||||
| When the `metadata_keys` are populated, this configuration restricts the number of distinct metadata key-value combinations processed throughout the duration of the process.
|
||||
| 1000
|
||||
|===
|
||||
|
||||
[id="memorylimiter-processor_{context}"]
|
||||
==== Memory Limiter processor
|
||||
|
||||
The Memory Limiter processor periodically checks the Collector's memory usage and pauses data processing when the soft memory limit is reached.
|
||||
The preceding component, which is typically a receiver, is expected to retry sending the same data and may apply a backpressure to the incoming data.
|
||||
When memory usage exceeds the hard limit, the Memory Limiter processor forces garbage collection to run.
|
||||
|
||||
* Support level: General Availability
|
||||
* Supported signals: traces, metrics, logs
|
||||
|
||||
.Example of the OpenTelemetry Collector custom resource when using the Memory Limiter processor
|
||||
[source,yaml]
|
||||
----
|
||||
config: |
|
||||
processor:
|
||||
memory_limiter:
|
||||
check_interval: 1s
|
||||
limit_mib: 4000
|
||||
spike_limit_mib: 800
|
||||
service:
|
||||
pipelines:
|
||||
traces:
|
||||
processors: [batch]
|
||||
metrics:
|
||||
processors: [batch]
|
||||
----
|
||||
|
||||
.Parameters used by the Memory Limiter processor
|
||||
[cols="3",options="header"]
|
||||
|===
|
||||
| Parameter | Description | Default
|
||||
|
||||
| `check_interval`
|
||||
| Time between memory usage measurements. The optimal value is 1s. For spiky traffic patterns, you can decrease the `check_interval` or increase the `spike_limit_mib`.
|
||||
| `0s`
|
||||
|
||||
| `limit_mib`
|
||||
| The hard limit, which is the maximum amount of memory in MiB allocated on the heap. Typically, the total memory usage of the OpenTelemetry Collector is about 50 MiB greater than this value.
|
||||
| `0`
|
||||
|
||||
| `spike_limit_mib`
|
||||
| Spike limit, which is the maximum expected spike of memory usage in MiB. The optimal value is approximately 20% of `limit_mib`. To calculate the soft limit, subtract the `spike_limit_mib` from the `limit_mib`.
|
||||
| 20% of `limit_mib`
|
||||
|
||||
| `limit_percentage`
|
||||
| Same as the `limit_mib` but expressed as a percentage of the total available memory. The `limit_mib` setting takes precedence over this setting.
|
||||
| `0`
|
||||
|
||||
| `spike_limit_percentage`
|
||||
| Same as the `spike_limit_mib` but expressed as a percentage of the total available memory. Intended to be used with the `limit_percentage` setting.
|
||||
| `0`
|
||||
|
||||
|===
|
||||
|
||||
[id="resource-detection-processor_{context}"]
|
||||
==== Resource Detection processor
|
||||
|
||||
The Resource Detection processor is designed to identify host resource details in alignment with OpenTelemetry's resource semantic standards. Using this detected information, it can add or replace the resource values in telemetry data.
|
||||
|
||||
* Support level: link:https://access.redhat.com/support/offerings/techpreview[Technology Preview]
|
||||
* Supported signals: traces, metrics
|
||||
|
||||
.{product-title} permissions required for the Resource Detection processor
|
||||
[source,yaml]
|
||||
----
|
||||
kind: ClusterRole
|
||||
metadata:
|
||||
name: otel-collector
|
||||
rules:
|
||||
- apiGroups: ["config.openshift.io"]
|
||||
resources: ["infrastructures", "infrastructures/status"]
|
||||
verbs: ["get", "watch", "list"]
|
||||
----
|
||||
|
||||
.OpenTelemetry Collector using the Resource Detection processor
|
||||
[source,yaml]
|
||||
----
|
||||
config: |
|
||||
processor:
|
||||
resourcedetection:
|
||||
detectors: [openshift]
|
||||
override: true
|
||||
service:
|
||||
pipelines:
|
||||
traces:
|
||||
processors: [resourcedetection]
|
||||
metrics:
|
||||
processors: [resourcedetection]
|
||||
----
|
||||
|
||||
[id="exporters_{context}"]
|
||||
=== Exporters
|
||||
|
||||
[id="otlp-exporter_{context}"]
|
||||
==== OTLP exporter
|
||||
|
||||
The OTLP gRPC exporter exports data using the OpenTelemetry protocol (OTLP).
|
||||
|
||||
* Support level: link:https://access.redhat.com/support/offerings/techpreview[Technology Preview]
|
||||
* Supported signals: traces, metrics
|
||||
|
||||
.OpenTelemetry Collector custom resource with an enabled OTLP exporter
|
||||
[source,yaml]
|
||||
----
|
||||
config: |
|
||||
exporters:
|
||||
otlp:
|
||||
endpoint: tempo-ingester:4317 <1>
|
||||
tls: <2>
|
||||
ca_file: ca.pem
|
||||
cert_file: cert.pem
|
||||
key_file: key.pem
|
||||
insecure: false <3>
|
||||
insecure_skip_verify: false <4>
|
||||
reload_interval: 1h <5>
|
||||
server_name_override: <name> <6>
|
||||
headers: <7>
|
||||
X-Scope-OrgID: "dev"
|
||||
service:
|
||||
pipelines:
|
||||
traces:
|
||||
exporters: [otlp]
|
||||
metrics:
|
||||
exporters: [otlp]
|
||||
----
|
||||
<1> The OTLP gRPC endpoint. If the `+https://+` scheme is used, then client transport security is enabled and overrides the `insecure` setting in the `tls`.
|
||||
<2> The client side TLS configuration. Defines paths to TLS certificates.
|
||||
<3> Disables client transport security when set to `true`. The default value is `false`.
|
||||
<4> Skips verifying the certificate when set to `true`. The default value is `false`.
|
||||
<5> Specifies the time interval at which the certificate is reloaded. If the value is not set, the certificate is never reloaded. `reload_interval` accepts a string containing valid units of time such as `ns`, `us` (or `µs`), `ms`, `s`, `m`, `h`.
|
||||
<6> Overrides the virtual host name of authority such as the authority header field in requests. You can use this for testing.
|
||||
<7> Headers are sent for every request performed during an established connection.
|
||||
|
||||
[id="otlp-http-exporter_{context}"]
|
||||
==== OTLP HTTP exporter
|
||||
|
||||
The OTLP HTTP exporter exports data using the OpenTelemetry protocol (OTLP).
|
||||
|
||||
* Support level: link:https://access.redhat.com/support/offerings/techpreview[Technology Preview]
|
||||
* Supported signals: traces, metrics
|
||||
|
||||
.OpenTelemetry Collector custom resource with an enabled OTLP exporter
|
||||
[source,yaml]
|
||||
----
|
||||
config: |
|
||||
exporters:
|
||||
otlphttp:
|
||||
endpoint: http://tempo-ingester:4318 <1>
|
||||
tls: <2>
|
||||
headers: <3>
|
||||
X-Scope-OrgID: "dev"
|
||||
|
||||
service:
|
||||
pipelines:
|
||||
traces:
|
||||
exporters: [otlphttp]
|
||||
metrics:
|
||||
exporters: [otlphttp]
|
||||
----
|
||||
<1> The OTLP HTTP endpoint. If the `+https://+` scheme is used, then client transport security is enabled and overrides the `insecure` setting in the `tls`.
|
||||
<2> The client side TLS configuration. Defines paths to TLS certificates.
|
||||
<3> Headers are sent in every HTTP request.
|
||||
|
||||
[id="jaeger-exporter_{context}"]
|
||||
==== Jaeger exporter
|
||||
|
||||
The Jaeger exporter exports data using the Jaeger proto format through gRPC.
|
||||
|
||||
* Support level: link:https://access.redhat.com/support/offerings/techpreview[Technology Preview]
|
||||
* Supported signals: traces
|
||||
|
||||
.OpenTelemetry Collector custom resource with enabled Jaeger exporter
|
||||
[source,yaml]
|
||||
----
|
||||
config: |
|
||||
exporters:
|
||||
jaeger:
|
||||
endpoint: jaeger-all-in-one:14250 <1>
|
||||
tls: <2>
|
||||
service:
|
||||
pipelines:
|
||||
traces:
|
||||
exporters: [jaeger]
|
||||
----
|
||||
<1> The Jaeger gRPC endpoint.
|
||||
<2> The client side TLS configuration. Defines paths to TLS certificates.
|
||||
|
||||
[id="logging-exporter_{context}"]
|
||||
==== Logging exporter
|
||||
|
||||
The Logging exporter prints data to the standard output.
|
||||
|
||||
* Support level: link:https://access.redhat.com/support/offerings/techpreview[Technology Preview]
|
||||
* Supported signals: traces, metrics
|
||||
|
||||
.OpenTelemetry Collector custom resource with an enabled Logging exporter
|
||||
[source,yaml]
|
||||
----
|
||||
config: |
|
||||
exporters:
|
||||
logging:
|
||||
verbosity: detailed <1>
|
||||
service:
|
||||
pipelines:
|
||||
traces:
|
||||
exporters: [logging]
|
||||
metrics:
|
||||
exporters: [logging]
|
||||
----
|
||||
<1> Verbosity of the logging exporter: `detailed`, `normal`, or `basic`. When set to `detailed`, pipeline data is verbosely logged. Defaults to `normal`.
|
||||
|
||||
[id="prometheus-exporter_{context}"]
|
||||
==== Prometheus exporter
|
||||
|
||||
The Prometheus exporter exports data using the Prometheus or OpenMetrics formats.
|
||||
|
||||
* Support level: link:https://access.redhat.com/support/offerings/techpreview[Technology Preview]
|
||||
* Supported signals: metrics
|
||||
|
||||
.OpenTelemetry Collector custom resource with an enabled Prometheus exporter
|
||||
[source,yaml]
|
||||
----
|
||||
ports:
|
||||
- name: promexporter <1>
|
||||
port: 8889
|
||||
protocol: TCP
|
||||
config: |
|
||||
exporters:
|
||||
prometheus:
|
||||
endpoint: 0.0.0.0:8889 <2>
|
||||
tls: <3>
|
||||
ca_file: ca.pem
|
||||
cert_file: cert.pem
|
||||
key_file: key.pem
|
||||
namespace: prefix <4>
|
||||
const_labels: <5>
|
||||
label1: value1
|
||||
enable_open_metrics: true <6>
|
||||
resource_to_telemetry_conversion: <7>
|
||||
enabled: true
|
||||
metric_expiration: 180m <8>
|
||||
service:
|
||||
pipelines:
|
||||
metrics:
|
||||
exporters: [prometheus]
|
||||
----
|
||||
<1> Exposes the Prometheus port from the collector pod and service. You can enable scraping of metrics by Prometheus by using the port name in `ServiceMonitor` or `PodMonitor` custom resource.
|
||||
<2> The network endpoint where the metrics are exposed.
|
||||
<3> The server-side TLS configuration. Defines paths to TLS certificates.
|
||||
<4> If set, exports metrics under the provided value. No default.
|
||||
<5> Key-value pair labels that are applied for every exported metric. No default.
|
||||
<6> If `true`, metrics are exported using the OpenMetrics format. Exemplars are only exported in the OpenMetrics format and only for histogram and monotonic sum metrics such as `counter`. Disabled by default.
|
||||
<7> If `enabled` is `true`, all the resource attributes are converted to metric labels by default. Disabled by default.
|
||||
<8> Defines how long metrics are exposed without updates. The default is `5m`.
|
||||
|
||||
@@ -1,39 +0,0 @@
|
||||
////
|
||||
This module is included in the following assemblies:
|
||||
- distr_tracing_install/distributed-tracing-deploying-otel.adoc
|
||||
////
|
||||
:_mod-docs-content-type: REFERENCE
|
||||
[id="distr-tracing-send-metrics-monitoring-stack_{context}"]
|
||||
= Sending metrics to the monitoring stack
|
||||
|
||||
You can configure the monitoring stack to scrape OpenTelemetry Collector metrics endpoints and to remove duplicated labels that the monitoring stack has added during scraping.
|
||||
|
||||
.Sample `PodMonitor` custom resource (CR) that configures the monitoring stack to scrape Collector metrics
|
||||
[source,yaml]
|
||||
----
|
||||
apiVersion: monitoring.coreos.com/v1
|
||||
kind: PodMonitor
|
||||
metadata:
|
||||
name: otel-collector
|
||||
spec:
|
||||
selector:
|
||||
matchLabels:
|
||||
app.kubernetes.io/name: otel-collector
|
||||
podMetricsEndpoints:
|
||||
- port: metrics <1>
|
||||
- port: promexporter <2>
|
||||
relabelings:
|
||||
- action: labeldrop
|
||||
regex: pod
|
||||
- action: labeldrop
|
||||
regex: container
|
||||
- action: labeldrop
|
||||
regex: endpoint
|
||||
metricRelabelings:
|
||||
- action: labeldrop
|
||||
regex: instance
|
||||
- action: labeldrop
|
||||
regex: job
|
||||
----
|
||||
<1> The name of the internal metrics port for the OpenTelemetry Collector. This port name is always `metrics`.
|
||||
<2> The name of the Prometheus exporter port for the OpenTelemetry Collector. This port name is defined in the `.spec.ports` section of the `OpenTelemetryCollector` CR.
|
||||
@@ -1,30 +0,0 @@
|
||||
// Module included in the following assemblies:
|
||||
//
|
||||
// * /distr_tracing/distr_tracing_otel/distr-tracing-otel-troubleshooting.adoc
|
||||
:_mod-docs-content-type: PROCEDURE
|
||||
[id="distr-tracing-otel-troubleshoot-logging-exporter_{context}"]
|
||||
= Logging exporter
|
||||
|
||||
You can configure the logging exporter to export the collected data to the standard output.
|
||||
|
||||
.Procedure
|
||||
|
||||
. Configure the OpenTelemetry Collector custom resource as follows:
|
||||
+
|
||||
[source,yaml]
|
||||
----
|
||||
config: |
|
||||
exporters:
|
||||
logging:
|
||||
verbosity: detailed
|
||||
service:
|
||||
pipelines:
|
||||
traces:
|
||||
exporters: [logging]
|
||||
metrics:
|
||||
exporters: [logging]
|
||||
logs:
|
||||
exporters: [logging]
|
||||
----
|
||||
|
||||
. Use the `oc logs` command or the OpenShift console to export the logs to the standard output.
|
||||
@@ -1,24 +0,0 @@
|
||||
// Module included in the following assemblies:
|
||||
//
|
||||
// * /distr_tracing/distr_tracing_otel/distr-tracing-otel-troubleshooting.adoc
|
||||
:_mod-docs-content-type: PROCEDURE
|
||||
[id="distr-tracing-otel-troubleshoot-logs_{context}"]
|
||||
= Getting the OpenTelemetry Collector logs
|
||||
|
||||
You can get the logs for the OpenTelemetry Collector as follows.
|
||||
|
||||
.Procedure
|
||||
|
||||
. Set the relevant log level in the OpenTelemetry Collector custom resource (CR):
|
||||
+
|
||||
[source,yaml]
|
||||
----
|
||||
config: |
|
||||
service:
|
||||
telemetry:
|
||||
logs:
|
||||
level: debug <1>
|
||||
----
|
||||
<1> Collector's log level. Select one of the following values: `info`, `warn`, `error`, or `debug`. Defaults to `info`.
|
||||
|
||||
. Use the `oc logs` command or the OpenShift console to retrieve the logs.
|
||||
@@ -10,6 +10,7 @@
|
||||
// * distr_tracing/distr_tracing_rn/distr-tracing-rn-2-7.adoc
|
||||
// * distr_tracing/distr_tracing_rn/distr-tracing-rn-2-8.adoc
|
||||
// * distr_tracing/distr_tracing_rn/distr-tracing-rn-2-9.adoc
|
||||
// * distr_tracing/distr_tracing_rn/distr-tracing-rn-3-0.adoc
|
||||
// * distr_tracing_arch/distr-tracing-architecture.adoc
|
||||
// * service_mesh/v2x/ossm-architecture.adoc
|
||||
// * serverless/serverless-tracing.adoc
|
||||
|
||||
@@ -32,85 +32,86 @@ spec:
|
||||
|
||||
.Tempo parameters
|
||||
[options="header"]
|
||||
[cols="l, a, a, a"]
|
||||
|===
|
||||
|Parameter |Description |Values |Default value
|
||||
|
||||
|`apiVersion:`
|
||||
|apiVersion:
|
||||
|API version to use when creating the object.
|
||||
|`tempotracing.io/v1`
|
||||
|`tempotracing.io/v1`
|
||||
|
||||
|`kind:`
|
||||
|kind:
|
||||
|Defines the kind of Kubernetes object to create.
|
||||
|`tempo`
|
||||
|
|
||||
|
||||
|`metadata:`
|
||||
|metadata:
|
||||
|Data that uniquely identifies the object, including a `name` string, `UID`, and optional `namespace`.
|
||||
|
|
||||
|{product-title} automatically generates the `UID` and completes the `namespace` with the name of the project where the object is created.
|
||||
|
||||
|`name:`
|
||||
|name:
|
||||
|Name for the object.
|
||||
|Name of your TempoStack instance.
|
||||
|`tempo-all-in-one-inmemory`
|
||||
|
||||
|`spec:`
|
||||
|spec:
|
||||
|Specification for the object to be created.
|
||||
|Contains all of the configuration parameters for your TempoStack instance. When a common definition for all Tempo components is required, it is defined under the `spec` node. When the definition relates to an individual component, it is placed under the `spec/template/<component>` node.
|
||||
|N/A
|
||||
|
||||
|`resources:`
|
||||
|resources:
|
||||
|Resources assigned to the TempoStack.
|
||||
|
|
||||
|
|
||||
|
||||
|`storageSize:`
|
||||
|storageSize:
|
||||
|Storage size for ingester PVCs.
|
||||
|
|
||||
|
|
||||
|
||||
|`replicationFactor:`
|
||||
|replicationFactor:
|
||||
|Configuration for the replication factor.
|
||||
|
|
||||
|
|
||||
|
||||
|`retention:`
|
||||
|retention:
|
||||
|Configuration options for retention of traces.
|
||||
|
|
||||
|
|
||||
|
||||
|`storage:`
|
||||
|storage:
|
||||
|Configuration options that define the storage. All storage-related options must be placed under `storage` and not under the `allInOne` or other component options.
|
||||
|
|
||||
|
|
||||
|
||||
|`template.distributor:`
|
||||
|template.distributor:
|
||||
|Configuration options for the Tempo `distributor`.
|
||||
|
|
||||
|
|
||||
|
||||
|`template.ingester:`
|
||||
|template.ingester:
|
||||
|Configuration options for the Tempo `ingester`.
|
||||
|
|
||||
|
|
||||
|
||||
|`template.compactor:`
|
||||
|template.compactor:
|
||||
|Configuration options for the Tempo `compactor`.
|
||||
|
|
||||
|
|
||||
|
||||
|`template.querier:`
|
||||
|template.querier:
|
||||
|Configuration options for the Tempo `querier`.
|
||||
|
|
||||
|
|
||||
|
||||
|`template.queryFrontend:`
|
||||
|template.queryFrontend:
|
||||
|Configuration options for the Tempo `query-frontend`.
|
||||
|
|
||||
|
|
||||
|
||||
|`template.gateway:`
|
||||
|template.gateway:
|
||||
|Configuration options for the Tempo `gateway`.
|
||||
|
|
||||
|
|
||||
@@ -130,7 +131,7 @@ kind: TempoStack
|
||||
metadata:
|
||||
name: simplest
|
||||
spec:
|
||||
storage: <1>
|
||||
storage: # <1>
|
||||
secret:
|
||||
name: minio
|
||||
type: s3
|
||||
|
||||
151
modules/distr-tracing-tempo-config-multitenancy.adoc
Normal file
151
modules/distr-tracing-tempo-config-multitenancy.adoc
Normal file
@@ -0,0 +1,151 @@
|
||||
// Module included in the following assemblies:
|
||||
//
|
||||
// * distr_tracing_tempo/distr-tracing-tempo-configuring.adoc
|
||||
|
||||
:_mod-docs-content-type: REFERENCE
|
||||
[id="distr-tracing-tempo-config-multitenancy_{context}"]
|
||||
= Multitenancy
|
||||
|
||||
Multitenancy with authentication and authorization is provided in the Tempo Gateway service.
|
||||
The authentication uses OpenShift OAuth and the Kubernetes `TokenReview` API. The authorization uses the Kubernetes `SubjectAccessReview` API.
|
||||
|
||||
.Sample Tempo CR with two tenants, `dev` and `prod`
|
||||
[source,yaml]
|
||||
----
|
||||
apiVersion: tempo.grafana.com/v1alpha1
|
||||
kind: TempoStack
|
||||
metadata:
|
||||
name: simplest
|
||||
spec:
|
||||
tenants:
|
||||
mode: openshift # <1>
|
||||
authentication: # <2>
|
||||
- tenantName: dev # <3>
|
||||
tenantId: "1610b0c3-c509-4592-a256-a1871353dbfa" # <4>
|
||||
- tenantName: prod
|
||||
tenantId: "1610b0c3-c509-4592-a256-a1871353dbfb"
|
||||
template:
|
||||
gateway:
|
||||
enabled: true # <5>
|
||||
queryFrontend:
|
||||
jaegerQuery:
|
||||
enabled: true
|
||||
----
|
||||
|
||||
<1> Must be set to `openshift`.
|
||||
<2> The list of tenants.
|
||||
<3> The tenant name. Must be provided in the `X-Scope-OrgId` header when ingesting the data.
|
||||
<4> A unique tenant ID.
|
||||
<5> Enables a gateway that performs authentication and authorization. The Jaeger UI is exposed at `http://<gateway-ingress>/api/traces/v1/<tenant-name>/search`.
|
||||
|
||||
The authorization configuration uses the `ClusterRole` and `ClusterRoleBinding` of the Kubernetes Role-Based Access Control (RBAC). By default, no users have read or write permissions.
|
||||
|
||||
.Sample of the read RBAC configuration that allows authenticated users to read the trace data of the `dev` and `prod` tenants
|
||||
[source,yaml]
|
||||
----
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRole
|
||||
metadata:
|
||||
name: tempostack-traces-reader
|
||||
rules:
|
||||
- apiGroups:
|
||||
- 'tempo.grafana.com'
|
||||
resources: # <1>
|
||||
- dev
|
||||
- prod
|
||||
resourceNames:
|
||||
- traces
|
||||
verbs:
|
||||
- 'get' # <2>
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRoleBinding
|
||||
metadata:
|
||||
name: tempostack-traces-reader
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: ClusterRole
|
||||
name: tempostack-traces-reader
|
||||
subjects:
|
||||
- kind: Group
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
name: system:authenticated # <3>
|
||||
----
|
||||
|
||||
<1> Lists the tenants.
|
||||
<2> The `get` value enables the read operation.
|
||||
<3> Grants all authenticated users the read permissions for trace data.
|
||||
|
||||
.Sample of the write RBAC configuration that allows the `otel-collector` service account to write the trace data for the `dev` tenant
|
||||
[source,yaml]
|
||||
----
|
||||
apiVersion: v1
|
||||
kind: ServiceAccount
|
||||
metadata:
|
||||
name: otel-collector # <1>
|
||||
namespace: otel
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRole
|
||||
metadata:
|
||||
name: tempostack-traces-write
|
||||
rules:
|
||||
- apiGroups:
|
||||
- 'tempo.grafana.com'
|
||||
resources: # <2>
|
||||
- dev
|
||||
resourceNames:
|
||||
- traces
|
||||
verbs:
|
||||
- 'create' # <3>
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRoleBinding
|
||||
metadata:
|
||||
name: tempostack-traces
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: ClusterRole
|
||||
name: tempostack-traces-write
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: otel-collector
|
||||
namespace: otel
|
||||
----
|
||||
<1> The service account name for the client to use when exporting trace data. The client must send the service account token, `/var/run/secrets/kubernetes.io/serviceaccount/token`, as the bearer token header.
|
||||
<2> Lists the tenants.
|
||||
<3> The `create` value enables the write operation.
|
||||
|
||||
Trace data can be sent to the Tempo instance from the OpenTelemetry Collector that uses the service account with RBAC for writing the data.
|
||||
|
||||
.Sample OpenTelemetry CR configuration
|
||||
[source,yaml]
|
||||
----
|
||||
apiVersion: opentelemetry.io/v1alpha1
|
||||
kind: OpenTelemetryCollector
|
||||
metadata:
|
||||
name: cluster-collector
|
||||
namespace: tracing-system
|
||||
spec:
|
||||
mode: deployment
|
||||
serviceAccount: otel-collector
|
||||
config: |
|
||||
extensions:
|
||||
bearertokenauth:
|
||||
filename: "/var/run/secrets/kubernetes.io/serviceaccount/token"
|
||||
exporters:
|
||||
otlp/dev:
|
||||
endpoint: tempo-simplest-gateway.tempo.svc.cluster.local:8090
|
||||
tls:
|
||||
insecure: false
|
||||
ca_file: "/var/run/secrets/kubernetes.io/serviceaccount/service-ca.crt"
|
||||
auth:
|
||||
authenticator: bearertokenauth
|
||||
headers:
|
||||
X-Scope-OrgID: "dev"
|
||||
service:
|
||||
extensions: [bearertokenauth]
|
||||
pipelines:
|
||||
traces:
|
||||
exporters: [otlp/dev]
|
||||
----
|
||||
88
modules/distr-tracing-tempo-config-spanmetrics.adoc
Normal file
88
modules/distr-tracing-tempo-config-spanmetrics.adoc
Normal file
@@ -0,0 +1,88 @@
|
||||
// Module included in the following assemblies:
|
||||
//
|
||||
// * distr_tracing_tempo/distr-tracing-tempo-configuring.adoc
|
||||
|
||||
:_mod-docs-content-type: REFERENCE
|
||||
[id="distr-tracing-tempo-config-spanmetrics_{context}"]
|
||||
= Configuration of the monitor tab in Jaeger UI
|
||||
|
||||
Trace data contains rich information, and the data is normalized across instrumented languages and frameworks.
|
||||
Therefore, additional metrics can be extracted from traces. These metrics are request count, duration, and error count (RED).
|
||||
The metrics can be visualized in Jaeger console in the *Monitor* tab.
|
||||
|
||||
The metrics are derived from spans in the OpenTelemetry Collector and are scraped from the Collector by the Prometheus instance that is deployed in the user-workload monitoring stack.
|
||||
The Jaeger UI queries these metrics from the Prometheus endpoint and visualizes them.
|
||||
|
||||
== OpenTelemetry Collector configuration
|
||||
|
||||
The OpenTelemetry Collector requires configuration of the `spanmetrics` connector that derives metrics from traces and exports the metrics in the Prometheus format.
|
||||
|
||||
.OpenTelemetry Collector custom resource for span RED
|
||||
[source,yaml]
|
||||
----
|
||||
kind: OpenTelemetryCollector
|
||||
apiVersion: opentelemetry.io/v1alpha1
|
||||
metadata:
|
||||
name: otel
|
||||
spec:
|
||||
mode: deployment
|
||||
observability:
|
||||
metrics:
|
||||
enableMetrics: true # <1>
|
||||
config: |
|
||||
connectors:
|
||||
spanmetrics: # <2>
|
||||
metrics_flush_interval: 15s
|
||||
|
||||
receivers:
|
||||
otlp: # <3>
|
||||
protocols:
|
||||
grpc:
|
||||
http:
|
||||
|
||||
exporters:
|
||||
prometheus: # <4>
|
||||
endpoint: 0.0.0.0:8889
|
||||
resource_to_telemetry_conversion:
|
||||
enabled: true # by default resource attributes are dropped
|
||||
|
||||
service:
|
||||
pipelines:
|
||||
traces:
|
||||
receivers: [otlp]
|
||||
exporters: [otlp, spanmetrics] # <5>
|
||||
metrics:
|
||||
receivers: [spanmetrics] # <6>
|
||||
exporters: [prometheus]
|
||||
----
|
||||
<1> Creates the `ServiceMonitor` custom resource to enable scraping of the Prometheus exporter.
|
||||
<2> The Spanmetrics connector receives traces and exports metrics.
|
||||
<3> The OTLP receiver to receive spans in the OpenTelemetry protocol.
|
||||
<4> The Prometheus exporter is used to export metrics in the Prometheus format.
|
||||
<5> The Spanmetrics connector is configured as an exporter in the traces pipeline.
|
||||
<6> The Spanmetrics connector is configured as a receiver in the metrics pipeline.
|
||||
|
||||
== Tempo configuration
|
||||
|
||||
The `TempoStack` custom resource must specify the following: the *Monitor* tab is enabled, and the Prometheus endpoint is set to the Thanos Querier service to query the data from the user-defined monitoring stack.
|
||||
|
||||
.TempoStack custom resource with the enabled Monitor tab
|
||||
[source,yaml]
|
||||
----
|
||||
kind: TempoStack
|
||||
apiVersion: tempo.grafana.com/v1alpha1
|
||||
metadata:
|
||||
name: simplest
|
||||
spec:
|
||||
template:
|
||||
queryFrontend:
|
||||
jaegerQuery:
|
||||
enabled: true
|
||||
monitorTab:
|
||||
enabled: true # <1>
|
||||
prometheusEndpoint: https://thanos-querier.openshift-monitoring.svc.cluster.local:9091 # <2>
|
||||
ingress:
|
||||
type: route
|
||||
----
|
||||
<1> Enables the monitoring tab in the Jaeger console.
|
||||
<2> The service name for Thanos Querier from user-workload monitoring.
|
||||
@@ -45,7 +45,7 @@ metadata:
|
||||
EOF
|
||||
----
|
||||
|
||||
.. Create an operator group by running the following command:
|
||||
.. Create an Operator group by running the following command:
|
||||
+
|
||||
[source,terminal]
|
||||
----
|
||||
@@ -86,7 +86,7 @@ EOF
|
||||
$ oc get csv -n openshift-tempo-operator
|
||||
----
|
||||
|
||||
. Create a project of your choice for the *TempoStack* instance that you will create in a subsequent step:
|
||||
. Create a project of your choice for the TempoStack instance that you will create in a subsequent step:
|
||||
|
||||
** To create a project from standard input without metadata:
|
||||
+
|
||||
@@ -107,7 +107,7 @@ metadata:
|
||||
EOF
|
||||
----
|
||||
|
||||
. In the project that you created for the *TempoStack* instance, create a secret for your object storage bucket by running one of the following commands:
|
||||
. In the project that you created for the TempoStack instance, create a secret for your object storage bucket by running one of the following commands:
|
||||
|
||||
** To create a secret from a YAML file:
|
||||
+
|
||||
@@ -133,9 +133,12 @@ include::snippets/distr-tracing-tempo-required-secret-parameters.adoc[]
|
||||
include::snippets/distr-tracing-tempo-secret-example.adoc[]
|
||||
--
|
||||
|
||||
. Create a *TempoStack* instance in the project that you created for the *TempoStack* instance.
|
||||
. Create a TempoStack instance in the project that you created for the TempoStack instance.
|
||||
+
|
||||
NOTE: You can create multiple *TempoStack* instances in separate projects on the same cluster.
|
||||
[NOTE]
|
||||
====
|
||||
You can create multiple TempoStack instances in separate projects on the same cluster.
|
||||
====
|
||||
+
|
||||
.. Customize the `TempoStack` custom resource (CR):
|
||||
+
|
||||
@@ -150,8 +153,8 @@ spec:
|
||||
storageSize: 1Gi
|
||||
storage:
|
||||
secret:
|
||||
name: <secret-name> <1>
|
||||
type: <secret-provider> <2>
|
||||
name: <secret-name> # <1>
|
||||
type: <secret-provider> # <2>
|
||||
template:
|
||||
queryFrontend:
|
||||
jaegerQuery:
|
||||
@@ -233,4 +236,7 @@ $ export TEMPO_URL=$(oc get route -n <control_plane_namespace> tempo -o jsonpath
|
||||
|
||||
.. Log in using your cluster administrator credentials for the web console.
|
||||
+
|
||||
NOTE: The Tempo console initially shows no trace data following the Tempo console installation.
|
||||
[NOTE]
|
||||
====
|
||||
The Tempo console initially shows no trace data following the Tempo console installation.
|
||||
====
|
||||
|
||||
@@ -50,7 +50,10 @@ include::snippets/distr-tracing-tempo-secret-example.adoc[]
|
||||
|
||||
. Create a *TempoStack* instance.
|
||||
+
|
||||
NOTE: You can create multiple *TempoStack* instances in separate projects on the same cluster.
|
||||
[NOTE]
|
||||
====
|
||||
You can create multiple *TempoStack* instances in separate projects on the same cluster.
|
||||
====
|
||||
|
||||
.. Go to *Operators* -> *Installed Operators*.
|
||||
|
||||
@@ -69,8 +72,8 @@ spec:
|
||||
storageSize: 1Gi
|
||||
storage:
|
||||
secret:
|
||||
name: <secret-name> <1>
|
||||
type: <secret-provider> <2>
|
||||
name: <secret-name> # <1>
|
||||
type: <secret-provider> # <2>
|
||||
template:
|
||||
queryFrontend:
|
||||
jaegerQuery:
|
||||
@@ -132,4 +135,7 @@ The stack deployed in this example is configured to receive Jaeger Thrift over H
|
||||
|
||||
.. Select *Log In With OpenShift* to use your cluster administrator credentials for the web console.
|
||||
+
|
||||
NOTE: The Tempo console initially shows no trace data following the Tempo console installation.
|
||||
[NOTE]
|
||||
====
|
||||
The Tempo console initially shows no trace data following the Tempo console installation.
|
||||
====
|
||||
|
||||
@@ -1,13 +0,0 @@
|
||||
//Module included in the following assemblies:
|
||||
//
|
||||
//* distr_tracing_install/dist-tracing-tempo-updating.adoc
|
||||
|
||||
:_mod-docs-content-type: CONCEPT
|
||||
[id="distr-tracing-tempo-update-olm_{context}"]
|
||||
= Automatic updates of the {TempoShortName}
|
||||
|
||||
For version upgrades, the {TempoOperator} uses the Operator Lifecycle Manager (OLM), which controls installation, upgrade, and role-based access control (RBAC) of Operators in a cluster.
|
||||
|
||||
The OLM runs in {product-title} by default. The OLM queries for available Operators as well as upgrades for installed Operators.
|
||||
|
||||
When the {TempoOperator} is upgraded to the new version, it scans for running TempoStack instances that it manages and upgrades them to the version corresponding to the Operator's new version.
|
||||
@@ -10,6 +10,7 @@
|
||||
// * distr_tracing/distr_tracing_rn/distr-tracing-rn-2-7.adoc
|
||||
// * distr_tracing/distr_tracing_rn/distr-tracing-rn-2-8.adoc
|
||||
// * distr_tracing/distr_tracing_rn/distr-tracing-rn-2-9.adoc
|
||||
// * distr_tracing/distr_tracing_rn/distr-tracing-rn-3-0.adoc
|
||||
|
||||
:_mod-docs-content-type: CONCEPT
|
||||
|
||||
|
||||
1255
modules/otel-config-collector.adoc
Normal file
1255
modules/otel-config-collector.adoc
Normal file
File diff suppressed because it is too large
Load Diff
336
modules/otel-config-instrumentation.adoc
Normal file
336
modules/otel-config-instrumentation.adoc
Normal file
@@ -0,0 +1,336 @@
|
||||
// Module included in the following assemblies:
|
||||
//
|
||||
// * otel/otel-instrumentation.adoc
|
||||
|
||||
:_mod-docs-content-type: REFERENCE
|
||||
[id="otel-instrumentation-config_{context}"]
|
||||
= OpenTelemetry instrumentation configuration options
|
||||
|
||||
The {OTELName} can inject and configure the OpenTelemetry auto-instrumentation libraries into your workloads. Currently, the project supports injection of the instrumentation libraries from Go, Java, Node.js, Python, .NET, and the Apache HTTP Server (`httpd`).
|
||||
|
||||
Auto-instrumentation in OpenTelemetry refers to the capability where the framework automatically instruments an application without manual code changes. This enables developers and administrators to get observability into their applications with minimal effort and changes to the existing codebase.
|
||||
|
||||
[IMPORTANT]
|
||||
====
|
||||
The {OTELName} Operator only supports the injection mechanism of the instrumentation libraries but does not support instrumentation libraries or upstream images. Customers can build their own instrumentation images or use community images.
|
||||
====
|
||||
|
||||
== Instrumentation options
|
||||
|
||||
Instrumentation options are specified in the `OpenTelemetryCollector` custom resource.
|
||||
|
||||
.Sample `OpenTelemetryCollector` custom resource file
|
||||
[source,yaml]
|
||||
----
|
||||
apiVersion: opentelemetry.io/v1alpha1
|
||||
kind: Instrumentation
|
||||
metadata:
|
||||
name: java-instrumentation
|
||||
spec:
|
||||
env:
|
||||
- name: OTEL_EXPORTER_OTLP_TIMEOUT
|
||||
value: "20"
|
||||
exporter:
|
||||
endpoint: http://production-collector.observability.svc.cluster.local:4317
|
||||
propagators:
|
||||
- w3c
|
||||
sampler:
|
||||
type: parentbased_traceidratio
|
||||
argument: "0.25"
|
||||
java:
|
||||
env:
|
||||
- name: OTEL_JAVAAGENT_DEBUG
|
||||
value: "true"
|
||||
----
|
||||
|
||||
//[cols=",,",options="header",]
|
||||
|
||||
.Parameters used by the Operator to define the Instrumentation
|
||||
[options="header"]
|
||||
[cols="l, a, a"]
|
||||
|===
|
||||
|Parameter |Description |Values
|
||||
|
||||
|env
|
||||
|Common environment variables to define across all the instrumentations.
|
||||
|
|
||||
|
||||
|exporter
|
||||
|Exporter configuration.
|
||||
|
|
||||
|
||||
|propagators
|
||||
|Propagators defines inter-process context propagation configuration.
|
||||
|`tracecontext`, `baggage`, `b3`, `b3multi`, `jaeger`, `ottrace`, `none`
|
||||
|
||||
|resource
|
||||
|Resource attributes configuration.
|
||||
|
|
||||
|
||||
|sampler
|
||||
|Sampling configuration.
|
||||
|
|
||||
|
||||
|apacheHttpd
|
||||
|Configuration for the Apache HTTP Server instrumentation.
|
||||
|
|
||||
|
||||
|dotnet
|
||||
|Configuration for the .NET instrumentation.
|
||||
|
|
||||
|
||||
|go
|
||||
|Configuration for the Go instrumentation.
|
||||
|
|
||||
|
||||
|java
|
||||
|Configuration for the Java instrumentation.
|
||||
|
|
||||
|
||||
|nodejs
|
||||
|Configuration for the Node.js instrumentation.
|
||||
|
|
||||
|
||||
|python
|
||||
|Configuration for the Python instrumentation.
|
||||
|
|
||||
|
||||
|===
|
||||
|
||||
== Using the instrumentation CR with Service Mesh
|
||||
|
||||
When using the instrumentation custom resource (CR) with {SMProductName}, you must use the `b3multi` propagator.
|
||||
|
||||
=== Configuration of the Apache HTTP Server auto-instrumentation
|
||||
|
||||
.Parameters for the `+.spec.apacheHttpd+` field
|
||||
[options="header"]
|
||||
[cols="l, a, a"]
|
||||
|===
|
||||
|Name |Description |Default
|
||||
|
||||
|attrs
|
||||
|Attributes specific to the Apache HTTP Server.
|
||||
|
|
||||
|
||||
|configPath
|
||||
|Location of the Apache HTTP Server configuration.
|
||||
|/usr/local/apache2/conf
|
||||
|
||||
|env
|
||||
|Environment variables specific to the Apache HTTP Server.
|
||||
|
|
||||
|
||||
|image
|
||||
|Container image with the Apache SDK and auto-instrumentation.
|
||||
|
|
||||
|
||||
|resourceRequirements
|
||||
|The compute resource requirements.
|
||||
|
|
||||
|
||||
|version
|
||||
|Apache HTTP Server version.
|
||||
|2.4
|
||||
|
||||
|===
|
||||
|
||||
.The `PodSpec` annotation to enable injection
|
||||
[source,yaml]
|
||||
----
|
||||
instrumentation.opentelemetry.io/inject-apache-httpd: "true"
|
||||
----
|
||||
|
||||
=== Configuration of the .NET auto-instrumentation
|
||||
|
||||
[options="header"]
|
||||
[cols="l, a"]
|
||||
|===
|
||||
|Name |Description
|
||||
|
||||
|env
|
||||
|Environment variables specific to .NET.
|
||||
|
||||
|image
|
||||
|Container image with the .NET SDK and auto-instrumentation.
|
||||
|
||||
|resourceRequirements
|
||||
|The compute resource requirements.
|
||||
|
||||
|===
|
||||
|
||||
For the .NET auto-instrumentation, the `OTEL_EXPORTER_OTLP_ENDPOINT` environment variable must be set if the endpoint of the exporters is set to `4317`. The .NET auto-instrumentation uses `http/proto` by default, so the telemetry data must be sent to the `4318` port.
|
||||
|
||||
.The `PodSpec` annotation to enable injection
|
||||
[source,yaml]
|
||||
----
|
||||
instrumentation.opentelemetry.io/inject-dotnet: "true"
|
||||
----
|
||||
|
||||
=== Configuration of the Go auto-instrumentation
|
||||
|
||||
[options="header"]
|
||||
[cols="l, a"]
|
||||
|===
|
||||
|Name |Description
|
||||
|
||||
|env
|
||||
|Environment variables specific to Go.
|
||||
|
||||
|image
|
||||
|Container image with the Go SDK and auto-instrumentation.
|
||||
|
||||
|resourceRequirements
|
||||
|The compute resource requirements.
|
||||
|
||||
|===
|
||||
|
||||
.The `PodSpec` annotation to enable injection
|
||||
[source,yaml]
|
||||
----
|
||||
instrumentation.opentelemetry.io/inject-go: "true"
|
||||
----
|
||||
|
||||
.Additional permissions required for the Go auto-instrumentation in the OpenShift cluster
|
||||
[source,yaml]
|
||||
----
|
||||
apiVersion: security.openshift.io/v1
|
||||
kind: SecurityContextConstraints
|
||||
metadata:
|
||||
name: otel-go-instrumentation-scc
|
||||
allowHostDirVolumePlugin: true
|
||||
allowPrivilegeEscalation: true
|
||||
allowPrivilegedContainer: true
|
||||
allowedCapabilities:
|
||||
- "SYS_PTRACE"
|
||||
fsGroup:
|
||||
type: RunAsAny
|
||||
runAsUser:
|
||||
type: RunAsAny
|
||||
seLinuxContext:
|
||||
type: RunAsAny
|
||||
seccompProfiles:
|
||||
- '*'
|
||||
supplementalGroups:
|
||||
type: RunAsAny
|
||||
----
|
||||
|
||||
[TIP]
|
||||
====
|
||||
The CLI command for applying the permissions for the Go auto-instrumentation in the OpenShift cluster is as follows:
|
||||
[source,terminal]
|
||||
----
|
||||
$ oc adm policy add-scc-to-user otel-go-instrumentation-scc -z <service_account>
|
||||
----
|
||||
====
|
||||
|
||||
=== Configuration of the Java auto-instrumentation
|
||||
|
||||
[options="header"]
|
||||
[cols="l, a"]
|
||||
|===
|
||||
|Name |Description
|
||||
|
||||
|env
|
||||
|Environment variables specific to Java.
|
||||
|
||||
|image
|
||||
|Container image with the Java SDK and auto-instrumentation.
|
||||
|
||||
|resourceRequirements
|
||||
|The compute resource requirements.
|
||||
|
||||
|===
|
||||
|
||||
.The `PodSpec` annotation to enable injection
|
||||
[source,yaml]
|
||||
----
|
||||
instrumentation.opentelemetry.io/inject-java: "true"
|
||||
----
|
||||
|
||||
=== Configuration of the Node.js auto-instrumentation
|
||||
|
||||
[options="header"]
|
||||
[cols="l, a"]
|
||||
|===
|
||||
|Name |Description
|
||||
|
||||
|env
|
||||
|Environment variables specific to Node.js.
|
||||
|
||||
|image
|
||||
|Container image with the Node.js SDK and auto-instrumentation.
|
||||
|
||||
|resourceRequirements
|
||||
|The compute resource requirements.
|
||||
|
||||
|===
|
||||
|
||||
.The `PodSpec` annotations to enable injection
|
||||
[source,yaml]
|
||||
----
|
||||
instrumentation.opentelemetry.io/inject-nodejs: "true"
|
||||
instrumentation.opentelemetry.io/otel-go-auto-target-exe: "/path/to/container/executable"
|
||||
----
|
||||
|
||||
The `+instrumentation.opentelemetry.io/otel-go-auto-target-exe+` annotation sets the value for the required `OTEL_GO_AUTO_TARGET_EXE` environment variable.
|
||||
|
||||
=== Configuration of the Python auto-instrumentation
|
||||
|
||||
[options="header"]
|
||||
[cols="l, a"]
|
||||
|===
|
||||
|Name |Description
|
||||
|
||||
|env
|
||||
|Environment variables specific to Python.
|
||||
|
||||
|image
|
||||
|Container image with the Python SDK and auto-instrumentation.
|
||||
|
||||
|resourceRequirements
|
||||
|The compute resource requirements.
|
||||
|
||||
|===
|
||||
|
||||
For the Python auto-instrumentation, the `OTEL_EXPORTER_OTLP_ENDPOINT` environment variable must be set if the endpoint of the exporters is set to `4317`. The Python auto-instrumentation uses `http/proto` by default, so the telemetry data must be sent to the `4318` port.
|
||||
|
||||
.The `PodSpec` annotation to enable injection
|
||||
[source,yaml]
|
||||
----
|
||||
instrumentation.opentelemetry.io/inject-python: "true"
|
||||
----
|
||||
|
||||
=== Configuration of the OpenTelemetry SDK variables
|
||||
|
||||
The OpenTelemetry SDK variables in your pod are configurable by using the following annotation:
|
||||
|
||||
[source,yaml]
|
||||
----
|
||||
instrumentation.opentelemetry.io/inject-sdk: "true"
|
||||
----
|
||||
|
||||
Note that all the annotations accept the following values:
|
||||
|
||||
`true`:: Injects the `+Instrumentation+` resource from the namespace.
|
||||
|
||||
`false`:: Does not inject any instrumentation.
|
||||
|
||||
`instrumentation-name`:: The name of the instrumentation resource to inject from the current namespace.
|
||||
|
||||
`other-namespace/instrumentation-name`:: The name of the instrumentation resource to inject from another namespace.
|
||||
|
||||
=== Multi-container pods
|
||||
|
||||
The instrumentation is run on the first container that is available by default according to the pod specification. In some cases, you can also specify target containers for injection.
|
||||
|
||||
.Pod annotation
|
||||
[source,yaml]
|
||||
----
|
||||
instrumentation.opentelemetry.io/container-names: "<container_1>,<container_2>"
|
||||
----
|
||||
|
||||
[NOTE]
|
||||
====
|
||||
The Go auto-instrumentation does not support multi-container auto-instrumentation injection.
|
||||
====
|
||||
160
modules/otel-config-multicluster.adoc
Normal file
160
modules/otel-config-multicluster.adoc
Normal file
@@ -0,0 +1,160 @@
|
||||
// Module included in the following assemblies:
|
||||
//
|
||||
// * otel/otel-configuring.adoc
|
||||
|
||||
:_mod-docs-content-type: PROCEDURE
|
||||
[id="gathering-observability-data-from-different-clusters_{context}"]
|
||||
= Gathering the observability data from different clusters with the OpenTelemetry Collector
|
||||
|
||||
For a multicluster configuration, you can create one OpenTelemetry
|
||||
Collector instance in each one of the remote clusters and forward all the telemetry
|
||||
data to one OpenTelemetry Collector instance.
|
||||
|
||||
.Prerequisites
|
||||
|
||||
* The {OTELOperator} is installed.
|
||||
* The {TempoOperator} is installed.
|
||||
* A TempoStack is deployed on the cluster.
|
||||
|
||||
.Procedure
|
||||
|
||||
. Create a service account for the OpenTelemetry Collector.
|
||||
+
|
||||
.Example ServiceAccount
|
||||
[source,yaml]
|
||||
----
|
||||
apiVersion: v1
|
||||
kind: ServiceAccount
|
||||
metadata:
|
||||
name: otel-collector-deployment
|
||||
----
|
||||
|
||||
. Create a cluster role for the service account.
|
||||
+
|
||||
.Example ClusterRole
|
||||
[source,yaml]
|
||||
----
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRole
|
||||
metadata:
|
||||
name: otel-collector
|
||||
rules:
|
||||
# <1>
|
||||
# <2>
|
||||
- apiGroups: ["", "config.openshift.io"]
|
||||
resources: ["pods", "namespaces", "infrastructures", "infrastructures/status"]
|
||||
verbs: ["get", "watch", "list"]
|
||||
----
|
||||
<1> The `k8sattributesprocessor` requires permissions for pods and namespace resources.
|
||||
<2> The `resourcedetectionprocessor` requires permissions for infrastructures and status.
|
||||
|
||||
. Bind the cluster role to the service account.
|
||||
+
|
||||
.Example ClusterRoleBinding
|
||||
[source,yaml]
|
||||
----
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRoleBinding
|
||||
metadata:
|
||||
name: otel-collector
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: otel-collector-deployment
|
||||
namespace: otel-collector-<example>
|
||||
roleRef:
|
||||
kind: ClusterRole
|
||||
name: otel-collector
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
----
|
||||
|
||||
. Create the YAML file to define the `OpenTelemetryCollector` custom resource (CR) in the edge clusters.
|
||||
+
|
||||
.Example `OpenTelemetryCollector` custom resource for the edge clusters
|
||||
[source,yaml]
|
||||
----
|
||||
apiVersion: opentelemetry.io/v1alpha1
|
||||
kind: OpenTelemetryCollector
|
||||
metadata:
|
||||
name: otel
|
||||
namespace: otel-collector-<example>
|
||||
spec:
|
||||
mode: daemonset
|
||||
serviceAccount: otel-collector-deployment
|
||||
config: |
|
||||
receivers:
|
||||
jaeger:
|
||||
protocols:
|
||||
grpc:
|
||||
thrift_binary:
|
||||
thrift_compact:
|
||||
thrift_http:
|
||||
opencensus:
|
||||
otlp:
|
||||
protocols:
|
||||
grpc:
|
||||
http:
|
||||
zipkin:
|
||||
processors:
|
||||
batch:
|
||||
k8sattributes:
|
||||
memory_limiter:
|
||||
check_interval: 1s
|
||||
limit_percentage: 50
|
||||
spike_limit_percentage: 30
|
||||
resourcedetection:
|
||||
detectors: [openshift]
|
||||
exporters:
|
||||
otlphttp:
|
||||
endpoint: https://observability-cluster.com:443 # <1>
|
||||
insecure: false
|
||||
compression: on
|
||||
tls:
|
||||
cert_file: "/path/to/server-cert.pem"
|
||||
key_file: "/path/to/server-key.pem"
|
||||
client_ca_file: "/path/to/client-ca.pem"
|
||||
|
||||
service:
|
||||
pipelines:
|
||||
traces:
|
||||
receivers: [jaeger, opencensus, otlp, zipkin]
|
||||
processors: [memory_limiter, k8sattributes, resourcedetection, batch]
|
||||
exporters: [otlphttp]
|
||||
----
|
||||
<1> The Collector exporter is configured to export OTLP HTTP and points to the OpenTelemetry Collector from the central cluster.
|
||||
|
||||
. Create the YAML file to define the `OpenTelemetryCollector` custom resource (CR) in the central cluster.
|
||||
+
|
||||
.Example `OpenTelemetryCollector` custom resource for the central cluster
|
||||
[source,yaml]
|
||||
----
|
||||
apiVersion: opentelemetry.io/v1alpha1
|
||||
kind: OpenTelemetryCollector
|
||||
metadata:
|
||||
name: otlp-receiver
|
||||
namespace: observability
|
||||
spec:
|
||||
mode: "deployment"
|
||||
ingress:
|
||||
type: route
|
||||
hostname: "observability-cluster.com"
|
||||
route:
|
||||
termination: "edge"
|
||||
config: |
|
||||
receivers:
|
||||
otlp:
|
||||
protocols:
|
||||
http:
|
||||
exporters:
|
||||
logging:
|
||||
otlp:
|
||||
endpoint: "tempo-<simplest>-distributor:4317" # <1>
|
||||
tls:
|
||||
insecure: true
|
||||
service:
|
||||
pipelines:
|
||||
traces:
|
||||
receivers: [otlp]
|
||||
processors: []
|
||||
exporters: [otlp]
|
||||
----
|
||||
<1> The Collector exporter is configured to export OTLP and points to the Tempo distributor endpoint, which in this example is `"tempo-simplest-distributor:4317"` and already created.
|
||||
67
modules/otel-config-send-metrics-monitoring-stack.adoc
Normal file
67
modules/otel-config-send-metrics-monitoring-stack.adoc
Normal file
@@ -0,0 +1,67 @@
|
||||
// Module included in the following assemblies:
|
||||
//
|
||||
// * otel/deploying-otel.adoc
|
||||
|
||||
:_mod-docs-content-type: REFERENCE
|
||||
[id="configuration-for-sending-metrics-to-the-monitoring-stack_{context}"]
|
||||
= Configuration for sending metrics to the monitoring stack
|
||||
|
||||
The OpenTelemetry Collector custom resource (CR) can be configured to create a Prometheus `ServiceMonitor` CR for scraping the Collector's pipeline metrics and the enabled Prometheus exporters.
|
||||
|
||||
.Example of the OpenTelemetry Collector custom resource with the Prometheus exporter
|
||||
[source,yaml]
|
||||
----
|
||||
spec:
|
||||
mode: deployment
|
||||
observability:
|
||||
metrics:
|
||||
enableMetrics: true # <1>
|
||||
config: |
|
||||
exporters:
|
||||
prometheus:
|
||||
endpoint: 0.0.0.0:8889
|
||||
resource_to_telemetry_conversion:
|
||||
enabled: true # by default resource attributes are dropped
|
||||
service:
|
||||
telemetry:
|
||||
metrics:
|
||||
address: ":8888"
|
||||
pipelines:
|
||||
metrics:
|
||||
receivers: [otlp]
|
||||
exporters: [prometheus]
|
||||
----
|
||||
<1> Configures the Operator to create the Prometheus `ServiceMonitor` CR to scrape the Collector's internal metrics endpoint and the Prometheus exporter metrics endpoints. The metrics are stored in the OpenShift monitoring stack.
|
||||
|
||||
Alternatively, a manually created Prometheus `PodMonitor` can provide fine control, for example removing duplicated labels added during Prometheus scraping.
|
||||
|
||||
.Example of the `PodMonitor` custom resource that configures the monitoring stack to scrape the Collector metrics
|
||||
[source,yaml]
|
||||
----
|
||||
apiVersion: monitoring.coreos.com/v1
|
||||
kind: PodMonitor
|
||||
metadata:
|
||||
name: otel-collector
|
||||
spec:
|
||||
selector:
|
||||
matchLabels:
|
||||
app.kubernetes.io/name: <cr_name>-collector # <1>
|
||||
podMetricsEndpoints:
|
||||
- port: metrics # <2>
|
||||
- port: promexporter # <3>
|
||||
relabelings:
|
||||
- action: labeldrop
|
||||
regex: pod
|
||||
- action: labeldrop
|
||||
regex: container
|
||||
- action: labeldrop
|
||||
regex: endpoint
|
||||
metricRelabelings:
|
||||
- action: labeldrop
|
||||
regex: instance
|
||||
- action: labeldrop
|
||||
regex: job
|
||||
----
|
||||
<1> The name of the OpenTelemetry Collector custom resource.
|
||||
<2> The name of the internal metrics port for the OpenTelemetry Collector. This port name is always `metrics`.
|
||||
<3> The name of the Prometheus exporter port for the OpenTelemetry Collector.
|
||||
35
modules/otel-configuring-otelcol-metrics.adoc
Normal file
35
modules/otel-configuring-otelcol-metrics.adoc
Normal file
@@ -0,0 +1,35 @@
|
||||
// Module included in the following assemblies:
|
||||
//
|
||||
// * otel/otel-configuring.adoc
|
||||
|
||||
:_mod-docs-content-type: PROCEDURE
|
||||
[id="configuring-otelcol-metrics_{context}"]
|
||||
= Configuring the OpenTelemetry Collector metrics
|
||||
|
||||
You can enable metrics and alerts of OpenTelemetry Collector instances.
|
||||
|
||||
.Prerequisites
|
||||
|
||||
* Monitoring for user-defined projects is enabled in the cluster.
|
||||
|
||||
.Procedure
|
||||
|
||||
* To enable metrics of an OpenTelemetry Collector instance, set the `spec.observability.metrics.enableMetrics` field to `true`:
|
||||
+
|
||||
[source,yaml]
|
||||
----
|
||||
apiVersion: opentelemetry.io/v1alpha1
|
||||
kind: OpenTelemetryCollector
|
||||
metadata:
|
||||
name: <name>
|
||||
spec:
|
||||
observability:
|
||||
metrics:
|
||||
enableMetrics: true
|
||||
----
|
||||
|
||||
.Verification
|
||||
|
||||
You can use the *Administrator* view of the web console to verify successful configuration:
|
||||
|
||||
* Go to *Observe* -> *Targets*, filter by *Source: User*, and check that the *ServiceMonitors* in the `opentelemetry-collector-<instance_name>` format have the *Up* status.
|
||||
@@ -1,9 +1,9 @@
|
||||
// Module included in the following assemblies:
|
||||
//
|
||||
// * /distr_tracing/distr_tracing_otel/distr-tracing-otel-using.adoc
|
||||
// * otel/otel-using.adoc
|
||||
|
||||
:_mod-docs-content-type: PROCEDURE
|
||||
[id="distr-tracing-otel-forwarding_{context}"]
|
||||
[id="forwarding-traces_{context}"]
|
||||
= Forwarding traces to a TempoStack by using the OpenTelemetry Collector
|
||||
|
||||
To configure forwarding traces to a TempoStack, you can deploy and configure the OpenTelemetry Collector. You can deploy the OpenTelemetry Collector in the deployment mode by using the specified processors, receivers, and exporters. For other modes, see the OpenTelemetry Collector documentation linked in _Additional resources_.
|
||||
@@ -37,8 +37,8 @@ kind: ClusterRole
|
||||
metadata:
|
||||
name: otel-collector
|
||||
rules:
|
||||
<1>
|
||||
<2>
|
||||
# <1>
|
||||
# <2>
|
||||
- apiGroups: ["", "config.openshift.io"]
|
||||
resources: ["pods", "namespaces", "infrastructures", "infrastructures/status"]
|
||||
verbs: ["get", "watch", "list"]
|
||||
@@ -102,13 +102,13 @@ spec:
|
||||
detectors: [openshift]
|
||||
exporters:
|
||||
otlp:
|
||||
endpoint: "tempo-simplest-distributor:4317" <1>
|
||||
endpoint: "tempo-simplest-distributor:4317" # <1>
|
||||
tls:
|
||||
insecure: true
|
||||
service:
|
||||
pipelines:
|
||||
traces:
|
||||
receivers: [jaeger, opencensus, otlp, zipkin] <2>
|
||||
receivers: [jaeger, opencensus, otlp, zipkin] # <2>
|
||||
processors: [memory_limiter, k8sattributes, resourcedetection, batch]
|
||||
exporters: [otlp]
|
||||
----
|
||||
179
modules/otel-install-cli.adoc
Normal file
179
modules/otel-install-cli.adoc
Normal file
@@ -0,0 +1,179 @@
|
||||
// Module included in the following assemblies:
|
||||
//
|
||||
//* otel/otel-installing.adoc
|
||||
|
||||
:_mod-docs-content-type: PROCEDURE
|
||||
[id="installing-otel-by-using-the-cli_{context}"]
|
||||
= Installing the {OTELShortName} by using the CLI
|
||||
|
||||
You can install the {OTELShortName} from the command line.
|
||||
|
||||
.Prerequisites
|
||||
|
||||
* An active {oc-first} session by a cluster administrator with the `cluster-admin` role.
|
||||
+
|
||||
[TIP]
|
||||
====
|
||||
* Ensure that your {oc-first} version is up to date and matches your {product-title} version.
|
||||
|
||||
* Run `oc login`:
|
||||
+
|
||||
[source,terminal]
|
||||
----
|
||||
$ oc login --username=<your_username>
|
||||
----
|
||||
====
|
||||
|
||||
.Procedure
|
||||
|
||||
. Install the {OTELOperator}:
|
||||
|
||||
.. Create a project for the {OTELOperator} by running the following command:
|
||||
+
|
||||
[source,terminal]
|
||||
----
|
||||
$ oc apply -f - << EOF
|
||||
apiVersion: project.openshift.io/v1
|
||||
kind: Project
|
||||
metadata:
|
||||
labels:
|
||||
kubernetes.io/metadata.name: openshift-opentelemetry-operator
|
||||
openshift.io/cluster-monitoring: "true"
|
||||
name: openshift-opentelemetry-operator
|
||||
EOF
|
||||
----
|
||||
|
||||
.. Create an Operator group by running the following command:
|
||||
+
|
||||
[source,terminal]
|
||||
----
|
||||
$ oc apply -f - << EOF
|
||||
apiVersion: operators.coreos.com/v1
|
||||
kind: OperatorGroup
|
||||
metadata:
|
||||
name: openshift-opentelemetry-operator
|
||||
namespace: openshift-opentelemetry-operator
|
||||
spec:
|
||||
upgradeStrategy: Default
|
||||
EOF
|
||||
----
|
||||
|
||||
.. Create a subscription by running the following command:
|
||||
+
|
||||
[source,terminal]
|
||||
----
|
||||
$ oc apply -f - << EOF
|
||||
apiVersion: operators.coreos.com/v1alpha1
|
||||
kind: Subscription
|
||||
metadata:
|
||||
name: opentelemetry-product
|
||||
namespace: openshift-opentelemetry-operator
|
||||
spec:
|
||||
channel: stable
|
||||
installPlanApproval: Automatic
|
||||
name: opentelemetry-product
|
||||
source: redhat-operators
|
||||
sourceNamespace: openshift-marketplace
|
||||
EOF
|
||||
----
|
||||
|
||||
.. Check the Operator status by running the following command:
|
||||
+
|
||||
[source,terminal]
|
||||
----
|
||||
$ oc get csv -n openshift-opentelemetry-operator
|
||||
----
|
||||
|
||||
. Create a project of your choice for the OpenTelemetry Collector instance that you will create in a subsequent step:
|
||||
|
||||
** To create a project without metadata, run the following command:
|
||||
+
|
||||
[source,terminal]
|
||||
----
|
||||
$ oc new-project <project_of_opentelemetry_collector_instance>
|
||||
----
|
||||
|
||||
** To create a project with metadata, run the following command:
|
||||
+
|
||||
[source,terminal]
|
||||
----
|
||||
$ oc apply -f - << EOF
|
||||
apiVersion: project.openshift.io/v1
|
||||
kind: Project
|
||||
metadata:
|
||||
name: <project_of_opentelemetry_collector_instance>
|
||||
EOF
|
||||
----
|
||||
|
||||
. Create an OpenTelemetry Collector instance in the project that you created for it.
|
||||
+
|
||||
[NOTE]
|
||||
====
|
||||
You can create multiple OpenTelemetry Collector instances in separate projects on the same cluster.
|
||||
====
|
||||
+
|
||||
.. Customize the `OpenTelemetryCollector` custom resource (CR) with the OTLP, Jaeger, and Zipkin receivers and the debug exporter:
|
||||
+
|
||||
[source,yaml]
|
||||
----
|
||||
apiVersion: opentelemetry.io/v1alpha1
|
||||
kind: OpenTelemetryCollector
|
||||
metadata:
|
||||
name: otel
|
||||
namespace: <project_of_opentelemetry_collector_instance>
|
||||
spec:
|
||||
mode: deployment
|
||||
config: |
|
||||
receivers:
|
||||
otlp:
|
||||
protocols:
|
||||
grpc:
|
||||
http:
|
||||
jaeger:
|
||||
protocols:
|
||||
grpc:
|
||||
thrift_binary:
|
||||
thrift_compact:
|
||||
thrift_http:
|
||||
zipkin:
|
||||
processors:
|
||||
batch:
|
||||
memory_limiter:
|
||||
check_interval: 1s
|
||||
limit_percentage: 50
|
||||
spike_limit_percentage: 30
|
||||
exporters:
|
||||
debug:
|
||||
service:
|
||||
pipelines:
|
||||
traces:
|
||||
receivers: [otlp,jaeger,zipkin]
|
||||
processors: [memory_limiter,batch]
|
||||
exporters: [debug]
|
||||
----
|
||||
|
||||
.. Apply the customized CR by running the following command:
|
||||
+
|
||||
[source,terminal]
|
||||
----
|
||||
$ oc apply -f - << EOF
|
||||
<OpenTelemetryCollector_custom_resource>
|
||||
EOF
|
||||
----
|
||||
|
||||
|
||||
.Verification
|
||||
|
||||
. Verify that the `status.phase` of the OpenTelemetry Collector pod is `Running` and the `conditions` are `type: Ready` by running the following command:
|
||||
+
|
||||
[source,terminal]
|
||||
----
|
||||
$ oc get pod -l app.kubernetes.io/managed-by=opentelemetry-operator,app.kubernetes.io/instance=<namespace>.<instance_name> -o yaml
|
||||
----
|
||||
|
||||
. Get the OpenTelemetry Collector service by running the following command:
|
||||
+
|
||||
[source,terminal]
|
||||
----
|
||||
$ oc get service -l app.kubernetes.io/managed-by=opentelemetry-operator,app.kubernetes.io/instance=<namespace>.<instance_name>
|
||||
----
|
||||
@@ -1,10 +1,9 @@
|
||||
////
|
||||
This module included in the following assemblies:
|
||||
- distr_tracing_otel/distr-tracing-otel-installing.adoc
|
||||
////
|
||||
// Module included in the following assemblies:
|
||||
//
|
||||
// * otel/otel-installing.adoc
|
||||
|
||||
:_mod-docs-content-type: PROCEDURE
|
||||
[id="distr-tracing-install-otel-operator_{context}"]
|
||||
[id="installing-otel-by-using-the-web-console_{context}"]
|
||||
= Installing the {OTELShortName} from the web console
|
||||
|
||||
You can install the {OTELShortName} from the *Administrator* view of the web console.
|
||||
@@ -15,20 +14,6 @@ You can install the {OTELShortName} from the *Administrator* view of the web con
|
||||
|
||||
* For {product-dedicated}, you must be logged in using an account with the `dedicated-admin` role.
|
||||
|
||||
* An active {oc-first} session by a cluster administrator with the `cluster-admin` role.
|
||||
+
|
||||
[TIP]
|
||||
====
|
||||
* Ensure that your {oc-first} version is up to date and matches your {product-title} version.
|
||||
|
||||
* Run `oc login`:
|
||||
+
|
||||
[source,terminal]
|
||||
----
|
||||
$ oc login --username=<your_username>
|
||||
----
|
||||
====
|
||||
|
||||
.Procedure
|
||||
|
||||
. Install the {OTELOperator}:
|
||||
@@ -55,9 +40,9 @@ This installs the Operator with the default presets:
|
||||
|
||||
.. Go to *Operators* -> *Installed Operators*.
|
||||
|
||||
.. Select *OpenTelemetry Collector* -> *Create OpenTelemetryCollector* -> *YAML view*.
|
||||
.. Select *OpenTelemetry Collector* -> *Create OpenTelemetry Collector* -> *YAML view*.
|
||||
|
||||
.. In the *YAML view*, customize the `OpenTelemetryCollector` custom resource (CR) with the OTLP, Jaeger, Zipkin receiver, and logging exporter.
|
||||
.. In the *YAML view*, customize the `OpenTelemetryCollector` custom resource (CR) with the OTLP, Jaeger, Zipkin receivers and the debug exporter.
|
||||
+
|
||||
[source,yaml]
|
||||
----
|
||||
@@ -88,29 +73,21 @@ spec:
|
||||
limit_percentage: 50
|
||||
spike_limit_percentage: 30
|
||||
exporters:
|
||||
logging:
|
||||
debug:
|
||||
service:
|
||||
pipelines:
|
||||
traces:
|
||||
receivers: [otlp,jaeger,zipkin]
|
||||
processors: [memory_limiter,batch]
|
||||
exporters: [logging]
|
||||
exporters: [debug]
|
||||
----
|
||||
|
||||
.. Select *Create*.
|
||||
|
||||
.Verification
|
||||
|
||||
. Verify that the `status.phase` of the OpenTelemetry Collector pod is `Running` and the `conditions` are `type: Ready` by running the following command:
|
||||
+
|
||||
[source,terminal]
|
||||
----
|
||||
$ oc get pod -l app.kubernetes.io/managed-by=opentelemetry-operator,app.kubernetes.io/instance=<namespace>.<instance_name> -o yaml
|
||||
----
|
||||
. Use the *Project:* dropdown list to select the project of the *OpenTelemetry Collector* instance.
|
||||
|
||||
. Get the OpenTelemetry Collector service by running the following command:
|
||||
+
|
||||
[source,terminal]
|
||||
----
|
||||
$ oc get service -l app.kubernetes.io/managed-by=opentelemetry-operator,app.kubernetes.io/instance=<namespace>.<instance_name>
|
||||
----
|
||||
. Go to *Operators* -> *Installed Operators* to verify that the *Status* of the *OpenTelemetry Collector* instance is *Condition: Ready*.
|
||||
|
||||
. Go to *Workloads* -> *Pods* to verify that all the component pods of the *OpenTelemetry Collector* instance are running.
|
||||
@@ -1,9 +1,9 @@
|
||||
// Module included in the following assemblies:
|
||||
//
|
||||
// * distr-tracing-otel-migrating.adoc
|
||||
// * otel-migrating.adoc
|
||||
|
||||
:_mod-docs-content-type: PROCEDURE
|
||||
[id="distr-tracing-otel-migrating-from-jaeger-with-sidecars_{context}"]
|
||||
[id="migrating-to-otel-from-jaeger-with-sidecars_{context}"]
|
||||
= Migrating from the {JaegerShortName} to the {OTELShortName} with sidecars
|
||||
|
||||
The {OTELShortName} Operator supports sidecar injection into deployment workloads, so you can migrate from a {JaegerShortName} sidecar to a {OTELShortName} sidecar.
|
||||
@@ -45,7 +45,7 @@ spec:
|
||||
timeout: 2s
|
||||
exporters:
|
||||
otlp:
|
||||
endpoint: "tempo-<example>-gateway:8090" <1>
|
||||
endpoint: "tempo-<example>-gateway:8090" # <1>
|
||||
tls:
|
||||
insecure: true
|
||||
service:
|
||||
@@ -76,7 +76,7 @@ kind: ClusterRole
|
||||
metadata:
|
||||
name: otel-collector-sidecar
|
||||
rules:
|
||||
<1>
|
||||
# <1>
|
||||
- apiGroups: ["config.openshift.io"]
|
||||
resources: ["infrastructures", "infrastructures/status"]
|
||||
verbs: ["get", "watch", "list"]
|
||||
@@ -1,9 +1,9 @@
|
||||
// Module included in the following assemblies:
|
||||
//
|
||||
// * distr-tracing-otel-migrating.adoc
|
||||
// * otel/otel-migrating.adoc
|
||||
|
||||
:_mod-docs-content-type: PROCEDURE
|
||||
[id="distr-tracing-otel-migrating-from-jaeger-without-sidecars_{context}"]
|
||||
[id="migrating-to-otel-from-jaeger-without-sidecars_{context}"]
|
||||
= Migrating from the {JaegerShortName} to the {OTELShortName} without sidecars
|
||||
|
||||
You can migrate from the {JaegerShortName} to the {OTELShortName} without sidecar deployment.
|
||||
@@ -47,8 +47,8 @@ kind: ClusterRole
|
||||
metadata:
|
||||
name: otel-collector
|
||||
rules:
|
||||
<1>
|
||||
<2>
|
||||
# <1>
|
||||
# <2>
|
||||
- apiGroups: ["", "config.openshift.io"]
|
||||
resources: ["pods", "namespaces", "infrastructures", "infrastructures/status"]
|
||||
verbs: ["get", "watch", "list"]
|
||||
@@ -76,7 +76,10 @@ roleRef:
|
||||
|
||||
. Create the OpenTelemetry Collector instance.
|
||||
+
|
||||
NOTE: This collector will export traces to a TempoStack instance. You must create your TempoStack instance by using the Red Hat Tempo Operator and place here the correct endpoint.
|
||||
[NOTE]
|
||||
====
|
||||
This collector will export traces to a TempoStack instance. You must create your TempoStack instance by using the Red Hat Tempo Operator and place here the correct endpoint.
|
||||
====
|
||||
+
|
||||
[source,yaml]
|
||||
----
|
||||
@@ -125,6 +128,6 @@ spec:
|
||||
.Example of exporting traces by using the `jaegerexporter` with Golang
|
||||
[source,golang]
|
||||
----
|
||||
exp, err := jaeger.New(jaeger.WithCollectorEndpoint(jaeger.WithEndpoint(url))) <1>
|
||||
exp, err := jaeger.New(jaeger.WithCollectorEndpoint(jaeger.WithEndpoint(url))) # <1>
|
||||
----
|
||||
<1> The URL points to the OpenTelemetry Collector API endpoint.
|
||||
@@ -1,12 +1,12 @@
|
||||
//Module included in the following assemblies:
|
||||
//
|
||||
//* distr_tracing_install/dist-tracing-otel-removing.adoc
|
||||
// * otel/otel-removing.adoc
|
||||
|
||||
:_mod-docs-content-type: PROCEDURE
|
||||
[id="distr-tracing-removing-otel-instance-cli_{context}"]
|
||||
= Removing a {OTELShortName} instance by using the CLI
|
||||
[id="removing-otel-instance-cli_{context}"]
|
||||
= Removing an OpenTelemetry Collector instance by using the CLI
|
||||
|
||||
You can remove a {OTELShortName} instance on the command line.
|
||||
You can remove an OpenTelemetry Collector instance on the command line.
|
||||
|
||||
.Prerequisites
|
||||
|
||||
@@ -26,14 +26,14 @@ $ oc login --username=<your_username>
|
||||
|
||||
.Procedure
|
||||
|
||||
. Get the name of the {OTELShortName} instance by running the following command:
|
||||
. Get the name of the OpenTelemetry Collector instance by running the following command:
|
||||
+
|
||||
[source,terminal]
|
||||
----
|
||||
$ oc get deployments -n <project_of_opentelemetry_instance>
|
||||
----
|
||||
|
||||
. Remove the {OTELShortName} instance by running the following command:
|
||||
. Remove the OpenTelemetry Collector instance by running the following command:
|
||||
+
|
||||
[source,terminal]
|
||||
----
|
||||
@@ -44,7 +44,7 @@ $ oc delete opentelemetrycollectors <opentelemetry_instance_name> -n <project_of
|
||||
|
||||
.Verification
|
||||
|
||||
* To verify successful removal of the {OTELShortName} instance, run `oc get deployments` again:
|
||||
* To verify successful removal of the OpenTelemetry Collector instance, run `oc get deployments` again:
|
||||
+
|
||||
[source,terminal]
|
||||
----
|
||||
@@ -1,12 +1,12 @@
|
||||
//Module included in the following assemblies:
|
||||
//
|
||||
//* distr_tracing_install/dist-tracing-otel-removing.adoc
|
||||
// * otel/otel-removing.adoc
|
||||
|
||||
:_mod-docs-content-type: PROCEDURE
|
||||
[id="distr-tracing-removing-otel-instance_{context}"]
|
||||
= Removing a {OTELShortName} instance by using the web console
|
||||
[id="removing-otel-instance_{context}"]
|
||||
= Removing an OpenTelemetry Collector instance by using the web console
|
||||
|
||||
You can remove a {OTELShortName} instance in the *Administrator* view of the web console.
|
||||
You can remove an OpenTelemetry Collector instance in the *Administrator* view of the web console.
|
||||
|
||||
.Prerequisites
|
||||
|
||||
@@ -1,18 +1,18 @@
|
||||
// Module included in the following assemblies:
|
||||
//
|
||||
// * /distr_tracing/distr_tracing_otel/distr-tracing-otel-using.adoc
|
||||
// * otel/otel-using.adoc
|
||||
|
||||
:_mod-docs-content-type: PROCEDURE
|
||||
[id="distr-tracing-otel-send-traces-and-metrics-to-otel-collector-with-sidecar_{context}"]
|
||||
[id="sending-traces-and-metrics-to-otel-collector-with-sidecar_{context}"]
|
||||
= Sending traces and metrics to the OpenTelemetry Collector with sidecar injection
|
||||
|
||||
You can set up sending telemetry data to an OpenTelemetryCollector instance with sidecar injection.
|
||||
You can set up sending telemetry data to an OpenTelemetry Collector instance with sidecar injection.
|
||||
|
||||
The {OTELOperator} allows sidecar injection into deployment workloads and automatic configuration of your instrumentation to send telemetry data to the OpenTelemetry Collector.
|
||||
|
||||
.Prerequisites
|
||||
|
||||
* The {TempoName} is installed and a TempoStack instance is deployed.
|
||||
* The {TempoName} is installed, and a TempoStack instance is deployed.
|
||||
|
||||
* You have access to the cluster through the web console or the {oc-first}:
|
||||
|
||||
@@ -24,7 +24,7 @@ The {OTELOperator} allows sidecar injection into deployment workloads and automa
|
||||
|
||||
.Procedure
|
||||
|
||||
. Create a project for the OpenTelemtry Collector.
|
||||
. Create a project for an OpenTelemetry Collector instance.
|
||||
+
|
||||
[source,yaml]
|
||||
----
|
||||
@@ -45,7 +45,7 @@ metadata:
|
||||
namespace: observability
|
||||
----
|
||||
|
||||
. Grant permissions to the service account for the `k8sattributes` and `resourcedetection` processors.
|
||||
. Grant the permissions to the service account for the `k8sattributes` and `resourcedetection` processors.
|
||||
+
|
||||
[source,yaml]
|
||||
----
|
||||
@@ -102,7 +102,7 @@ spec:
|
||||
timeout: 2s
|
||||
exporters:
|
||||
otlp:
|
||||
endpoint: "tempo-<example>-gateway:8090" <1>
|
||||
endpoint: "tempo-<example>-gateway:8090" # <1>
|
||||
tls:
|
||||
insecure: true
|
||||
service:
|
||||
@@ -116,4 +116,4 @@ spec:
|
||||
|
||||
. Create your deployment using the `otel-collector-sidecar` service account.
|
||||
|
||||
. Add the `sidecar.opentelemetry.io/inject: "true"` annotation to your `Deployment` object. This will inject all the needed environment variables to send data from your workloads to the OpenTelemetryCollector instance.
|
||||
. Add the `sidecar.opentelemetry.io/inject: "true"` annotation to your `Deployment` object. This will inject all the needed environment variables to send data from your workloads to the OpenTelemetry Collector instance.
|
||||
@@ -1,16 +1,16 @@
|
||||
// Module included in the following assemblies:
|
||||
//
|
||||
// * /distr_tracing/distr_tracing_otel/distr-tracing-otel-using.adoc
|
||||
// * otel/otel-using.adoc
|
||||
|
||||
:_mod-docs-content-type: PROCEDURE
|
||||
[id="distr-tracing-otel-send-traces-and-metrics-to-otel-collector-without-sidecar_{context}"]
|
||||
[id="sending-traces-and-metrics-to-otel-collector-without-sidecar_{context}"]
|
||||
= Sending traces and metrics to the OpenTelemetry Collector without sidecar injection
|
||||
|
||||
You can set up sending telemetry data to an OpenTelemetryCollector instance without sidecar injection, which involves manually setting several environment variables.
|
||||
You can set up sending telemetry data to an OpenTelemetry Collector instance without sidecar injection, which involves manually setting several environment variables.
|
||||
|
||||
.Prerequisites
|
||||
|
||||
* The {TempoName} is installed and a TempoStack instance is deployed.
|
||||
* The {TempoName} is installed, and a TempoStack instance is deployed.
|
||||
|
||||
* You have access to the cluster through the web console or the {oc-first}:
|
||||
|
||||
@@ -22,7 +22,7 @@ You can set up sending telemetry data to an OpenTelemetryCollector instance with
|
||||
|
||||
.Procedure
|
||||
|
||||
. Create a project for the OpenTelemtry Collector.
|
||||
. Create a project for an OpenTelemetry Collector instance.
|
||||
+
|
||||
[source,yaml]
|
||||
----
|
||||
@@ -43,7 +43,7 @@ metadata:
|
||||
namespace: observability
|
||||
----
|
||||
|
||||
. Grant permissions to the service account for the `k8sattributes` and `resourcedetection` processors.
|
||||
. Grant the permissions to the service account for the `k8sattributes` and `resourcedetection` processors.
|
||||
+
|
||||
[source,yaml]
|
||||
----
|
||||
@@ -70,7 +70,7 @@ roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
----
|
||||
|
||||
. Deploy the OpenTelemetryCollector instance.
|
||||
. Deploy the OpenTelemetry Collector instance with the `OpenTelemetryCollector` custom resource.
|
||||
+
|
||||
[source,yaml]
|
||||
----
|
||||
@@ -107,7 +107,7 @@ spec:
|
||||
detectors: [openshift]
|
||||
exporters:
|
||||
otlp:
|
||||
endpoint: "tempo-<example>-distributor:4317" <1>
|
||||
endpoint: "tempo-<example>-distributor:4317" # <1>
|
||||
tls:
|
||||
insecure: true
|
||||
service:
|
||||
@@ -119,7 +119,7 @@ spec:
|
||||
----
|
||||
<1> This points to the Gateway of the TempoStack instance deployed by using the `<example>` {TempoOperator}.
|
||||
|
||||
. Set the following environment variables in the container with your instrumented application:
|
||||
. Set the environment variables in the container with your instrumented application.
|
||||
+
|
||||
[options="header"]
|
||||
[cols="l, a, a"]
|
||||
@@ -146,10 +146,10 @@ spec:
|
||||
|`grpc`
|
||||
|
||||
|OTEL_EXPORTER_OTLP_TIMEOUT
|
||||
|Maximum time the OTLP exporter will wait for each batch export.
|
||||
|Maximum time interval for the OTLP exporter to wait for each batch export.
|
||||
|`10s`
|
||||
|
||||
|OTEL_EXPORTER_OTLP_INSECURE
|
||||
|Disables client transport security for gRPC requests; an HTTPS schema overrides it.
|
||||
|Disables client transport security for gRPC requests. An HTTPS schema overrides it.
|
||||
|`False`
|
||||
|===
|
||||
25
modules/otel-troubleshoot-collector-logs.adoc
Normal file
25
modules/otel-troubleshoot-collector-logs.adoc
Normal file
@@ -0,0 +1,25 @@
|
||||
// Module included in the following assemblies:
|
||||
//
|
||||
// * otel/otel-troubleshooting.adoc
|
||||
|
||||
:_mod-docs-content-type: PROCEDURE
|
||||
[id="getting-otel-collector-logs_{context}"]
|
||||
= Getting the OpenTelemetry Collector logs
|
||||
|
||||
You can get the logs for the OpenTelemetry Collector as follows.
|
||||
|
||||
.Procedure
|
||||
|
||||
. Set the relevant log level in the `OpenTelemetryCollector` custom resource (CR):
|
||||
+
|
||||
[source,yaml]
|
||||
----
|
||||
config: |
|
||||
service:
|
||||
telemetry:
|
||||
logs:
|
||||
level: debug # <1>
|
||||
----
|
||||
<1> Collector's log level. Supported values include `info`, `warn`, `error`, or `debug`. Defaults to `info`.
|
||||
|
||||
. Use the `oc logs` command or the web console to retrieve the logs.
|
||||
31
modules/otel-troubleshoot-logging-exporter-stdout.adoc
Normal file
31
modules/otel-troubleshoot-logging-exporter-stdout.adoc
Normal file
@@ -0,0 +1,31 @@
|
||||
// Module included in the following assemblies:
|
||||
//
|
||||
// * otel/otel-troubleshooting.adoc
|
||||
|
||||
:_mod-docs-content-type: PROCEDURE
|
||||
[id="debug-exporter-to-stdout_{context}"]
|
||||
= Debug exporter
|
||||
|
||||
You can configure the debug exporter to export the collected data to the standard output.
|
||||
|
||||
.Procedure
|
||||
|
||||
. Configure the `OpenTelemetryCollector` custom resource as follows:
|
||||
+
|
||||
[source,yaml]
|
||||
----
|
||||
config: |
|
||||
exporters:
|
||||
debug:
|
||||
verbosity: detailed
|
||||
service:
|
||||
pipelines:
|
||||
traces:
|
||||
exporters: [debug]
|
||||
metrics:
|
||||
exporters: [debug]
|
||||
logs:
|
||||
exporters: [debug]
|
||||
----
|
||||
|
||||
. Use the `oc logs` command or the web console to export the logs to the standard output.
|
||||
@@ -1,22 +1,25 @@
|
||||
// Module included in the following assemblies:
|
||||
//
|
||||
// * /distr_tracing/distr_tracing_otel/distr-tracing-otel-troubleshooting.adoc
|
||||
// * otel/otel-troubleshooting.adoc
|
||||
|
||||
:_mod-docs-content-type: PROCEDURE
|
||||
[id="distr-tracing-otel-troubleshoot-metrics_{context}"]
|
||||
[id="exposing-metrics_{context}"]
|
||||
= Exposing the metrics
|
||||
|
||||
The OpenTelemetry Collector exposes the metrics about the data volumes it has processed. The following metrics are for spans, although similar metrics are exposed for metrics and logs signals:
|
||||
|
||||
`otelcol_receiver_accepted_spans`:: The number of spans successfully pushed into the pipeline.
|
||||
|
||||
`otelcol_receiver_refused_spans`:: The number of spans that could not be pushed into the pipeline.
|
||||
`otelcol_exporter_sent_spans`:: The number of spans successfully sent to destination.
|
||||
`otelcol_exporter_sent_spans`:: The number of spans successfully sent to the destination.
|
||||
|
||||
`otelcol_exporter_enqueue_failed_spans`:: The number of spans failed to be added to the sending queue.
|
||||
|
||||
The operator creates a `<cr-name>-collector-monitoring` telemetry service that you can use to scrape the metrics endpoint.
|
||||
The operator creates a `<cr_name>-collector-monitoring` telemetry service that you can use to scrape the metrics endpoint.
|
||||
|
||||
.Procedure
|
||||
|
||||
. Enable the telemetry service by adding the following lines in the OpenTelemetry Collector custom resource:
|
||||
. Enable the telemetry service by adding the following lines in the `OpenTelemetryCollector` custom resource:
|
||||
|
||||
+
|
||||
[source,yaml]
|
||||
@@ -25,16 +28,17 @@ The operator creates a `<cr-name>-collector-monitoring` telemetry service that y
|
||||
service:
|
||||
telemetry:
|
||||
metrics:
|
||||
address: ":8888" <1>
|
||||
address: ":8888" # <1>
|
||||
----
|
||||
<1> The address on which internal collector metrics are exposed. Defaults to `:8888`.
|
||||
<1> The address at which the internal collector metrics are exposed. Defaults to `:8888`.
|
||||
|
||||
// TODO Operator 0.82.0 has spec.observability.metrics.enableMetrics config that creates ServiceMonitors for users
|
||||
|
||||
. Retrieve the metrics by running the following command, which uses the port forwarding collector pod:
|
||||
. Retrieve the metrics by running the following command, which uses the port-forwarding Collector pod:
|
||||
+
|
||||
[source,terminal]
|
||||
----
|
||||
$ oc port-forward <collector-pod>
|
||||
$ oc port-forward <collector_pod>
|
||||
----
|
||||
|
||||
. Access the metrics endpoint at `+http://localhost:8888/metrics+`.
|
||||
@@ -61,7 +61,7 @@ spec:
|
||||
storage:
|
||||
files:
|
||||
- contents:
|
||||
source: data:text/plain;charset=utf-8;base64,<encoded_content> <1>
|
||||
source: data:text/plain;charset=utf-8;base64,<encoded_content> # <1>
|
||||
mode: 0644
|
||||
overwrite: true
|
||||
path: /etc/default/nodeip-configuration
|
||||
@@ -85,7 +85,7 @@ spec:
|
||||
storage:
|
||||
files:
|
||||
- contents:
|
||||
source: data:text/plain;charset=utf-8;base64,<encoded_content> <1>
|
||||
source: data:text/plain;charset=utf-8;base64,<encoded_content> # <1>
|
||||
mode: 0644
|
||||
overwrite: true
|
||||
path: /etc/default/nodeip-configuration
|
||||
|
||||
@@ -17,6 +17,7 @@
|
||||
// * distr_tracing/distr_tracing_rn/distr-tracing-rn-2-7.adoc
|
||||
// * distr_tracing/distr_tracing_rn/distr-tracing-rn-2-8.adoc
|
||||
// * distr_tracing/distr_tracing_rn/distr-tracing-rn-2-9.adoc
|
||||
// * distr_tracing/distr_tracing_rn/distr-tracing-rn-3-0.adoc
|
||||
// * microshift_support/microshift-getting-support.adoc
|
||||
|
||||
[id="support_{context}"]
|
||||
|
||||
1
otel/_attributes
Symbolic link
1
otel/_attributes
Symbolic link
@@ -0,0 +1 @@
|
||||
../_attributes/
|
||||
1
otel/images
Symbolic link
1
otel/images
Symbolic link
@@ -0,0 +1 @@
|
||||
../images/
|
||||
1
otel/modules
Symbolic link
1
otel/modules
Symbolic link
@@ -0,0 +1 @@
|
||||
../modules/
|
||||
27
otel/otel-configuring.adoc
Normal file
27
otel/otel-configuring.adoc
Normal file
@@ -0,0 +1,27 @@
|
||||
:_mod-docs-content-type: ASSEMBLY
|
||||
[id="otel-configuring"]
|
||||
= Configuring and deploying the {OTELShortName}
|
||||
include::_attributes/common-attributes.adoc[]
|
||||
:context: otel-configuring
|
||||
|
||||
toc::[]
|
||||
|
||||
The {OTELName} Operator uses a custom resource definition (CRD) file that defines the architecture and configuration settings to be used when creating and deploying the {OTELShortName} resources. You can install the default configuration or modify the file.
|
||||
|
||||
include::modules/otel-config-collector.adoc[leveloffset=+1]
|
||||
include::modules/otel-config-multicluster.adoc[leveloffset=+1]
|
||||
include::modules/otel-config-send-metrics-monitoring-stack.adoc[leveloffset=+1]
|
||||
|
||||
[id="setting-up-monitoring-for-otel"]
|
||||
== Setting up monitoring for the {OTELShortName}
|
||||
|
||||
The {OTELOperator} supports monitoring and alerting of each OpenTelemetry Collector instance and exposes upgrade and operational metrics about the Operator itself.
|
||||
|
||||
include::modules/otel-configuring-otelcol-metrics.adoc[leveloffset=+2]
|
||||
|
||||
// modules/otel-configuring-oteloperator-metrics.adoc[leveloffset=+2]
|
||||
|
||||
[role="_additional-resources"]
|
||||
[id="additional-resources_deploy-otel"]
|
||||
== Additional resources
|
||||
* xref:../monitoring/enabling-monitoring-for-user-defined-projects.adoc#enabling-monitoring-for-user-defined-projects[Enabling monitoring for user-defined projects]
|
||||
27
otel/otel-installing.adoc
Normal file
27
otel/otel-installing.adoc
Normal file
@@ -0,0 +1,27 @@
|
||||
:_mod-docs-content-type: ASSEMBLY
|
||||
[id="install-otel"]
|
||||
= Installing the {OTELShortName}
|
||||
include::_attributes/common-attributes.adoc[]
|
||||
:context: install-otel
|
||||
|
||||
toc::[]
|
||||
|
||||
Installing the {OTELShortName} involves the following steps:
|
||||
|
||||
. Installing the {OTELOperator}.
|
||||
. Creating a namespace for an OpenTelemetry Collector instance.
|
||||
. Creating an `OpenTelemetryCollector` custom resource to deploy the OpenTelemetry Collector instance.
|
||||
|
||||
include::modules/otel-install-web-console.adoc[leveloffset=+1]
|
||||
|
||||
include::modules/otel-install-cli.adoc[leveloffset=+1]
|
||||
|
||||
[role="_additional-resources"]
|
||||
[id="additional-resources_otel-installing"]
|
||||
== Additional resources
|
||||
* xref:../post_installation_configuration/preparing-for-users.adoc#creating-cluster-admin_post-install-preparing-for-users[Creating a cluster admin]
|
||||
* link:https://operatorhub.io/[OperatorHub.io]
|
||||
* xref:../web_console/web-console.adoc#web-console[Accessing the web console]
|
||||
* xref:../operators/admin/olm-adding-operators-to-cluster.adoc#olm-installing-from-operatorhub-using-web-console_olm-adding-operators-to-a-cluster[Installing from OperatorHub using the web console]
|
||||
* xref:../operators/user/olm-creating-apps-from-installed-operators.adoc#olm-creating-apps-from-installed-operators[Creating applications from installed Operators]
|
||||
* xref:../cli_reference/openshift_cli/getting-started-cli.adoc#getting-started-cli[Getting started with the OpenShift CLI]
|
||||
14
otel/otel-instrumentation.adoc
Normal file
14
otel/otel-instrumentation.adoc
Normal file
@@ -0,0 +1,14 @@
|
||||
:_mod-docs-content-type: ASSEMBLY
|
||||
[id="otel-instrumentation"]
|
||||
= Configuring and deploying the OpenTelemetry instrumentation injection
|
||||
include::_attributes/common-attributes.adoc[]
|
||||
:context: otel-instrumentation
|
||||
|
||||
toc::[]
|
||||
|
||||
:FeatureName: OpenTelemetry instrumentation injection
|
||||
include::snippets/technology-preview.adoc[leveloffset=+1]
|
||||
|
||||
The {OTELName} Operator uses a custom resource definition (CRD) file that defines the configuration of the instrumentation.
|
||||
|
||||
include::modules/otel-config-instrumentation.adoc[leveloffset=+1]
|
||||
@@ -6,12 +6,12 @@ include::_attributes/common-attributes.adoc[]
|
||||
|
||||
toc::[]
|
||||
|
||||
If you are already using {JaegerName} for your applications, you can migrate to the {OTELName}, which is based on the link:https://opentelemetry.io/[OpenTelemetry] open-source project.
|
||||
If you are already using the {JaegerName} for your applications, you can migrate to the {OTELName}, which is based on the link:https://opentelemetry.io/[OpenTelemetry] open-source project.
|
||||
|
||||
The {OTELShortName} provides a set of APIs, libraries, agents, and instrumentation to facilitate observability in distributed systems. The OpenTelemetry Collector in the {OTELShortName} can ingest the Jaeger protocol, so you do not need to change the SDKs in your applications.
|
||||
|
||||
Migration from the {JaegerShortName} to the {OTELShortName} requires configuring the OpenTelemetry Collector and your applications to report traces seamlessly. You can migrate sidecar and sidecarless deployments.
|
||||
|
||||
include::modules/distr-tracing-otel-migrating-from-jaeger-with-sidecars.adoc[leveloffset=+1]
|
||||
include::modules/otel-migrating-from-jaeger-with-sidecars.adoc[leveloffset=+1]
|
||||
|
||||
include::modules/distr-tracing-otel-migrating-from-jaeger-without-sidecars.adoc[leveloffset=+1]
|
||||
include::modules/otel-migrating-from-jaeger-without-sidecars.adoc[leveloffset=+1]
|
||||
136
otel/otel-release-notes.adoc
Normal file
136
otel/otel-release-notes.adoc
Normal file
@@ -0,0 +1,136 @@
|
||||
:_mod-docs-content-type: ASSEMBLY
|
||||
include::_attributes/common-attributes.adoc[]
|
||||
[id="otel-release-notes"]
|
||||
= Release notes for {OTELName}
|
||||
:context: otel-release-notes
|
||||
|
||||
toc::[]
|
||||
|
||||
[id="otel-product-overview"]
|
||||
== {OTELName} overview
|
||||
|
||||
{OTELName} is based on the open source link:https://opentelemetry.io/[OpenTelemetry project], which aims to provide unified, standardized, and vendor-neutral telemetry data collection for cloud-native software. The {OTELName} product provides support for deploying and managing the OpenTelemetry Collector and simplifying the workload instrumentation.
|
||||
|
||||
The link:https://opentelemetry.io/docs/collector/[OpenTelemetry Collector] can receive, process, and forward telemetry data in multiple formats, making it the ideal component for telemetry processing and interoperability between telemetry systems. The Collector provides a unified solution for collecting and processing metrics, traces, and logs.
|
||||
|
||||
The OpenTelemetry Collector has a number of features including the following:
|
||||
|
||||
Data collection and processing hub:: It acts as a central component that gathers telemetry data like metrics and traces from various sources. This data can be created from instrumented applications and infrastructure.
|
||||
|
||||
Customizable telemetry data pipeline:: The OpenTelemetry Collector is designed to be customizable. It supports various processors, exporters, and receivers.
|
||||
|
||||
Auto-instrumentation features:: Automatic instrumentation simplifies the process of adding observability to applications. Developers do not need to manually instrument their code for basic telemetry data.
|
||||
|
||||
Here are some of the use cases for the OpenTelemetry Collector:
|
||||
|
||||
Centralized data collection:: In a microservices architecture, the Collector can be deployed to aggregate data from multiple services.
|
||||
|
||||
Data enrichment and processing:: Before forwarding data to analysis tools, the Collector can enrich, filter, and process this data.
|
||||
|
||||
Multi-backend receiving and exporting:: The Collector can receive and send data to multiple monitoring and analysis platforms simultaneously.
|
||||
|
||||
[id="otel-3-0-rn"]
|
||||
== {OTELName} {DTProductVersion}
|
||||
|
||||
{OTELName} {DTProductVersion} is based on link:https://opentelemetry.io/[OpenTelemetry] {OTELVersion}.
|
||||
|
||||
[id="new-features-and-enhancements_otel-3-0-rn"]
|
||||
=== New features and enhancements
|
||||
|
||||
This update introduces the following enhancements:
|
||||
|
||||
* The *OpenShift distributed tracing data collection Operator* is renamed as the *{OTELOperator}*.
|
||||
* Support for the ARM architecture.
|
||||
* Support for the Prometheus receiver for metrics collection.
|
||||
* Support for the Kafka receiver and exporter for sending traces and metrics to Kafka.
|
||||
* Support for cluster-wide proxy environments.
|
||||
* The {OTELOperator} creates the Prometheus `ServiceMonitor` custom resource if the Prometheus exporter is enabled.
|
||||
* The Operator enables the `Instrumentation` custom resource that allows injecting upstream OpenTelemetry auto-instrumentation libraries.
|
||||
|
||||
[id="removal-notice_otel-3-0-rn"]
|
||||
=== Removal notice
|
||||
|
||||
* In {OTELName} {DTProductVersion}, the Jaeger exporter has been removed. Bug fixes and support are provided only through the end of the 2.9 lifecycle. As an alternative to the Jaeger exporter for sending data to the Jaeger collector, you can use the OTLP exporter instead.
|
||||
|
||||
[id="bug-fixes_otel-3-0-rn"]
|
||||
=== Bug fixes
|
||||
|
||||
This update introduces the following bug fixes:
|
||||
|
||||
* Fixed support for disconnected environments when using the `oc adm catalog mirror` CLI command.
|
||||
|
||||
[id="known-issues_otel-3-0-rn"]
|
||||
=== Known issues
|
||||
|
||||
Currently, the cluster monitoring of the {OTELOperator} is disabled due to a bug (link:https://issues.redhat.com/browse/TRACING-3761[TRACING-3761]). The bug prevents the cluster monitoring from scraping metrics from the {OTELOperator} due to a missing label `openshift.io/cluster-monitoring=true`
|
||||
that is required for the cluster monitoring and service monitor object.
|
||||
|
||||
.Workaround
|
||||
|
||||
You can enable the cluster monitoring as follows:
|
||||
|
||||
. Add the following label to the Operator namespace: `oc label namespace openshift-opentelemetry-operator openshift.io/cluster-monitoring=true`
|
||||
|
||||
. Create a service monitor, role, and role binding:
|
||||
+
|
||||
[source,yaml]
|
||||
----
|
||||
apiVersion: monitoring.coreos.com/v1
|
||||
kind: ServiceMonitor
|
||||
metadata:
|
||||
name: opentelemetry-operator-controller-manager-metrics-service
|
||||
namespace: openshift-opentelemetry-operator
|
||||
spec:
|
||||
endpoints:
|
||||
- bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token
|
||||
path: /metrics
|
||||
port: https
|
||||
scheme: https
|
||||
tlsConfig:
|
||||
insecureSkipVerify: true
|
||||
selector:
|
||||
matchLabels:
|
||||
app.kubernetes.io/name: opentelemetry-operator
|
||||
control-plane: controller-manager
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: Role
|
||||
metadata:
|
||||
name: otel-operator-prometheus
|
||||
namespace: openshift-opentelemetry-operator
|
||||
annotations:
|
||||
include.release.openshift.io/self-managed-high-availability: "true"
|
||||
include.release.openshift.io/single-node-developer: "true"
|
||||
rules:
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- services
|
||||
- endpoints
|
||||
- pods
|
||||
verbs:
|
||||
- get
|
||||
- list
|
||||
- watch
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: RoleBinding
|
||||
metadata:
|
||||
name: otel-operator-prometheus
|
||||
namespace: openshift-opentelemetry-operator
|
||||
annotations:
|
||||
include.release.openshift.io/self-managed-high-availability: "true"
|
||||
include.release.openshift.io/single-node-developer: "true"
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: Role
|
||||
name: otel-operator-prometheus
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: prometheus-k8s
|
||||
namespace: openshift-monitoring
|
||||
----
|
||||
|
||||
include::modules/support.adoc[leveloffset=+1]
|
||||
|
||||
include::modules/making-open-source-more-inclusive.adoc[leveloffset=+1]
|
||||
@@ -12,13 +12,13 @@ The steps for removing the {OTELShortName} from an {product-title} cluster are a
|
||||
. Remove any OpenTelemetryCollector instances.
|
||||
. Remove the {OTELOperator}.
|
||||
|
||||
include::modules/distr-tracing-otel-remove-web-console.adoc[leveloffset=+1]
|
||||
include::modules/otel-remove-web-console.adoc[leveloffset=+1]
|
||||
|
||||
include::modules/distr-tracing-otel-remove-cli.adoc[leveloffset=+1]
|
||||
include::modules/otel-remove-cli.adoc[leveloffset=+1]
|
||||
|
||||
[role="_additional-resources"]
|
||||
[id="additional-resources_dist-tracing-otel-removing"]
|
||||
== Additional resources
|
||||
|
||||
* xref:../../operators/admin/olm-deleting-operators-from-cluster.adoc#olm-deleting-operators-from-a-cluster[Deleting Operators from a cluster]
|
||||
* xref:../../cli_reference/openshift_cli/getting-started-cli.adoc#getting-started-cli[Getting started with the OpenShift CLI]
|
||||
* xref:../operators/admin/olm-deleting-operators-from-cluster.adoc#olm-deleting-operators-from-a-cluster[Deleting Operators from a cluster]
|
||||
* xref:../cli_reference/openshift_cli/getting-started-cli.adoc#getting-started-cli[Getting started with the OpenShift CLI]
|
||||
13
otel/otel-troubleshooting.adoc
Normal file
13
otel/otel-troubleshooting.adoc
Normal file
@@ -0,0 +1,13 @@
|
||||
:_mod-docs-content-type: ASSEMBLY
|
||||
[id="otel-troubleshoot"]
|
||||
= Troubleshooting the {OTELShortName}
|
||||
include::_attributes/common-attributes.adoc[]
|
||||
:context: otel-troubleshoot
|
||||
|
||||
toc::[]
|
||||
|
||||
The OpenTelemetry Collector offers multiple ways to measure its health as well as investigate data ingestion issues.
|
||||
|
||||
include::modules/otel-troubleshoot-collector-logs.adoc[leveloffset=+1]
|
||||
include::modules/otel-troubleshoot-metrics.adoc[leveloffset=+1]
|
||||
include::modules/otel-troubleshoot-logging-exporter-stdout.adoc[leveloffset=+1]
|
||||
20
otel/otel-updating.adoc
Normal file
20
otel/otel-updating.adoc
Normal file
@@ -0,0 +1,20 @@
|
||||
:_mod-docs-content-type: ASSEMBLY
|
||||
[id="dist-tracing-otel-updating"]
|
||||
= Updating the {OTELShortName}
|
||||
include::_attributes/common-attributes.adoc[]
|
||||
:context: dist-tracing-otel-updating
|
||||
|
||||
toc::[]
|
||||
|
||||
For version upgrades, the {OTELOperator} uses the Operator Lifecycle Manager (OLM), which controls installation, upgrade, and role-based access control (RBAC) of Operators in a cluster.
|
||||
|
||||
The OLM runs in the {product-title} by default. The OLM queries for available Operators as well as upgrades for installed Operators.
|
||||
|
||||
When the {OTELOperator} is upgraded to the new version, it scans for running OpenTelemetry Collector instances that it manages and upgrades them to the version corresponding to the Operator's new version.
|
||||
|
||||
[role="_additional-resources"]
|
||||
[id="additional-resources_dist-tracing-otel-updating"]
|
||||
== Additional resources
|
||||
|
||||
* xref:../operators/understanding/olm/olm-understanding-olm.adoc#olm-understanding-olm[Operator Lifecycle Manager concepts and resources]
|
||||
* xref:../operators/admin/olm-upgrading-operators.adoc#olm-upgrading-operators[Updating installed Operators]
|
||||
20
otel/otel-using.adoc
Normal file
20
otel/otel-using.adoc
Normal file
@@ -0,0 +1,20 @@
|
||||
:_mod-docs-content-type: ASSEMBLY
|
||||
[id="otel-temp"]
|
||||
= Using the {OTELShortName}
|
||||
include::_attributes/common-attributes.adoc[]
|
||||
:context: otel-temp
|
||||
|
||||
toc::[]
|
||||
|
||||
You can set up and use the {OTELShortName} to send traces to the OpenTelemetry Collector or the TempoStack.
|
||||
|
||||
include::modules/otel-forwarding.adoc[leveloffset=+1]
|
||||
|
||||
[id="otel-send-traces-and-metrics-to-otel-collector_{context}"]
|
||||
== Sending traces and metrics to the OpenTelemetry Collector
|
||||
|
||||
Sending tracing and metrics to the OpenTelemetry Collector is possible with or without sidecar injection.
|
||||
|
||||
include::modules/otel-send-traces-and-metrics-to-otel-collector-with-sidecar.adoc[leveloffset=+2]
|
||||
|
||||
include::modules/otel-send-traces-and-metrics-to-otel-collector-without-sidecar.adoc[leveloffset=+2]
|
||||
1
otel/snippets
Symbolic link
1
otel/snippets
Symbolic link
@@ -0,0 +1 @@
|
||||
../snippets/
|
||||
Reference in New Issue
Block a user