changed fluentd in files to collector
@@ -832,7 +832,7 @@ Topics:
 - Name: Configuring Curator
   File: cluster-logging-curator
 - Name: Configuring the logging collector
-  File: cluster-logging-fluentd
+  File: cluster-logging-collector
 - Name: Using tolerations to control cluster logging pod placement
   File: cluster-logging-tolerations
 - Name: Sending logs to external devices
@@ -32,7 +32,7 @@ include::modules/cluster-logging-about-components.adoc[leveloffset=+2]
 
 include::modules/cluster-logging-about-elasticsearch.adoc[leveloffset=+2]
 
-include::modules/cluster-logging-about-fluentd.adoc[leveloffset=+2]
+include::modules/cluster-logging-about-collector.adoc[leveloffset=+2]
 
 include::modules/cluster-logging-about-kibana.adoc[leveloffset=+2]
 
@@ -1,5 +1,5 @@
-:context: cluster-logging-fluentd
-[id="cluster-logging-fluentd"]
+:context: cluster-logging-collector
+[id="cluster-logging-collector"]
 = Configuring the logging collector
 include::modules/common-attributes.adoc[]
 
@@ -21,26 +21,26 @@ For more information, see xref:../../logging/config/cluster-logging-management.a
 // assemblies.
 
 
-include::modules/cluster-logging-fluentd-pod-location.adoc[leveloffset=+1]
+include::modules/cluster-logging-collector-pod-location.adoc[leveloffset=+1]
 
-include::modules/cluster-logging-fluentd-limits.adoc[leveloffset=+1]
+include::modules/cluster-logging-collector-limits.adoc[leveloffset=+1]
 
 ////
 4.1
-modules/cluster-logging-fluentd-log-rotation.adoc[leveloffset=+1]
-include::modules/cluster-logging-fluentd-collector.adoc[leveloffset=+1]
+modules/cluster-logging-collector-log-rotation.adoc[leveloffset=+1]
+include::modules/cluster-logging-collector-collector.adoc[leveloffset=+1]
 ////
 
-include::modules/cluster-logging-fluentd-log-location.adoc[leveloffset=+1]
+include::modules/cluster-logging-collector-log-location.adoc[leveloffset=+1]
 
-include::modules/cluster-logging-fluentd-external.adoc[leveloffset=+1]
+include::modules/cluster-logging-collector-external.adoc[leveloffset=+1]
 
-include::modules/cluster-logging-fluentd-throttling.adoc[leveloffset=+1]
+include::modules/cluster-logging-collector-throttling.adoc[leveloffset=+1]
 
-include::modules/cluster-logging-fluentd-json.adoc[leveloffset=+1]
+include::modules/cluster-logging-collector-json.adoc[leveloffset=+1]
 
-include::modules/cluster-logging-fluentd-undefined.adoc[leveloffset=+1]
+include::modules/cluster-logging-collector-undefined.adoc[leveloffset=+1]
 
-include::modules/cluster-logging-fluentd-envvar.adoc[leveloffset=+1]
+include::modules/cluster-logging-collector-envvar.adoc[leveloffset=+1]
 
-include::modules/cluster-logging-fluentd-alerts.adoc[leveloffset=+1]
+include::modules/cluster-logging-collector-alerts.adoc[leveloffset=+1]
@@ -23,4 +23,4 @@ include::modules/cluster-logging-external-elasticsearch.adoc[leveloffset=+1]
 
 include::modules/cluster-logging-external-syslog.adoc[leveloffset=+1]
 
-include::modules/cluster-logging-fluentd-external.adoc[leveloffset=+1]
+include::modules/cluster-logging-collector-external.adoc[leveloffset=+1]
@@ -104,7 +104,7 @@ include::modules/cluster-logging-kibana-tolerations.adoc[leveloffset=+1]
 
 include::modules/cluster-logging-curator-tolerations.adoc[leveloffset=+1]
 
-include::modules/cluster-logging-fluentd-tolerations.adoc[leveloffset=+1]
+include::modules/cluster-logging-collector-tolerations.adoc[leveloffset=+1]
 
 [id="cluster-logging-tolerations-addtl-resources"]
 == Additional resources
@@ -1,8 +1,8 @@
 // Module included in the following assemblies:
 //
-// * logging/cluster-logging-fluentd.adoc
+// * logging/cluster-logging-collector.adoc
 
-[id="cluster-logging-fluentd-alerts_{context}"]
+[id="cluster-logging-collector-alerts_{context}"]
 = About logging collector alerts
 
 The following alerts are generated by the logging collector and can be viewed on the *Alerts* tab of the Prometheus UI.
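As a quick, hedged way to locate that Prometheus UI, you can read its route from the cluster monitoring stack; the `prometheus-k8s` route and `openshift-monitoring` namespace assume a default monitoring installation, which this commit does not describe:

----
$ oc get route prometheus-k8s -n openshift-monitoring -o jsonpath='{.spec.host}'
----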
@@ -1,8 +1,8 @@
 // Module included in the following assemblies:
 //
-// * logging/cluster-logging-fluentd.adoc
+// * logging/cluster-logging-collector.adoc
 
-[id="cluster-logging-fluentd-collector_{context}"]
+[id="cluster-logging-collector-collector_{context}"]
 = Selecting the logging collector
 
 {product-title} cluster logging uses Fluentd by default.
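A sketch of what selecting a collector might look like in the ClusterLogging custom resource; the `collection.logs.type` field name is an assumption for illustration and is not confirmed by this diff:

----
spec:
  collection:
    logs:
      type: "fluentd"   # assumed field; Fluentd is the default collector
----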
@@ -1,8 +1,8 @@
 // Module included in the following assemblies:
 //
-// * logging/cluster-logging-fluentd.adoc
+// * logging/cluster-logging-collector.adoc
 
-[id="cluster-logging-fluentd-envvar_{context}"]
+[id="cluster-logging-collector-envvar_{context}"]
 = Configuring the logging collector using environment variables
 
 You can use environment variables to modify the
@@ -1,8 +1,8 @@
 // Module included in the following assemblies:
 //
-// * logging/cluster-logging-fluentd.adoc
+// * logging/cluster-logging-collector.adoc
 
-[id="cluster-logging-fluentd-external_{context}"]
+[id="cluster-logging-collector-external_{context}"]
 = Configuring Fluentd to send logs to an external log aggregator
 
 You can configure Fluentd to send a copy of its logs to an external log
@@ -58,7 +58,7 @@ $ oc edit configmap/fluentd -n openshift-logging
 . Add certificates to be used in `secure-forward.conf` to the existing
 secret that is mounted on the Fluentd pods. The `your_ca_cert` and
 `your_private_key` values must match what is specified in `secure-forward.conf`
-in `configmap/logging-fluentd`:
+in the `fluentd` ConfigMap:
 +
 ----
 $ oc patch secrets/fluentd --type=json \
modules/cluster-logging-collector-json.adoc (new file, 75 lines)
@@ -0,0 +1,75 @@
// Module included in the following assemblies:
//
// * logging/cluster-logging-collector.adoc

[id="cluster-logging-collector-json_{context}"]
= Configuring log collection JSON parsing

You can configure the Fluentd log collector to determine if a log message is in *JSON* format and merge
the message into the JSON payload document posted to Elasticsearch. This feature is disabled by default.

You can enable or disable this feature by editing the `MERGE_JSON_LOG` environment variable in the *fluentd* daemonset.
[IMPORTANT]
====
Enabling this feature comes with risks, including:

* Possible log loss due to Elasticsearch rejecting documents because of inconsistent type mappings.
* A potential buffer storage leak caused by rejected messages cycling.
* Overwritten data for fields with the same names.

The features in this topic should be used only by experienced Fluentd and Elasticsearch users.
====

.Prerequisites

Set cluster logging to the unmanaged state.

.Procedure

Use the following command to enable this feature:

----
oc set env ds/fluentd MERGE_JSON_LOG=true <1>
----
<1> Set this to `true` to enable the feature or `false` to disable it.

////
----
oc set env ds/rsyslog MERGE_JSON_LOG=true <1>
----
////

*Setting MERGE_JSON_LOG and CDM_UNDEFINED_TO_STRING*

If you set the `MERGE_JSON_LOG` and `CDM_UNDEFINED_TO_STRING` environment variables to `true`, you might receive an Elasticsearch *400* error. The error occurs because when `MERGE_JSON_LOG=true`, Fluentd adds fields with data types other than *string*. When you set `CDM_UNDEFINED_TO_STRING=true`, Fluentd attempts to add those fields as *string* values, which results in the Elasticsearch *400* error. The error clears when the indices roll over for the next day.

When Fluentd rolls over the indices for the next day's logs, it creates a brand new index. The field definitions are updated, and you no longer get the *400* error.

Records that have *hard* errors, such as schema violations or corrupted data, cannot be retried. The log collector sends those records for error handling. If you link:https://docs.fluentd.org/v1.0/articles/config-file#@error-label[add a `<label @ERROR>` section] to your Fluentd configuration as the last `<label>`, you can handle these records as needed.

For example:

----
data:
  fluent.conf:

  ....

  <label @ERROR>
    <match **>
      @type file
      path /var/log/fluent/dlq
      time_slice_format %Y%m%d
      time_slice_wait 10m
      time_format %Y%m%dT%H%M%S%z
      compress gzip
    </match>
  </label>
----

This section writes error records to the link:https://www.elastic.co/guide/en/logstash/current/dead-letter-queues.html[Elasticsearch dead letter queue (DLQ) file]. See link:https://docs.fluentd.org/v0.12/articles/out_file[the fluentd documentation] for more information about the file output.

You can then edit the file to clean up the records manually, edit it for use with the Elasticsearch `/_bulk index` API, and use cURL to add those records. For more information about the Elasticsearch Bulk API, see link:https://www.elastic.co/guide/en/elasticsearch/reference/5.6/docs-bulk.html[the Elasticsearch documentation].
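As a hedged sketch of that resubmission step: after cleaning a DLQ file and rewriting it into bulk format, you could POST it with cURL. The host and the `cleaned-records.json` file name are placeholders, not values from this commit:

----
$ curl -s -H "Content-Type: application/x-ndjson" \
    -XPOST "http://localhost:9200/_bulk" \
    --data-binary @/var/log/fluent/dlq/cleaned-records.json
----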
@@ -1,8 +1,8 @@
 // Module included in the following assemblies:
 //
-// * logging/cluster-logging-fluentd.adoc
+// * logging/cluster-logging-collector.adoc
 
-[id="cluster-logging-fluentd-limits_{context}"]
+[id="cluster-logging-collector-limits_{context}"]
 = Configure log collector CPU and memory limits
 
 The log collector allows for adjustments to both the CPU and memory limits.
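For orientation, a sketch of where such limits typically live in the ClusterLogging custom resource; the exact layout under `spec.collection.logs` and the values shown are assumptions for illustration, not taken from this commit:

----
spec:
  collection:
    logs:
      fluentd:
        resources:
          limits:          # placeholder values; size for your cluster
            cpu: 250m
            memory: 1Gi
----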
@@ -1,8 +1,8 @@
 // Module included in the following assemblies:
 //
-// * logging/cluster-logging-fluentd.adoc
+// * logging/cluster-logging-collector.adoc
 
-[id="cluster-logging-fluentd-log-location_{context}"]
+[id="cluster-logging-collector-log-location_{context}"]
 = Configuring the collected log location
 
 The log collector writes logs to a specified file, or to the default location `/var/log/fluentd/fluentd.log`, based on the `LOGGING_FILE_PATH` environment variable.
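For example, a minimal sketch that points the collector at its standard output instead of a file, assuming a `console` value behaves as described for `LOGGING_FILE_PATH` elsewhere in these docs (an assumption here):

----
$ oc set env ds/fluentd LOGGING_FILE_PATH=console
----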
@@ -1,8 +1,8 @@
 // Module included in the following assemblies:
 //
-// * logging/cluster-logging-fluentd.adoc
+// * logging/cluster-logging-collector.adoc
 
-[id="cluster-logging-fluentd-log-rotation_{context}"]
+[id="cluster-logging-collector-log-rotation_{context}"]
 = Configuring log rotation
 
 When the current log collector file reaches a specified size, {product-title} automatically renames the log collector file so that new logging data can be collected.
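A sketch of tuning rotation through daemonset environment variables; the `LOGGING_FILE_SIZE` (maximum size in bytes) and `LOGGING_FILE_AGE` (number of files to retain) names, and the values shown, are assumptions modeled on the companion `LOGGING_FILE_PATH` variable:

----
$ oc set env ds/fluentd LOGGING_FILE_SIZE=1024000 LOGGING_FILE_AGE=10
----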
@@ -1,8 +1,8 @@
 // Module included in the following assemblies:
 //
-// * logging/cluster-logging-fluentd.adoc
+// * logging/cluster-logging-collector.adoc
 
-[id="cluster-logging-fluentd-pod-location_{context}"]
+[id="cluster-logging-collector-pod-location_{context}"]
 = Viewing logging collector pods
 
 You can use the `oc get pods -o wide` command to see the nodes where the Fluentd pods are deployed.
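For example, a sketch that narrows the listing to the collector pods; the `component=fluentd` label selector and the `openshift-logging` namespace are assumptions about the default deployment:

----
$ oc get pods --selector component=fluentd -o wide -n openshift-logging
----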
@@ -1,8 +1,8 @@
 // Module included in the following assemblies:
 //
-// * logging/cluster-logging-fluentd.adoc
+// * logging/cluster-logging-collector.adoc
 
-[id="cluster-logging-fluentd-throttling_{context}"]
+[id="cluster-logging-collector-throttling_{context}"]
 = Throttling log collection
 
 For projects that are especially verbose, an administrator can throttle down the
@@ -1,8 +1,8 @@
 // Module included in the following assemblies:
 //
-// * logging/cluster-logging-fluentd.adoc
+// * logging/cluster-logging-collector.adoc
 
-[id="cluster-logging-fluentd-tolerations_{context}"]
+[id="cluster-logging-collector-tolerations_{context}"]
 = Using tolerations to control the log collector Pod placement
 
 You can control which nodes the logging collector Pods run on and prevent
@@ -1,8 +1,8 @@
 // Module included in the following assemblies:
 //
-// * logging/cluster-logging-fluentd.adoc
+// * logging/cluster-logging-collector.adoc
 
-[id="cluster-logging-fluentd-undefined_{context}"]
+[id="cluster-logging-collector-undefined_{context}"]
 = Configuring how the log collector normalizes logs
 
 Cluster Logging uses a specific data model, like a database schema, to store log records and their metadata in the logging store. There are some restrictions on the data:
@@ -1,8 +1,8 @@
 // Module included in the following assemblies:
 //
-// * logging/cluster-logging-fluentd.adoc
+// * logging/cluster-logging-collector.adoc
 
-[id="cluster-logging-fluentd-json_{context}"]
+[id="cluster-logging-collector-json_{context}"]
 = Configuring log collection JSON parsing
 
 You can configure the Fluentd log collector to determine if a log message is in *JSON* format and merge
@@ -2,7 +2,7 @@
 //
 // * logging/cluster-logging-deploy.adoc
 
-[id="cluster-logging-fluentd-scaling_{context}"]
+[id="cluster-logging-collector-scaling_{context}"]
 = Scaling up systemd-journald
 
 As you scale up your project, the default logging environment might need some