
OBSDOCS-134: Restructure log forwarding content

Ashleigh Brennan
2024-01-15 10:52:56 -06:00
committed by openshift-cherrypick-robot
parent 9627ee4d41
commit 63f5d14c95
18 changed files with 241 additions and 202 deletions

View File

@@ -2586,10 +2586,14 @@ Topics:
File: logging-output-types
- Name: Enabling JSON log forwarding
File: cluster-logging-enabling-json-logging
+- Name: Configuring log forwarding
+  File: configuring-log-forwarding
- Name: Configuring the logging collector
File: cluster-logging-collector
- Name: Collecting and storing Kubernetes events
File: cluster-logging-eventrouter
+- Name: Troubleshooting log forwarding
+  File: log-forwarding-troubleshooting
- Name: Log storage
Dir: log_storage
Topics:

View File

@@ -1072,10 +1072,14 @@ Topics:
File: logging-output-types
- Name: Enabling JSON log forwarding
File: cluster-logging-enabling-json-logging
+- Name: Configuring log forwarding
+  File: configuring-log-forwarding
- Name: Configuring the logging collector
File: cluster-logging-collector
- Name: Collecting and storing Kubernetes events
File: cluster-logging-eventrouter
+- Name: Troubleshooting log forwarding
+  File: log-forwarding-troubleshooting
- Name: Log storage
Dir: log_storage
Topics:

View File

@@ -1302,10 +1302,14 @@ Topics:
File: logging-output-types
- Name: Enabling JSON log forwarding
File: cluster-logging-enabling-json-logging
+- Name: Configuring log forwarding
+  File: configuring-log-forwarding
- Name: Configuring the logging collector
File: cluster-logging-collector
- Name: Collecting and storing Kubernetes events
File: cluster-logging-eventrouter
+- Name: Troubleshooting log forwarding
+  File: log-forwarding-troubleshooting
- Name: Log storage
Dir: log_storage
Topics:

View File

@@ -25,5 +25,5 @@ include::modules/deleting-service.adoc[leveloffset=+1]
ifdef::openshift-rosa[]
[role="_additional-resources"]
== Additional resources
-* xref:../logging/log_collection_forwarding/log-forwarding.adoc#cluster-logging-collector-log-forward-cloudwatch_log-forwarding[Forwarding logs to Amazon CloudWatch]
+* xref:../logging/log_collection_forwarding/configuring-log-forwarding.adoc#cluster-logging-collector-log-forward-cloudwatch_configuring-log-forwarding[Forwarding logs to Amazon CloudWatch]
endif::[]

View File

@@ -16,7 +16,7 @@ include::modules/aws-cloudwatch.adoc[leveloffset=+1]
.Additional resources
* link:https://aws.amazon.com/cloudwatch/[Amazon CloudWatch product information]
-* xref:../logging/log_collection_forwarding/log-forwarding.adoc#cluster-logging-collector-log-forward-cloudwatch_log-forwarding[Forwarding logs to Amazon CloudWatch]
+* xref:../logging/log_collection_forwarding/configuring-log-forwarding.adoc#cluster-logging-collector-log-forward-cloudwatch_configuring-log-forwarding[Forwarding logs to Amazon CloudWatch]
include::modules/osd-rhoam.adoc[leveloffset=+1]

View File

@@ -31,7 +31,7 @@ include::modules/cluster-logging-about.adoc[leveloffset=+1]
ifdef::openshift-rosa,openshift-dedicated[]
include::modules/cluster-logging-cloudwatch.adoc[leveloffset=+1]
.Next steps
-* See xref:../logging/log_collection_forwarding/log-forwarding.adoc#cluster-logging-collector-log-forward-cloudwatch_log-forwarding[Forwarding logs to Amazon CloudWatch] for instructions.
+* See xref:../logging/log_collection_forwarding/configuring-log-forwarding.adoc#cluster-logging-collector-log-forward-cloudwatch_configuring-log-forwarding[Forwarding logs to Amazon CloudWatch] for instructions.
endif::[]
include::modules/cluster-logging-json-logging-about.adoc[leveloffset=+2]

View File

@@ -11,6 +11,7 @@ You can configure the Log Forwarding API to parse JSON strings into a structured
include::modules/cluster-logging-json-log-forwarding.adoc[leveloffset=+1]
include::modules/cluster-logging-configuration-of-json-log-data-for-default-elasticsearch.adoc[leveloffset=+1]
include::modules/cluster-logging-forwarding-json-logs-to-the-default-elasticsearch.adoc[leveloffset=+1]
+include::modules/cluster-logging-forwarding-separate-indices.adoc[leveloffset=+1]
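For orientation — a hedged sketch that is not part of this commit — the JSON parsing that these modules document is requested with the `parse: json` field on a `ClusterLogForwarder` pipeline, assuming the `logging.openshift.io/v1` API:

[source,yaml]
----
apiVersion: logging.openshift.io/v1
kind: ClusterLogForwarder
metadata:
  name: instance
  namespace: openshift-logging
spec:
  pipelines:
  - name: parse-json-app-logs
    inputRefs:
    - application
    outputRefs:
    - default
    parse: json # parse JSON strings in the log message into a structured object
----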
[role="_additional-resources"]
.Additional resources

View File

@@ -0,0 +1,73 @@
:_mod-docs-content-type: ASSEMBLY
include::_attributes/common-attributes.adoc[]
include::_attributes/attributes-openshift-dedicated.adoc[]
[id="configuring-log-forwarding"]
= Configuring log forwarding
:context: configuring-log-forwarding
toc::[]
By default, the {logging} sends container and infrastructure logs to the default internal log store defined in the `ClusterLogging` custom resource. However, it does not send audit logs to the internal store, because the internal store does not provide secure storage. If this default configuration meets your needs, you do not need to configure the Cluster Log Forwarder.
[NOTE]
====
To send audit logs to the internal Elasticsearch log store, use the Cluster Log Forwarder as described in xref:../../logging/log_storage/logging-config-es-store.adoc#cluster-logging-elasticsearch-audit_logging-config-es-store[Forwarding audit logs to the log store].
====
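As a point of reference — a minimal sketch, not taken from this commit — forwarding audit logs to the internal store amounts to a pipeline that routes the `audit` input to the `default` output:

[source,yaml]
----
apiVersion: logging.openshift.io/v1
kind: ClusterLogForwarder
metadata:
  name: instance
  namespace: openshift-logging
spec:
  pipelines:
  - name: audit-to-internal
    inputRefs:
    - audit
    outputRefs:
    - default # the internal log store defined in the ClusterLogging CR
----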
include::modules/cluster-logging-collector-log-forwarding-about.adoc[leveloffset=+1]
include::modules/logging-create-clf.adoc[leveloffset=+1]
include::modules/logging-multiline-except.adoc[leveloffset=+1]
ifndef::openshift-rosa[]
include::modules/cluster-logging-collector-log-forward-gcp.adoc[leveloffset=+1]
endif::openshift-rosa[]
include::modules/logging-forward-splunk.adoc[leveloffset=+1]
include::modules/logging-http-forward.adoc[leveloffset=+1]
include::modules/cluster-logging-collector-log-forward-project.adoc[leveloffset=+1]
include::modules/cluster-logging-collector-log-forward-logs-from-application-pods.adoc[leveloffset=+1]
[role="_additional-resources"]
.Additional resources
ifdef::openshift-enterprise,openshift-origin[]
* xref:../../networking/ovn_kubernetes_network_provider/logging-network-policy.adoc#logging-network-policy[Logging for egress firewall and network policy rules]
endif::[]
ifdef::openshift-rosa,openshift-dedicated[]
* link:https://docs.openshift.com/container-platform/latest/networking/ovn_kubernetes_network_provider/logging-network-policy.html#logging-network-policy[Logging for egress firewall and network policy rules]
endif::[]
include::modules/cluster-logging-collector-log-forward-loki.adoc[leveloffset=+1]
[role="_additional-resources"]
.Additional resources
* link:https://grafana.com/docs/loki/latest/configuration/[Configuring Loki server]
include::modules/cluster-logging-collector-log-forward-es.adoc[leveloffset=+1]
include::modules/cluster-logging-collector-log-forward-fluentd.adoc[leveloffset=+1]
include::modules/cluster-logging-collector-log-forward-syslog.adoc[leveloffset=+1]
include::modules/cluster-logging-collector-log-forward-kafka.adoc[leveloffset=+1]
// Cloudwatch docs
include::modules/cluster-logging-collector-log-forward-cloudwatch.adoc[leveloffset=+1]
include::modules/cluster-logging-collector-log-forward-secret-cloudwatch.adoc[leveloffset=+1]
ifdef::openshift-rosa[]
include::modules/rosa-cluster-logging-collector-log-forward-sts-cloudwatch.adoc[leveloffset=+1]
endif::[]
ifdef::openshift-enterprise,openshift-origin,openshift-dedicated[]
include::modules/cluster-logging-collector-log-forward-sts-cloudwatch.adoc[leveloffset=+1]
endif::[]
[role="_additional-resources"]
.Additional resources
* link:https://docs.aws.amazon.com/STS/latest/APIReference/welcome.html[AWS STS API Reference]

View File

@@ -0,0 +1,11 @@
:_mod-docs-content-type: ASSEMBLY
include::_attributes/common-attributes.adoc[]
include::_attributes/attributes-openshift-dedicated.adoc[]
[id="log-forwarding-troubleshooting"]
= Troubleshooting log forwarding
:context: log-forwarding-troubleshooting
toc::[]
include::modules/redeploying-fluentd-pods.adoc[leveloffset=+1]
include::modules/loki-rate-limit-errors.adoc[leveloffset=+1]

View File

@@ -26,10 +26,10 @@ Administrators can create `ClusterLogForwarder` resources that specify which log
Administrators can also grant RBAC permissions that define which service accounts and users can access and forward which types of logs.
-include::modules/log-forwarding-implementations.adoc[leveloffset=+1]
+include::modules/log-forwarding-implementations.adoc[leveloffset=+2]
[id="log-forwarding-enabling-multi-clf-feature"]
-== Enabling the multi log forwarder feature for a cluster
+=== Enabling the multi log forwarder feature for a cluster
To use the multi log forwarder feature, you must create a service account and cluster role bindings for that service account. You can then reference the service account in the `ClusterLogForwarder` resource to control access permissions.
@@ -38,7 +38,7 @@ To use the multi log forwarder feature, you must create a service account and cl
To support multi log forwarding in namespaces other than the `openshift-logging` namespace, you must xref:../../logging/cluster-logging-upgrading.adoc#logging-operator-upgrading-all-ns_cluster-logging-upgrading[update the {clo} to watch all namespaces]. This functionality is supported by default in new {clo} version 5.8 installations.
====
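For illustration — a sketch assuming the collection cluster roles that ship with the {clo} (`collect-application-logs`, `collect-infrastructure-logs`, and `collect-audit-logs`); verify the role names against your installed version:

[source,terminal]
----
$ oc create serviceaccount collector -n openshift-logging
$ oc adm policy add-cluster-role-to-user collect-application-logs -z collector -n openshift-logging
$ oc adm policy add-cluster-role-to-user collect-infrastructure-logs -z collector -n openshift-logging
$ oc adm policy add-cluster-role-to-user collect-audit-logs -z collector -n openshift-logging
----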
-include::modules/log-collection-rbac-permissions.adoc[leveloffset=+2]
+include::modules/log-collection-rbac-permissions.adoc[leveloffset=+3]
[role="_additional-resources"]
.Additional resources
@@ -47,192 +47,3 @@ ifdef::openshift-enterprise[]
* xref:../../authentication/using-service-accounts-in-applications.adoc#using-service-accounts-in-applications[Using service accounts in applications]
endif::[]
* link:https://kubernetes.io/docs/reference/access-authn-authz/rbac/[Using RBAC Authorization Kubernetes documentation]
include::modules/logging-create-clf.adoc[leveloffset=+1]
include::modules/logging-multiline-except.adoc[leveloffset=+1]
[id="log-forwarding-audit-logs"]
== Sending audit logs to the internal log store
By default, the {logging} sends container and infrastructure logs to the default internal log store defined in the `ClusterLogging` custom resource. However, it does not send audit logs to the internal store, because the internal store does not provide secure storage. If this default configuration meets your needs, you do not need to configure the Cluster Log Forwarder.
[NOTE]
====
To send audit logs to the internal Elasticsearch log store, use the Cluster Log Forwarder as described in xref:../../logging/log_storage/logging-config-es-store.adoc#cluster-logging-elasticsearch-audit_logging-config-es-store[Forwarding audit logs to the log store].
====
include::modules/cluster-logging-collector-log-forwarding-about.adoc[leveloffset=+1]
include::modules/cluster-logging-forwarding-separate-indices.adoc[leveloffset=+1]
include::modules/cluster-logging-collector-log-forward-es.adoc[leveloffset=+1]
include::modules/cluster-logging-collector-log-forward-fluentd.adoc[leveloffset=+1]
include::modules/cluster-logging-collector-log-forward-syslog.adoc[leveloffset=+1]
include::modules/cluster-logging-collector-log-forward-kafka.adoc[leveloffset=+1]
include::modules/cluster-logging-collector-log-forward-cloudwatch.adoc[leveloffset=+1]
ifdef::openshift-rosa[]
include::modules/rosa-cluster-logging-collector-log-forward-sts-cloudwatch.adoc[leveloffset=+2]
endif::[]
ifdef::openshift-enterprise,openshift-origin,openshift-dedicated[]
[id="cluster-logging-collector-log-forward-sts-cloudwatch_{context}"]
=== Forwarding logs to Amazon CloudWatch from STS enabled clusters
For clusters with AWS Security Token Service (STS) enabled, you can create an AWS service account manually or create a credentials request by using the
ifdef::openshift-enterprise,openshift-origin[]
xref:../../authentication/managing_cloud_provider_credentials/about-cloud-credential-operator.adoc[Cloud Credential Operator (CCO)]
endif::[]
ifdef::openshift-dedicated[]
link:https://docs.openshift.com/container-platform/latest/authentication/managing_cloud_provider_credentials/about-cloud-credential-operator.html[Cloud Credential Operator (CCO)]
endif::[]
utility `ccoctl`.
.Prerequisites
* {logging-title-uc} version 5.5 or later
.Procedure
. Create a `CredentialsRequest` custom resource YAML by using the template below:
+
.CloudWatch credentials request template
[source,yaml]
----
apiVersion: cloudcredential.openshift.io/v1
kind: CredentialsRequest
metadata:
  name: <your_role_name>-credrequest
  namespace: openshift-cloud-credential-operator
spec:
  providerSpec:
    apiVersion: cloudcredential.openshift.io/v1
    kind: AWSProviderSpec
    statementEntries:
    - action:
      - logs:PutLogEvents
      - logs:CreateLogGroup
      - logs:PutRetentionPolicy
      - logs:CreateLogStream
      - logs:DescribeLogGroups
      - logs:DescribeLogStreams
      effect: Allow
      resource: arn:aws:logs:*:*:*
  secretRef:
    name: <your_role_name>
    namespace: openshift-logging
  serviceAccountNames:
  - logcollector
----
+
. Use the `ccoctl` command to create a role for AWS using your `CredentialsRequest` CR. With the `CredentialsRequest` object, this `ccoctl` command creates an IAM role with a trust policy that is tied to the specified OIDC identity provider, and a permissions policy that grants permissions to perform operations on CloudWatch resources. This command also creates a YAML configuration file in `/<path_to_ccoctl_output_dir>/manifests/openshift-logging-<your_role_name>-credentials.yaml`. This secret file contains the `role_arn` key/value used during authentication with the AWS IAM identity provider.
+
[source,terminal]
----
$ ccoctl aws create-iam-roles \
  --name=<name> \
  --region=<aws_region> \
  --credentials-requests-dir=<path_to_directory_with_list_of_credentials_requests>/credrequests \
  --identity-provider-arn=arn:aws:iam::<aws_account_id>:oidc-provider/<name>-oidc.s3.<aws_region>.amazonaws.com <1>
----
<1> `<name>` is the name used to tag your cloud resources. It should match the name that you used during your STS cluster installation.
+
. Apply the secret that you created:
+
[source,terminal]
----
$ oc apply -f output/manifests/openshift-logging-<your_role_name>-credentials.yaml
----
+
. Create or edit a `ClusterLogForwarder` custom resource:
+
[source,yaml]
----
apiVersion: logging.openshift.io/v1
kind: ClusterLogForwarder
metadata:
  name: <log_forwarder_name> <1>
  namespace: <log_forwarder_namespace> <2>
spec:
  serviceAccountName: clf-collector <3>
  outputs:
  - name: cw <4>
    type: cloudwatch <5>
    cloudwatch:
      groupBy: logType <6>
      groupPrefix: <group_prefix> <7>
      region: us-east-2 <8>
    secret:
      name: <your_secret_name> <9>
  pipelines:
  - name: to-cloudwatch <10>
    inputRefs: <11>
    - infrastructure
    - audit
    - application
    outputRefs:
    - cw <12>
----
<1> In legacy implementations, the CR name must be `instance`. In multi log forwarder implementations, you can use any name.
<2> In legacy implementations, the CR namespace must be `openshift-logging`. In multi log forwarder implementations, you can use any namespace.
<3> Specify the `clf-collector` service account. The service account is only required in multi log forwarder implementations if the log forwarder is not deployed in the `openshift-logging` namespace.
<4> Specify a name for the output.
<5> Specify the `cloudwatch` type.
<6> Optional: Specify how to group the logs:
+
* `logType` creates log groups for each log type.
* `namespaceName` creates a log group for each application namespace. Infrastructure and audit logs are unaffected, remaining grouped by `logType`.
* `namespaceUUID` creates a new log group for each application namespace UUID. It also creates separate log groups for infrastructure and audit logs.
<7> Optional: Specify a string to replace the default `infrastructureName` prefix in the names of the log groups.
<8> Specify the AWS region.
<9> Specify the name of the secret that contains your AWS credentials.
<10> Optional: Specify a name for the pipeline.
<11> Specify which log types to forward by using the pipeline: `application`, `infrastructure`, or `audit`.
<12> Specify the name of the output to use when forwarding logs with this pipeline.
endif::[]
[role="_additional-resources"]
.Additional resources
* link:https://docs.aws.amazon.com/STS/latest/APIReference/welcome.html[AWS STS API Reference]
include::modules/cluster-logging-collector-log-forward-secret-cloudwatch.adoc[leveloffset=+2]
include::modules/cluster-logging-collector-log-forward-loki.adoc[leveloffset=+1]
include::modules/loki-rate-limit-errors.adoc[leveloffset=+2]
[role="_additional-resources"]
.Additional resources
* xref:../../logging/cluster-logging-exported-fields.adoc#cluster-logging-exported-fields-kubernetes_cluster-logging-exported-fields[Log Record Fields]
* link:https://grafana.com/docs/loki/latest/configuration/[Configuring Loki server]
ifndef::openshift-rosa[]
include::modules/cluster-logging-collector-log-forward-gcp.adoc[leveloffset=+1]
endif::openshift-rosa[]
include::modules/logging-forward-splunk.adoc[leveloffset=+1]
include::modules/logging-http-forward.adoc[leveloffset=+1]
include::modules/cluster-logging-collector-log-forward-project.adoc[leveloffset=+1]
include::modules/cluster-logging-collector-log-forward-logs-from-application-pods.adoc[leveloffset=+1]
[role="_additional-resources"]
.Additional resources
ifdef::openshift-enterprise,openshift-origin[]
* xref:../../networking/ovn_kubernetes_network_provider/logging-network-policy.adoc#logging-network-policy[Logging for egress firewall and network policy rules]
endif::[]
ifdef::openshift-rosa,openshift-dedicated[]
* link:https://docs.openshift.com/container-platform/latest/networking/ovn_kubernetes_network_provider/logging-network-policy.html#logging-network-policy[Logging for egress firewall and network policy rules]
endif::[]
include::modules/cluster-logging-troubleshooting-log-forwarding.adoc[leveloffset=+1]

View File

@@ -7,7 +7,7 @@ include::_attributes/attributes-openshift-dedicated.adoc[]
toc::[]
-You can use an internal Loki or Elasticsearch log store on your cluster for storing logs, or you can use a xref:../../logging/log_collection_forwarding/log-forwarding.adoc#logging-create-clf_log-forwarding[`ClusterLogForwarder` custom resource (CR)] to forward logs to an external store.
+You can use an internal Loki or Elasticsearch log store on your cluster for storing logs, or you can use a xref:../../logging/log_collection_forwarding/configuring-log-forwarding.adoc#logging-create-clf_configuring-log-forwarding[`ClusterLogForwarder` custom resource (CR)] to forward logs to an external store.
[id="log-storage-overview-types"]
== Log storage types

View File

@@ -1,6 +1,6 @@
// Module included in the following assemblies:
//
-// * logging/log_collection_forwarding/log-forwarding.adoc
+// * logging/log_collection_forwarding/configuring-log-forwarding.adoc
:_mod-docs-content-type: PROCEDURE
[id="cluster-logging-collector-log-forward-gcp_{context}"]

View File

@@ -1,3 +1,7 @@
+// Module included in the following assemblies:
+//
+// * logging/log_collection_forwarding/configuring-log-forwarding.adoc
:_mod-docs-content-type: PROCEDURE
[id="cluster-logging-collector-log-forward-logs-from-application-pods_{context}"]
= Forwarding application logs from specific pods
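As a hedged sketch of what this module covers (field names assume the `logging.openshift.io/v1` input API, not this commit), an input can select pods by label and feed a pipeline:

[source,yaml]
----
spec:
  inputs:
  - name: my-pod-logs
    application:
      selector:
        matchLabels:
          app: my-app # collect logs only from pods carrying this label
  pipelines:
  - name: forward-my-pods
    inputRefs:
    - my-pod-logs
    outputRefs:
    - default
----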

View File

@@ -1,3 +1,7 @@
+// Module included in the following assemblies:
+//
+// * logging/log_collection_forwarding/configuring-log-forwarding.adoc
:_mod-docs-content-type: PROCEDURE
[id="cluster-logging-collector-log-forward-project_{context}"]
= Forwarding application logs from specific projects
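Similarly hedged, an input scoped to specific projects uses the `namespaces` selector (a sketch assuming the same input API):

[source,yaml]
----
spec:
  inputs:
  - name: my-project-logs
    application:
      namespaces:
      - my-project # collect application logs only from this project
  pipelines:
  - name: forward-my-projects
    inputRefs:
    - my-project-logs
    outputRefs:
    - default
----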

View File

@@ -0,0 +1,118 @@
// Module included in the following assemblies:
//
// * logging/log_collection_forwarding/configuring-log-forwarding.adoc
:_mod-docs-content-type: PROCEDURE
[id="cluster-logging-collector-log-forward-sts-cloudwatch_{context}"]
= Forwarding logs to Amazon CloudWatch from STS enabled clusters
For clusters with AWS Security Token Service (STS) enabled, you can create an AWS service account manually or create a credentials request by using the
ifdef::openshift-enterprise,openshift-origin[]
xref:../../authentication/managing_cloud_provider_credentials/about-cloud-credential-operator.adoc[Cloud Credential Operator (CCO)]
endif::[]
ifdef::openshift-dedicated[]
link:https://docs.openshift.com/container-platform/latest/authentication/managing_cloud_provider_credentials/about-cloud-credential-operator.html[Cloud Credential Operator (CCO)]
endif::[]
utility `ccoctl`.
.Prerequisites
* {logging-title-uc} version 5.5 or later
.Procedure
. Create a `CredentialsRequest` custom resource YAML by using the template below:
+
.CloudWatch credentials request template
[source,yaml]
----
apiVersion: cloudcredential.openshift.io/v1
kind: CredentialsRequest
metadata:
  name: <your_role_name>-credrequest
  namespace: openshift-cloud-credential-operator
spec:
  providerSpec:
    apiVersion: cloudcredential.openshift.io/v1
    kind: AWSProviderSpec
    statementEntries:
    - action:
      - logs:PutLogEvents
      - logs:CreateLogGroup
      - logs:PutRetentionPolicy
      - logs:CreateLogStream
      - logs:DescribeLogGroups
      - logs:DescribeLogStreams
      effect: Allow
      resource: arn:aws:logs:*:*:*
  secretRef:
    name: <your_role_name>
    namespace: openshift-logging
  serviceAccountNames:
  - logcollector
----
+
. Use the `ccoctl` command to create a role for AWS using your `CredentialsRequest` CR. With the `CredentialsRequest` object, this `ccoctl` command creates an IAM role with a trust policy that is tied to the specified OIDC identity provider, and a permissions policy that grants permissions to perform operations on CloudWatch resources. This command also creates a YAML configuration file in `/<path_to_ccoctl_output_dir>/manifests/openshift-logging-<your_role_name>-credentials.yaml`. This secret file contains the `role_arn` key/value used during authentication with the AWS IAM identity provider.
+
[source,terminal]
----
$ ccoctl aws create-iam-roles \
  --name=<name> \
  --region=<aws_region> \
  --credentials-requests-dir=<path_to_directory_with_list_of_credentials_requests>/credrequests \
  --identity-provider-arn=arn:aws:iam::<aws_account_id>:oidc-provider/<name>-oidc.s3.<aws_region>.amazonaws.com <1>
----
<1> `<name>` is the name used to tag your cloud resources. It should match the name that you used during your STS cluster installation.
+
. Apply the secret that you created:
+
[source,terminal]
----
$ oc apply -f output/manifests/openshift-logging-<your_role_name>-credentials.yaml
----
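Optionally — a hedged check that is not part of the documented procedure — confirm that the applied secret carries the `role_arn` key:

[source,terminal]
----
$ oc get secret <your_role_name> -n openshift-logging -o jsonpath='{.data.role_arn}' | base64 -d
----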
+
. Create or edit a `ClusterLogForwarder` custom resource:
+
[source,yaml]
----
apiVersion: logging.openshift.io/v1
kind: ClusterLogForwarder
metadata:
  name: <log_forwarder_name> <1>
  namespace: <log_forwarder_namespace> <2>
spec:
  serviceAccountName: clf-collector <3>
  outputs:
  - name: cw <4>
    type: cloudwatch <5>
    cloudwatch:
      groupBy: logType <6>
      groupPrefix: <group_prefix> <7>
      region: us-east-2 <8>
    secret:
      name: <your_secret_name> <9>
  pipelines:
  - name: to-cloudwatch <10>
    inputRefs: <11>
    - infrastructure
    - audit
    - application
    outputRefs:
    - cw <12>
----
<1> In legacy implementations, the CR name must be `instance`. In multi log forwarder implementations, you can use any name.
<2> In legacy implementations, the CR namespace must be `openshift-logging`. In multi log forwarder implementations, you can use any namespace.
<3> Specify the `clf-collector` service account. The service account is only required in multi log forwarder implementations if the log forwarder is not deployed in the `openshift-logging` namespace.
<4> Specify a name for the output.
<5> Specify the `cloudwatch` type.
<6> Optional: Specify how to group the logs:
+
* `logType` creates log groups for each log type.
* `namespaceName` creates a log group for each application namespace. Infrastructure and audit logs are unaffected, remaining grouped by `logType`.
* `namespaceUUID` creates a new log group for each application namespace UUID. It also creates separate log groups for infrastructure and audit logs.
<7> Optional: Specify a string to replace the default `infrastructureName` prefix in the names of the log groups.
<8> Specify the AWS region.
<9> Specify the name of the secret that contains your AWS credentials.
<10> Optional: Specify a name for the pipeline.
<11> Specify which log types to forward by using the pipeline: `application`, `infrastructure`, or `audit`.
<12> Specify the name of the output to use when forwarding logs with this pipeline.
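As a usage sketch (the naming pattern is inferred from the `groupBy` and `groupPrefix` callouts above, so treat it as an assumption): with `groupBy: logType` and `groupPrefix: my-cluster`, you would expect log groups such as `my-cluster.audit`, `my-cluster.application`, and `my-cluster.infrastructure`, which you can list with:

[source,terminal]
----
$ aws logs describe-log-groups --log-group-name-prefix my-cluster
----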

View File

@@ -1,5 +1,6 @@
// Module included in the following assemblies:
-// * logging/log_collection_forwarding/log-forwarding.adoc
+//
+// * logging/log_collection_forwarding/configuring-log-forwarding.adoc
:_mod-docs-content-type: PROCEDURE
[id="logging-forward-splunk_{context}"]

View File

@@ -1,5 +1,6 @@
// Module included in the following assemblies:
-// * logging/log_collection_forwarding/log-forwarding
+//
+// * logging/log_collection_forwarding/configuring-log-forwarding.adoc
:_mod-docs-content-type: PROCEDURE
[id="logging-http-forward_{context}"]

View File

@@ -1,7 +1,10 @@
// Module included in the following assemblies:
//
// * logging/log_collection_forwarding/log-forwarding-troubleshooting.adoc
:_mod-docs-content-type: PROCEDURE
[id="cluster-logging-troubleshooting-log-forwarding_{context}"]
= Troubleshooting log forwarding
[id="redeploying-fluentd-pods_{context}"]
= Redeploying Fluentd pods
If the {clo} does not redeploy the Fluentd pods automatically when you create a `ClusterLogForwarder` custom resource (CR), you can delete the Fluentd pods to force them to redeploy.
@@ -11,7 +14,7 @@ When you create a `ClusterLogForwarder` custom resource (CR), if the {clo} does
.Procedure
-* Delete the Fluentd pods to force them to redeploy.
+* Delete the Fluentd pods to force them to redeploy by running the following command:
+
[source,terminal]
----
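# The command body is truncated in this diff. A hedged sketch of one way to
# force redeployment (the label selector is an assumption, not the documented
# command):
$ oc delete pod -n openshift-logging -l component=collector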