diff --git a/_topic_maps/_topic_map.yml b/_topic_maps/_topic_map.yml index 5759574078..017c7ea9fa 100644 --- a/_topic_maps/_topic_map.yml +++ b/_topic_maps/_topic_map.yml @@ -2586,10 +2586,14 @@ Topics: File: logging-output-types - Name: Enabling JSON log forwarding File: cluster-logging-enabling-json-logging + - Name: Configuring log forwarding + File: configuring-log-forwarding - Name: Configuring the logging collector File: cluster-logging-collector - Name: Collecting and storing Kubernetes events File: cluster-logging-eventrouter + - Name: Troubleshooting log forwarding + File: log-forwarding-troubleshooting - Name: Log storage Dir: log_storage Topics: diff --git a/_topic_maps/_topic_map_osd.yml b/_topic_maps/_topic_map_osd.yml index 45eecd6a71..38fa69e3d1 100644 --- a/_topic_maps/_topic_map_osd.yml +++ b/_topic_maps/_topic_map_osd.yml @@ -1072,10 +1072,14 @@ Topics: File: logging-output-types - Name: Enabling JSON log forwarding File: cluster-logging-enabling-json-logging + - Name: Configuring log forwarding + File: configuring-log-forwarding - Name: Configuring the logging collector File: cluster-logging-collector - Name: Collecting and storing Kubernetes events File: cluster-logging-eventrouter + - Name: Troubleshooting log forwarding + File: log-forwarding-troubleshooting - Name: Log storage Dir: log_storage Topics: diff --git a/_topic_maps/_topic_map_rosa.yml b/_topic_maps/_topic_map_rosa.yml index d2ea99a0cd..a6a0f14c0d 100644 --- a/_topic_maps/_topic_map_rosa.yml +++ b/_topic_maps/_topic_map_rosa.yml @@ -1302,10 +1302,14 @@ Topics: File: logging-output-types - Name: Enabling JSON log forwarding File: cluster-logging-enabling-json-logging + - Name: Configuring log forwarding + File: configuring-log-forwarding - Name: Configuring the logging collector File: cluster-logging-collector - Name: Collecting and storing Kubernetes events File: cluster-logging-eventrouter + - Name: Troubleshooting log forwarding + File: log-forwarding-troubleshooting - Name: Log 
storage Dir: log_storage Topics: diff --git a/adding_service_cluster/adding-service.adoc b/adding_service_cluster/adding-service.adoc index 1831dc7c6c..b5684aa306 100644 --- a/adding_service_cluster/adding-service.adoc +++ b/adding_service_cluster/adding-service.adoc @@ -25,5 +25,5 @@ include::modules/deleting-service.adoc[leveloffset=+1] ifdef::openshift-rosa[] [role="_additional-resources"] == Additional resources -* xref:../logging/log_collection_forwarding/log-forwarding.adoc#cluster-logging-collector-log-forward-cloudwatch_log-forwarding[Forwarding logs to Amazon CloudWatch] +* xref:../logging/log_collection_forwarding/configuring-log-forwarding.adoc#cluster-logging-collector-log-forward-cloudwatch_configuring-log-forwarding[Forwarding logs to Amazon CloudWatch] endif::[] diff --git a/adding_service_cluster/rosa-available-services.adoc b/adding_service_cluster/rosa-available-services.adoc index 8dfa266869..feec8febc4 100644 --- a/adding_service_cluster/rosa-available-services.adoc +++ b/adding_service_cluster/rosa-available-services.adoc @@ -16,7 +16,7 @@ include::modules/aws-cloudwatch.adoc[leveloffset=+1] .Additional resources * link:https://aws.amazon.com/cloudwatch/[Amazon CloudWatch product information] -* xref:../logging/log_collection_forwarding/log-forwarding.adoc#cluster-logging-collector-log-forward-cloudwatch_log-forwarding[Forwarding logs to Amazon CloudWatch] +* xref:../logging/log_collection_forwarding/configuring-log-forwarding.adoc#cluster-logging-collector-log-forward-cloudwatch_configuring-log-forwarding[Forwarding logs to Amazon CloudWatch] include::modules/osd-rhoam.adoc[leveloffset=+1] diff --git a/logging/cluster-logging.adoc b/logging/cluster-logging.adoc index ecb6fcbb6a..a8b2dc6ef1 100644 --- a/logging/cluster-logging.adoc +++ b/logging/cluster-logging.adoc @@ -31,7 +31,7 @@ include::modules/cluster-logging-about.adoc[leveloffset=+1] ifdef::openshift-rosa,openshift-dedicated[] 
include::modules/cluster-logging-cloudwatch.adoc[leveloffset=+1] .Next steps -* See xref:../logging/log_collection_forwarding/log-forwarding.adoc#cluster-logging-collector-log-forward-cloudwatch_log-forwarding[Forwarding logs to Amazon CloudWatch] for instructions. +* See xref:../logging/log_collection_forwarding/configuring-log-forwarding.adoc#cluster-logging-collector-log-forward-cloudwatch_configuring-log-forwarding[Forwarding logs to Amazon CloudWatch] for instructions. endif::[] include::modules/cluster-logging-json-logging-about.adoc[leveloffset=+2] diff --git a/logging/log_collection_forwarding/cluster-logging-enabling-json-logging.adoc b/logging/log_collection_forwarding/cluster-logging-enabling-json-logging.adoc index e5e01666b3..52f93e1e64 100644 --- a/logging/log_collection_forwarding/cluster-logging-enabling-json-logging.adoc +++ b/logging/log_collection_forwarding/cluster-logging-enabling-json-logging.adoc @@ -11,6 +11,7 @@ You can configure the Log Forwarding API to parse JSON strings into a structured include::modules/cluster-logging-json-log-forwarding.adoc[leveloffset=+1] include::modules/cluster-logging-configuration-of-json-log-data-for-default-elasticsearch.adoc[leveloffset=+1] include::modules/cluster-logging-forwarding-json-logs-to-the-default-elasticsearch.adoc[leveloffset=+1] +include::modules/cluster-logging-forwarding-separate-indices.adoc[leveloffset=+1] [role="_additional-resources"] .Additional resources diff --git a/logging/log_collection_forwarding/configuring-log-forwarding.adoc b/logging/log_collection_forwarding/configuring-log-forwarding.adoc new file mode 100644 index 0000000000..dc278ff09c --- /dev/null +++ b/logging/log_collection_forwarding/configuring-log-forwarding.adoc @@ -0,0 +1,73 @@ +:_mod-docs-content-type: ASSEMBLY +include::_attributes/common-attributes.adoc[] +include::_attributes/attributes-openshift-dedicated.adoc[] +[id="configuring-log-forwarding"] += Configuring log forwarding +:context: 
configuring-log-forwarding + +toc::[] + +By default, the {logging} sends container and infrastructure logs to the default internal log store defined in the `ClusterLogging` custom resource. However, it does not send audit logs to the internal store because it does not provide secure storage. If this default configuration meets your needs, you do not need to configure the Cluster Log Forwarder. + +[NOTE] +==== +To send audit logs to the internal Elasticsearch log store, use the Cluster Log Forwarder as described in xref:../../logging/log_storage/logging-config-es-store.adoc#cluster-logging-elasticsearch-audit_logging-config-es-store[Forwarding audit logs to the log store]. +==== + +include::modules/cluster-logging-collector-log-forwarding-about.adoc[leveloffset=+1] + +include::modules/logging-create-clf.adoc[leveloffset=+1] + +include::modules/logging-multiline-except.adoc[leveloffset=+1] + +ifndef::openshift-rosa[] +include::modules/cluster-logging-collector-log-forward-gcp.adoc[leveloffset=+1] +endif::openshift-rosa[] + +include::modules/logging-forward-splunk.adoc[leveloffset=+1] + +include::modules/logging-http-forward.adoc[leveloffset=+1] + +include::modules/cluster-logging-collector-log-forward-project.adoc[leveloffset=+1] + +include::modules/cluster-logging-collector-log-forward-logs-from-application-pods.adoc[leveloffset=+1] + +[role="_additional-resources"] +.Additional resources + +ifdef::openshift-enterprise,openshift-origin[] +* xref:../../networking/ovn_kubernetes_network_provider/logging-network-policy.adoc#logging-network-policy[Logging for egress firewall and network policy rules] +endif::[] +ifdef::openshift-rosa,openshift-dedicated[] +* link:https://docs.openshift.com/container-platform/latest/networking/ovn_kubernetes_network_provider/logging-network-policy.html#logging-network-policy[Logging for egress firewall and network policy rules] +endif::[] + +include::modules/cluster-logging-collector-log-forward-loki.adoc[leveloffset=+1] + 
+[role="_additional-resources"] +.Additional resources +* link:https://grafana.com/docs/loki/latest/configuration/[Configuring Loki server] + +include::modules/cluster-logging-collector-log-forward-es.adoc[leveloffset=+1] + +include::modules/cluster-logging-collector-log-forward-fluentd.adoc[leveloffset=+1] + +include::modules/cluster-logging-collector-log-forward-syslog.adoc[leveloffset=+1] + +include::modules/cluster-logging-collector-log-forward-kafka.adoc[leveloffset=+1] + +// Cloudwatch docs +include::modules/cluster-logging-collector-log-forward-cloudwatch.adoc[leveloffset=+1] +include::modules/cluster-logging-collector-log-forward-secret-cloudwatch.adoc[leveloffset=+1] + +ifdef::openshift-rosa[] +include::modules/rosa-cluster-logging-collector-log-forward-sts-cloudwatch.adoc[leveloffset=+1] +endif::[] + +ifdef::openshift-enterprise,openshift-origin,openshift-dedicated[] +include::modules/cluster-logging-collector-log-forward-sts-cloudwatch.adoc[leveloffset=+1] +endif::[] + +[role="_additional-resources"] +.Additional resources +* link:https://docs.aws.amazon.com/STS/latest/APIReference/welcome.html[AWS STS API Reference] diff --git a/logging/log_collection_forwarding/log-forwarding-troubleshooting.adoc b/logging/log_collection_forwarding/log-forwarding-troubleshooting.adoc new file mode 100644 index 0000000000..2e345a01f1 --- /dev/null +++ b/logging/log_collection_forwarding/log-forwarding-troubleshooting.adoc @@ -0,0 +1,11 @@ +:_mod-docs-content-type: ASSEMBLY +include::_attributes/common-attributes.adoc[] +include::_attributes/attributes-openshift-dedicated.adoc[] +[id="log-forwarding-troubleshooting"] += Troubleshooting log forwarding +:context: log-forwarding-troubleshooting + +toc::[] + +include::modules/redeploying-fluentd-pods.adoc[leveloffset=+1] +include::modules/loki-rate-limit-errors.adoc[leveloffset=+1] diff --git a/logging/log_collection_forwarding/log-forwarding.adoc b/logging/log_collection_forwarding/log-forwarding.adoc index 
e1e347b075..47ec3d4d0c 100644 --- a/logging/log_collection_forwarding/log-forwarding.adoc +++ b/logging/log_collection_forwarding/log-forwarding.adoc @@ -26,10 +26,10 @@ Administrators can create `ClusterLogForwarder` resources that specify which log Administrators can also authorize RBAC permissions that define which service accounts and users can access and forward which types of logs. -include::modules/log-forwarding-implementations.adoc[leveloffset=+1] +include::modules/log-forwarding-implementations.adoc[leveloffset=+2] [id="log-forwarding-enabling-multi-clf-feature"] -== Enabling the multi log forwarder feature for a cluster +=== Enabling the multi log forwarder feature for a cluster To use the multi log forwarder feature, you must create a service account and cluster role bindings for that service account. You can then reference the service account in the `ClusterLogForwarder` resource to control access permissions. @@ -38,7 +38,7 @@ To use the multi log forwarder feature, you must create a service account and cl In order to support multi log forwarding in additional namespaces other than the `openshift-logging` namespace, you must xref:../../logging/cluster-logging-upgrading.adoc#logging-operator-upgrading-all-ns_cluster-logging-upgrading[update the {clo} to watch all namespaces]. This functionality is supported by default in new {clo} version 5.8 installations. 
==== -include::modules/log-collection-rbac-permissions.adoc[leveloffset=+2] +include::modules/log-collection-rbac-permissions.adoc[leveloffset=+3] [role="_additional-resources"] .Additional resources @@ -47,192 +47,3 @@ ifdef::openshift-enterprise[] * xref:../../authentication/using-service-accounts-in-applications.adoc#using-service-accounts-in-applications[Using service accounts in applications] endif::[] * link:https://kubernetes.io/docs/reference/access-authn-authz/rbac/[Using RBAC Authorization Kubernetes documentation] - -include::modules/logging-create-clf.adoc[leveloffset=+1] - -include::modules/logging-multiline-except.adoc[leveloffset=+1] - -[id="log-forwarding-audit-logs"] -== Sending audit logs to the internal log store - -By default, the {logging} sends container and infrastructure logs to the default internal log store defined in the `ClusterLogging` custom resource. However, it does not send audit logs to the internal store because it does not provide secure storage. If this default configuration meets your needs, you do not need to configure the Cluster Log Forwarder. - -[NOTE] -==== -To send audit logs to the internal Elasticsearch log store, use the Cluster Log Forwarder as described in xref:../../logging/log_storage/logging-config-es-store.adoc#cluster-logging-elasticsearch-audit_logging-config-es-store[Forwarding audit logs to the log store]. 
-==== - -include::modules/cluster-logging-collector-log-forwarding-about.adoc[leveloffset=+1] - -include::modules/cluster-logging-forwarding-separate-indices.adoc[leveloffset=+1] - -include::modules/cluster-logging-collector-log-forward-es.adoc[leveloffset=+1] - -include::modules/cluster-logging-collector-log-forward-fluentd.adoc[leveloffset=+1] - -include::modules/cluster-logging-collector-log-forward-syslog.adoc[leveloffset=+1] - -include::modules/cluster-logging-collector-log-forward-kafka.adoc[leveloffset=+1] - -include::modules/cluster-logging-collector-log-forward-cloudwatch.adoc[leveloffset=+1] - -ifdef::openshift-rosa[] -include::modules/rosa-cluster-logging-collector-log-forward-sts-cloudwatch.adoc[leveloffset=+2] -endif::[] - -ifdef::openshift-enterprise,openshift-origin,openshift-dedicated[] -[id="cluster-logging-collector-log-forward-sts-cloudwatch_{context}"] -=== Forwarding logs to Amazon CloudWatch from STS enabled clusters - -For clusters with AWS Security Token Service (STS) enabled, you can create an AWS service account manually or create a credentials request by using the -ifdef::openshift-enterprise,openshift-origin[] -xref:../../authentication/managing_cloud_provider_credentials/about-cloud-credential-operator.adoc[Cloud Credential Operator(CCO)] -endif::[] -ifdef::openshift-dedicated[] -link:https://docs.openshift.com/container-platform/latest/authentication/managing_cloud_provider_credentials/about-cloud-credential-operator.html[Cloud Credential Operator(CCO)] -endif::[] - utility `ccoctl`. - -.Prerequisites - -* {logging-title-uc}: 5.5 and later - -.Procedure - -. 
Create a `CredentialsRequest` custom resource YAML by using the template below: -+ -.CloudWatch credentials request template -[source,yaml] ----- -apiVersion: cloudcredential.openshift.io/v1 -kind: CredentialsRequest -metadata: - name: -credrequest - namespace: openshift-cloud-credential-operator -spec: - providerSpec: - apiVersion: cloudcredential.openshift.io/v1 - kind: AWSProviderSpec - statementEntries: - - action: - - logs:PutLogEvents - - logs:CreateLogGroup - - logs:PutRetentionPolicy - - logs:CreateLogStream - - logs:DescribeLogGroups - - logs:DescribeLogStreams - effect: Allow - resource: arn:aws:logs:*:*:* - secretRef: - name: - namespace: openshift-logging - serviceAccountNames: - - logcollector ----- -+ -. Use the `ccoctl` command to create a role for AWS using your `CredentialsRequest` CR. With the `CredentialsRequest` object, this `ccoctl` command creates an IAM role with a trust policy that is tied to the specified OIDC identity provider, and a permissions policy that grants permissions to perform operations on CloudWatch resources. This command also creates a YAML configuration file in `//manifests/openshift-logging--credentials.yaml`. This secret file contains the `role_arn` key/value used during authentication with the AWS IAM identity provider. -+ -[source,terminal] ----- -$ ccoctl aws create-iam-roles \ ---name= \ ---region= \ ---credentials-requests-dir=/credrequests \ ---identity-provider-arn=arn:aws:iam:::oidc-provider/-oidc.s3..amazonaws.com <1> ----- -<1> is the name used to tag your cloud resources and should match the name used during your STS cluster install -+ -. Apply the secret created: -[source,terminal] -+ ----- -$ oc apply -f output/manifests/openshift-logging--credentials.yaml ----- -+ -. 
Create or edit a `ClusterLogForwarder` custom resource: -+ -[source,yaml] ----- -apiVersion: logging.openshift.io/v1 -kind: ClusterLogForwarder -metadata: - name: <1> - namespace: <2> -spec: - serviceAccountName: clf-collector <3> - outputs: - - name: cw <4> - type: cloudwatch <5> - cloudwatch: - groupBy: logType <6> - groupPrefix: <7> - region: us-east-2 <8> - secret: - name: <9> - pipelines: - - name: to-cloudwatch <10> - inputRefs: <11> - - infrastructure - - audit - - application - outputRefs: - - cw <12> ----- -<1> In legacy implementations, the CR name must be `instance`. In multi log forwarder implementations, you can use any name. -<2> In legacy implementations, the CR namespace must be `openshift-logging`. In multi log forwarder implementations, you can use any namespace. -<3> Specify the `clf-collector` service account. The service account is only required in multi log forwarder implementations if the log forwarder is not deployed in the `openshift-logging` namespace. -<4> Specify a name for the output. -<5> Specify the `cloudwatch` type. -<6> Optional: Specify how to group the logs: -+ -* `logType` creates log groups for each log type. -* `namespaceName` creates a log group for each application name space. Infrastructure and audit logs are unaffected, remaining grouped by `logType`. -* `namespaceUUID` creates a new log groups for each application namespace UUID. It also creates separate log groups for infrastructure and audit logs. -<7> Optional: Specify a string to replace the default `infrastructureName` prefix in the names of the log groups. -<8> Specify the AWS region. -<9> Specify the name of the secret that contains your AWS credentials. -<10> Optional: Specify a name for the pipeline. -<11> Specify which log types to forward by using the pipeline: `application,` `infrastructure`, or `audit`. -<12> Specify the name of the output to use when forwarding logs with this pipeline. 
-endif::[] - -[role="_additional-resources"] -.Additional resources -* link:https://docs.aws.amazon.com/STS/latest/APIReference/welcome.html[AWS STS API Reference] - -include::modules/cluster-logging-collector-log-forward-secret-cloudwatch.adoc[leveloffset=+2] - -include::modules/cluster-logging-collector-log-forward-loki.adoc[leveloffset=+1] - -include::modules/loki-rate-limit-errors.adoc[leveloffset=+2] - -[role="_additional-resources"] -.Additional resources - -* xref:../../logging/cluster-logging-exported-fields.adoc#cluster-logging-exported-fields-kubernetes_cluster-logging-exported-fields[Log Record Fields] - -* link:https://grafana.com/docs/loki/latest/configuration/[Configuring Loki server] - -ifndef::openshift-rosa[] -include::modules/cluster-logging-collector-log-forward-gcp.adoc[leveloffset=+1] -endif::openshift-rosa[] - -include::modules/logging-forward-splunk.adoc[leveloffset=+1] - -include::modules/logging-http-forward.adoc[leveloffset=+1] - -include::modules/cluster-logging-collector-log-forward-project.adoc[leveloffset=+1] - -include::modules/cluster-logging-collector-log-forward-logs-from-application-pods.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -ifdef::openshift-enterprise,openshift-origin[] -* xref:../../networking/ovn_kubernetes_network_provider/logging-network-policy.adoc#logging-network-policy[Logging for egress firewall and network policy rules] -endif::[] -ifdef::openshift-rosa,openshift-dedicated[] -* link:https://docs.openshift.com/container-platform/latest/networking/ovn_kubernetes_network_provider/logging-network-policy.html#logging-network-policy[Logging for egress firewall and network policy rules] -endif::[] - -include::modules/cluster-logging-troubleshooting-log-forwarding.adoc[leveloffset=+1] diff --git a/logging/log_storage/about-log-storage.adoc b/logging/log_storage/about-log-storage.adoc index ebbe734f2c..c09b3c8c84 100644 --- a/logging/log_storage/about-log-storage.adoc +++ 
b/logging/log_storage/about-log-storage.adoc @@ -7,7 +7,7 @@ include::_attributes/attributes-openshift-dedicated.adoc[] toc::[] -You can use an internal Loki or Elasticsearch log store on your cluster for storing logs, or you can use a xref:../../logging/log_collection_forwarding/log-forwarding.adoc#logging-create-clf_log-forwarding[`ClusterLogForwarder` custom resource (CR)] to forward logs to an external store. +You can use an internal Loki or Elasticsearch log store on your cluster for storing logs, or you can use a xref:../../logging/log_collection_forwarding/configuring-log-forwarding.adoc#logging-create-clf_configuring-log-forwarding[`ClusterLogForwarder` custom resource (CR)] to forward logs to an external store. [id="log-storage-overview-types"] == Log storage types diff --git a/modules/cluster-logging-collector-log-forward-gcp.adoc b/modules/cluster-logging-collector-log-forward-gcp.adoc index 3615bf44a7..70a4170ef3 100644 --- a/modules/cluster-logging-collector-log-forward-gcp.adoc +++ b/modules/cluster-logging-collector-log-forward-gcp.adoc @@ -1,6 +1,6 @@ // Module included in the following assemblies: // -// * logging/log_collection_forwarding/log-forwarding.adoc +// * logging/log_collection_forwarding/configuring-log-forwarding.adoc :_mod-docs-content-type: PROCEDURE [id="cluster-logging-collector-log-forward-gcp_{context}"] diff --git a/modules/cluster-logging-collector-log-forward-logs-from-application-pods.adoc b/modules/cluster-logging-collector-log-forward-logs-from-application-pods.adoc index 12657bb14e..285ea042d5 100644 --- a/modules/cluster-logging-collector-log-forward-logs-from-application-pods.adoc +++ b/modules/cluster-logging-collector-log-forward-logs-from-application-pods.adoc @@ -1,3 +1,7 @@ +// Module included in the following assemblies: +// +// * logging/log_collection_forwarding/configuring-log-forwarding.adoc + :_mod-docs-content-type: PROCEDURE [id="cluster-logging-collector-log-forward-logs-from-application-pods_{context}"] = 
Forwarding application logs from specific pods diff --git a/modules/cluster-logging-collector-log-forward-project.adoc b/modules/cluster-logging-collector-log-forward-project.adoc index 96d7901f21..378e780a53 100644 --- a/modules/cluster-logging-collector-log-forward-project.adoc +++ b/modules/cluster-logging-collector-log-forward-project.adoc @@ -1,3 +1,7 @@ +// Module included in the following assemblies: +// +// * logging/log_collection_forwarding/configuring-log-forwarding.adoc + :_mod-docs-content-type: PROCEDURE [id="cluster-logging-collector-log-forward-project_{context}"] = Forwarding application logs from specific projects diff --git a/modules/cluster-logging-collector-log-forward-sts-cloudwatch.adoc b/modules/cluster-logging-collector-log-forward-sts-cloudwatch.adoc new file mode 100644 index 0000000000..0549fddb79 --- /dev/null +++ b/modules/cluster-logging-collector-log-forward-sts-cloudwatch.adoc @@ -0,0 +1,118 @@ +// Module included in the following assemblies: +// +// * logging/log_collection_forwarding/configuring-log-forwarding.adoc + +:_mod-docs-content-type: PROCEDURE +[id="cluster-logging-collector-log-forward-sts-cloudwatch_{context}"] += Forwarding logs to Amazon CloudWatch from STS enabled clusters + +For clusters with AWS Security Token Service (STS) enabled, you can create an AWS service account manually or create a credentials request by using the +ifdef::openshift-enterprise,openshift-origin[] +xref:../../authentication/managing_cloud_provider_credentials/about-cloud-credential-operator.adoc[Cloud Credential Operator (CCO)] +endif::[] +ifdef::openshift-dedicated[] +link:https://docs.openshift.com/container-platform/latest/authentication/managing_cloud_provider_credentials/about-cloud-credential-operator.html[Cloud Credential Operator (CCO)] +endif::[] + utility `ccoctl`. + +.Prerequisites + +* {logging-title-uc}: 5.5 and later + +.Procedure + +. 
Create a `CredentialsRequest` custom resource YAML by using the template below: ++ +.CloudWatch credentials request template +[source,yaml] +---- +apiVersion: cloudcredential.openshift.io/v1 +kind: CredentialsRequest +metadata: + name: <your_role_name>-credrequest + namespace: openshift-cloud-credential-operator +spec: + providerSpec: + apiVersion: cloudcredential.openshift.io/v1 + kind: AWSProviderSpec + statementEntries: + - action: + - logs:PutLogEvents + - logs:CreateLogGroup + - logs:PutRetentionPolicy + - logs:CreateLogStream + - logs:DescribeLogGroups + - logs:DescribeLogStreams + effect: Allow + resource: arn:aws:logs:*:*:* + secretRef: + name: <your_role_name> + namespace: openshift-logging + serviceAccountNames: + - logcollector +---- ++ +. Use the `ccoctl` command to create a role for AWS using your `CredentialsRequest` CR. With the `CredentialsRequest` object, this `ccoctl` command creates an IAM role with a trust policy that is tied to the specified OIDC identity provider, and a permissions policy that grants permissions to perform operations on CloudWatch resources. This command also creates a YAML configuration file in `<path_to_ccoctl_output_dir>/manifests/openshift-logging-<your_role_name>-credentials.yaml`. This secret file contains the `role_arn` key/value used during authentication with the AWS IAM identity provider. ++ +[source,terminal] +---- +$ ccoctl aws create-iam-roles \ +--name=<name> \ +--region=<aws_region> \ +--credentials-requests-dir=<path_to_credrequests_dir>/credrequests \ +--identity-provider-arn=arn:aws:iam::<aws_account_id>:oidc-provider/<name>-oidc.s3.<aws_region>.amazonaws.com <1> ----
<1> <name> is the name used to tag your cloud resources and should match the name used during your STS cluster install ++ +. Apply the secret created: ++ +[source,terminal] +---- +$ oc apply -f output/manifests/openshift-logging-<your_role_name>-credentials.yaml ---- ++ +. 
Create or edit a `ClusterLogForwarder` custom resource: ++ +[source,yaml] +---- +apiVersion: logging.openshift.io/v1 +kind: ClusterLogForwarder +metadata: + name: <log_forwarder_name> <1> + namespace: <log_forwarder_namespace> <2> +spec: + serviceAccountName: clf-collector <3> + outputs: + - name: cw <4> + type: cloudwatch <5> + cloudwatch: + groupBy: logType <6> + groupPrefix: <group_prefix> <7> + region: us-east-2 <8> + secret: + name: <my_secret> <9> + pipelines: + - name: to-cloudwatch <10> + inputRefs: <11> + - infrastructure + - audit + - application + outputRefs: + - cw <12> +---- +<1> In legacy implementations, the CR name must be `instance`. In multi log forwarder implementations, you can use any name. +<2> In legacy implementations, the CR namespace must be `openshift-logging`. In multi log forwarder implementations, you can use any namespace. +<3> Specify the `clf-collector` service account. The service account is only required in multi log forwarder implementations if the log forwarder is not deployed in the `openshift-logging` namespace. +<4> Specify a name for the output. +<5> Specify the `cloudwatch` type. +<6> Optional: Specify how to group the logs: ++ +* `logType` creates log groups for each log type. +* `namespaceName` creates a log group for each application namespace. Infrastructure and audit logs are unaffected, remaining grouped by `logType`. +* `namespaceUUID` creates a new log group for each application namespace UUID. It also creates separate log groups for infrastructure and audit logs. +<7> Optional: Specify a string to replace the default `infrastructureName` prefix in the names of the log groups. +<8> Specify the AWS region. +<9> Specify the name of the secret that contains your AWS credentials. +<10> Optional: Specify a name for the pipeline. +<11> Specify which log types to forward by using the pipeline: `application`, `infrastructure`, or `audit`. +<12> Specify the name of the output to use when forwarding logs with this pipeline. 
diff --git a/modules/logging-forward-splunk.adoc b/modules/logging-forward-splunk.adoc index 4674ed0abd..0d304595c5 100644 --- a/modules/logging-forward-splunk.adoc +++ b/modules/logging-forward-splunk.adoc @@ -1,5 +1,6 @@ // Module included in the following assemblies: -// * logging/log_collection_forwarding/log-forwarding.adoc +// +// * logging/log_collection_forwarding/configuring-log-forwarding.adoc :_mod-docs-content-type: PROCEDURE [id="logging-forward-splunk_{context}"] diff --git a/modules/logging-http-forward.adoc b/modules/logging-http-forward.adoc index cdd74f7fa9..86a9e98c1a 100644 --- a/modules/logging-http-forward.adoc +++ b/modules/logging-http-forward.adoc @@ -1,5 +1,6 @@ // Module included in the following assemblies: -// * logging/log_collection_forwarding/log-forwarding +// +// * logging/log_collection_forwarding/configuring-log-forwarding.adoc :_mod-docs-content-type: PROCEDURE [id="logging-http-forward_{context}"] diff --git a/modules/cluster-logging-troubleshooting-log-forwarding.adoc b/modules/redeploying-fluentd-pods.adoc similarity index 58% rename from modules/cluster-logging-troubleshooting-log-forwarding.adoc rename to modules/redeploying-fluentd-pods.adoc index 2a54928a02..59d76fa53f 100644 --- a/modules/cluster-logging-troubleshooting-log-forwarding.adoc +++ b/modules/redeploying-fluentd-pods.adoc @@ -1,7 +1,10 @@ +// Module included in the following assemblies: +// +// * logging/log_collection_forwarding/log-forwarding-troubleshooting.adoc :_mod-docs-content-type: PROCEDURE -[id="cluster-logging-troubleshooting-log-forwarding_{context}"] -= Troubleshooting log forwarding +[id="redeploying-fluentd-pods_{context}"] += Redeploying Fluentd pods When you create a `ClusterLogForwarder` custom resource (CR), if the {clo} does not redeploy the Fluentd pods automatically, you can delete the Fluentd pods to force them to redeploy. 
@@ -11,7 +14,7 @@ When you create a `ClusterLogForwarder` custom resource (CR), if the {clo} does .Procedure -* Delete the Fluentd pods to force them to redeploy. +* Delete the Fluentd pods to force them to redeploy by running the following command: + [source,terminal] ----