- Indentation fault in Configuring CPU and memory limits for logging components in Documentation.
- Here is the documentation link: https://docs.openshift.com/container-platform/4.14/observability/logging/config/cluster-logging-memory.html#cluster-logging-memory-limits_cluster-logging-memory
- Here, `spec.collection` indentation is wrongly mentioned.
~~~
apiVersion: "logging.openshift.io/v1"
kind: "ClusterLogging"
metadata:
  name: "instance"
  namespace: openshift-logging
spec:
    collection: <<== collection indentation is wrongly mentioned
      resources:
        limits:
          memory: 736Mi
        requests:
          cpu: 200m
          memory: 736Mi
      type: fluentd
~~~
- Due to this, `spec.collection` will not get applied in the ClusterLogging instance CR.
- The correct configuration should look like the following:
~~~
apiVersion: "logging.openshift.io/v1"
kind: "ClusterLogging"
metadata:
  name: "instance"
  namespace: openshift-logging
spec:
  collection: <<== Here is the correct indentation
    resources:
      limits:
        memory: 736Mi
      requests:
        cpu: 200m
        memory: 736Mi
    type: fluentd
~~~
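- A quick way to check whether the `collection` block was actually applied is to read it back from the live CR, for example:
~~~
$ oc -n openshift-logging get clusterlogging instance -o jsonpath='{.spec.collection.resources}'
~~~
- If the output is empty, the `spec.collection` settings were not applied; with the correct indentation, the command prints the configured limits and requests.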
// Module included in the following assemblies:
//
// * observability/logging/cluster-logging-collector.adoc
// * observability/logging/log_visualization/logging-kibana.adoc

:_mod-docs-content-type: PROCEDURE
[id="cluster-logging-memory-limits_{context}"]
= Configuring CPU and memory limits

The {logging} components allow for adjustments to both the CPU and memory limits.

.Procedure

. Edit the `ClusterLogging` custom resource (CR) in the `openshift-logging` project:
+
[source,terminal]
----
$ oc -n openshift-logging edit ClusterLogging instance
----
+
[source,yaml]
----
apiVersion: "logging.openshift.io/v1"
kind: "ClusterLogging"
metadata:
  name: "instance"
  namespace: openshift-logging

...

spec:
  managementState: "Managed"
  logStore:
    type: "elasticsearch"
    elasticsearch:
      nodeCount: 3
      resources: <1>
        limits:
          memory: 16Gi
        requests:
          cpu: 200m
          memory: 16Gi
      storage:
        storageClassName: "gp2"
        size: "200G"
      redundancyPolicy: "SingleRedundancy"
  visualization:
    type: "kibana"
    kibana:
      resources: <2>
        limits:
          memory: 1Gi
        requests:
          cpu: 500m
          memory: 1Gi
      proxy:
        resources: <2>
          limits:
            memory: 100Mi
          requests:
            cpu: 100m
            memory: 100Mi
      replicas: 2
  collection:
    resources: <3>
      limits:
        memory: 736Mi
      requests:
        cpu: 200m
        memory: 736Mi
    type: fluentd
----
<1> Specify the CPU and memory limits and requests for the log store as needed. For Elasticsearch, you must adjust both the request value and the limit value.
<2> Specify the CPU and memory limits and requests for the log visualizer as needed.
<3> Specify the CPU and memory limits and requests for the log collector as needed.
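
The same collector limits can also be applied without opening an editor by using a merge patch; for example, using the values from the sample CR above:

[source,terminal]
----
$ oc -n openshift-logging patch clusterlogging instance --type merge \
  -p '{"spec":{"collection":{"resources":{"limits":{"memory":"736Mi"},"requests":{"cpu":"200m","memory":"736Mi"}}}}}'
----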