mirror of
https://github.com/openshift/openshift-docs.git
synced 2026-02-05 21:46:22 +01:00
Editing CLO section of Infra MachineSet topic
This commit is contained in:
committed by
openshift-cherrypick-robot
parent
1933966ecd
commit
28ad08d5e1
@@ -19,3 +19,4 @@ For more information, see xref:../../logging/config/efk-logging-management.adoc#
|
||||
// assemblies.
|
||||
|
||||
include::modules/efk-logging-deploying-about.adoc[leveloffset=+1]
|
||||
include::modules/infrastructure-moving-logging.adoc[leveloffset=+1]
|
||||
|
||||
@@ -3,10 +3,85 @@
|
||||
// * machine_management/creating-infrastructure-machinesets.adoc
|
||||
|
||||
[id="infrastructure-moving-logging_{context}"]
|
||||
= Moving the logging aggregation solution
|
||||
= Moving the cluster logging resources
|
||||
|
||||
[IMPORTANT]
|
||||
You can configure the Cluster Logging Operator to deploy the pods
|
||||
for any or all of the Cluster Logging components, Elasticsearch, Kibana, and Curator to different nodes.
|
||||
You cannot move the Cluster Logging Operator pod from its installed location.
|
||||
|
||||
For example, you can move the Elasticsearch pods to a separate node because of
|
||||
high CPU, memory, and disk requirements.
|
||||
|
||||
[NOTE]
|
||||
====
|
||||
Cluster logging in {product-title} is not installed by default.
|
||||
You currently cannot move the logging component to another MachineSet.
|
||||
You should set your MachineSet to use at least 6 replicas.
|
||||
====
|
||||
|
||||
.Prerequisites
|
||||
|
||||
* Cluster logging and Elasticsearch must be installed. These features are not installed by default.
|
||||
|
||||
* If needed, get the name of the Cluster Logging Custom Resource in the openshift-logging project:
|
||||
+
|
||||
----
|
||||
$ oc get ClusterLogging
|
||||
NAME AGE
|
||||
instance 112m
|
||||
----
|
||||
|
||||
.Procedure
|
||||
|
||||
. Edit the Cluster Logging Custom Resource:
|
||||
+
|
||||
----
|
||||
apiVersion: logging.openshift.io/v1
|
||||
kind: ClusterLogging
|
||||
|
||||
....
|
||||
|
||||
spec:
|
||||
collection:
|
||||
logs:
|
||||
fluentd:
|
||||
resources: null
|
||||
rsyslog:
|
||||
resources: null
|
||||
type: fluentd
|
||||
curation:
|
||||
curator:
|
||||
nodeSelector: <1>
|
||||
node-role.kubernetes.io/infra: ''
|
||||
resources: null
|
||||
schedule: 30 3 * * *
|
||||
type: curator
|
||||
logStore:
|
||||
elasticsearch:
|
||||
nodeCount: 3
|
||||
nodeSelector: <1>
|
||||
node-role.kubernetes.io/infra: ''
|
||||
redundancyPolicy: SingleRedundancy
|
||||
resources:
|
||||
limits:
|
||||
cpu: 500m
|
||||
memory: 4Gi
|
||||
requests:
|
||||
cpu: 500m
|
||||
memory: 4Gi
|
||||
storage: {}
|
||||
type: elasticsearch
|
||||
managementState: Managed
|
||||
visualization:
|
||||
kibana:
|
||||
nodeSelector: <1>
|
||||
node-role.kubernetes.io/infra: '' <1>
|
||||
proxy:
|
||||
resources: null
|
||||
replicas: 1
|
||||
resources: null
|
||||
type: kibana
|
||||
|
||||
....
|
||||
|
||||
----
|
||||
<1> Add a `nodeSelector` parameter with the appropriate value to the component you want to move. You can use a `nodeSelector` in the format shown or use `<key>: <value>` pairs, based on the value specified for the node.
|
||||
|
||||
|
||||
@@ -23,7 +23,41 @@ metadata:
|
||||
resourceVersion: '1049773'
|
||||
creationTimestamp: '2019-03-11T17:24:23Z'
|
||||
data:
|
||||
policy.cfg: "{\n\"kind\" : \"Policy\",\n\"apiVersion\" : \"v1\",\n\"predicates\" : [\n\t{\"name\" : \"PodFitsHostPorts\"},\n\t{\"name\" : \"PodFitsResources\"},\n\t{\"name\" : \"NoDiskConflict\"},\n\t{\"name\" : \"NoVolumeZoneConflict\"},\n\t{\"name\" : \"MatchNodeSelector\"},\n\t{\"name\" : \"HostName\"}\n\t],\n\"priorities\" : [\n\t{\"name\" : \"LeastRequestedPriority\", \"weight\" : 10},\n\t{\"name\" : \"BalancedResourceAllocation\", \"weight\" : 1},\n\t{\"name\" : \"ServiceSpreadingPriority\", \"weight\" : 1},\n\t{\"name\" : \"EqualPriority\", \"weight\" : 1}\n\t]\n}\n"
|
||||
policy.cfg: |
|
||||
{
|
||||
"kind" : "Policy",
|
||||
"apiVersion" : "v1",
|
||||
"predicates" : [
|
||||
{"name" : "MaxGCEPDVolumeCount"},
|
||||
{"name" : "GeneralPredicates"},
|
||||
{"name" : "MaxAzureDiskVolumeCount"},
|
||||
{"name" : "MaxCSIVolumeCountPred"},
|
||||
{"name" : "CheckVolumeBinding"},
|
||||
{"name" : "MaxEBSVolumeCount"},
|
||||
{"name" : "PodFitsResources"},
|
||||
{"name" : "MatchInterPodAffinity"},
|
||||
{"name" : "CheckNodeUnschedulable"},
|
||||
{"name" : "NoDiskConflict"},
|
||||
{"name" : "CheckServiceAffinity"},
|
||||
{"name" : "NoVolumeZoneConflict"},
|
||||
{"name" : "MatchNodeSelector"},
|
||||
{"name" : "PodToleratesNodeNoExecuteTaints"},
|
||||
{"name" : "HostName"},
|
||||
{"name" : "PodToleratesNodeTaints"}
|
||||
],
|
||||
"priorities" : [
|
||||
{"name" : "LeastRequestedPriority", "weight" : 1},
|
||||
{"name" : "BalancedResourceAllocation", "weight" : 1},
|
||||
{"name" : "ServiceSpreadingPriority", "weight" : 1},
|
||||
{"name" : "NodePreferAvoidPodsPriority", "weight" : 1},
|
||||
{"name" : "NodeAffinityPriority", "weight" : 1},
|
||||
{"name" : "TaintTolerationPriority", "weight" : 1},
|
||||
{"name" : "ImageLocalityPriority", "weight" : 1},
|
||||
{"name" : "SelectorSpreadPriority", "weight" : 1},
|
||||
{"name" : "InterPodAffinityPriority", "weight" : 1},
|
||||
{"name" : "EqualPriority", "weight" : 1}
|
||||
]
|
||||
}
|
||||
----
|
||||
|
||||
.Procedure
|
||||
|
||||
Reference in New Issue
Block a user