From 28ad08d5e187ca23533dccccae007affe1b75600 Mon Sep 17 00:00:00 2001
From: Michael Burke
Date: Wed, 26 Jun 2019 12:34:50 -0400
Subject: [PATCH] Editing CLO section of Infra MachineSet topic

---
 .../config/efk-logging-configuring-about.adoc |  1 +
 modules/infrastructure-moving-logging.adoc    | 83 ++++++++++++++++++-
 modules/nodes-scheduler-default-creating.adoc | 36 +++++++-
 3 files changed, 115 insertions(+), 5 deletions(-)

diff --git a/logging/config/efk-logging-configuring-about.adoc b/logging/config/efk-logging-configuring-about.adoc
index e2bbd633b2..a2843e89bd 100644
--- a/logging/config/efk-logging-configuring-about.adoc
+++ b/logging/config/efk-logging-configuring-about.adoc
@@ -19,3 +19,4 @@ For more information, see xref:../../logging/config/efk-logging-management.adoc#
 // assemblies.
 
 include::modules/efk-logging-deploying-about.adoc[leveloffset=+1]
+include::modules/infrastructure-moving-logging.adoc[leveloffset=+1]
diff --git a/modules/infrastructure-moving-logging.adoc b/modules/infrastructure-moving-logging.adoc
index c7bae1bca4..589f5a8b08 100644
--- a/modules/infrastructure-moving-logging.adoc
+++ b/modules/infrastructure-moving-logging.adoc
@@ -3,10 +3,85 @@
 // * machine_management/creating-infrastructure-machinesets.adoc
 
 [id="infrastructure-moving-logging_{context}"]
-= Moving the logging aggregation solution
+= Moving the cluster logging resources
 
-[IMPORTANT]
+You can configure the Cluster Logging Operator to deploy the pods for any or all of the
+Cluster Logging components (Elasticsearch, Kibana, and Curator) to different nodes.
+You cannot move the Cluster Logging Operator pod from its installed location.
+
+For example, you can move the Elasticsearch pods to a separate node because of
+high CPU, memory, and disk requirements.
+
+[NOTE]
 ====
-Cluster logging in {product-title} is not installed by default.
-You currently cannot move the logging component to another MachineSet.
+You should set your MachineSet to use at least 6 replicas.
 ====
+
+.Prerequisites
+
+* Cluster logging and Elasticsearch must be installed. These features are not installed by default.
+
+* If needed, get the name of the Cluster Logging Custom Resource in the `openshift-logging` project:
++
+----
+$ oc get ClusterLogging
+NAME       AGE
+instance   112m
+----
+
+.Procedure
+
+. Edit the Cluster Logging Custom Resource:
++
+----
+apiVersion: logging.openshift.io/v1
+kind: ClusterLogging
+
+....
+
+spec:
+  collection:
+    logs:
+      fluentd:
+        resources: null
+      rsyslog:
+        resources: null
+      type: fluentd
+  curation:
+    curator:
+      nodeSelector: <1>
+        node-role.kubernetes.io/infra: ''
+      resources: null
+      schedule: 30 3 * * *
+    type: curator
+  logStore:
+    elasticsearch:
+      nodeCount: 3
+      nodeSelector: <1>
+        node-role.kubernetes.io/infra: ''
+      redundancyPolicy: SingleRedundancy
+      resources:
+        limits:
+          cpu: 500m
+          memory: 4Gi
+        requests:
+          cpu: 500m
+          memory: 4Gi
+      storage: {}
+    type: elasticsearch
+  managementState: Managed
+  visualization:
+    kibana:
+      nodeSelector: <1>
+        node-role.kubernetes.io/infra: ''
+      proxy:
+        resources: null
+      replicas: 1
+      resources: null
+    type: kibana
+
+....
+
+----
+<1> Add a `nodeSelector` parameter with the appropriate value to the component you want to move. You can use a `nodeSelector` in the format shown or use `<key>: <value>` pairs, based on the value specified for the node.
+
diff --git a/modules/nodes-scheduler-default-creating.adoc b/modules/nodes-scheduler-default-creating.adoc
index 7a55ddfce4..e2710394a5 100644
--- a/modules/nodes-scheduler-default-creating.adoc
+++ b/modules/nodes-scheduler-default-creating.adoc
@@ -23,7 +23,41 @@ metadata:
   resourceVersion: '1049773'
   creationTimestamp: '2019-03-11T17:24:23Z'
 data:
-  policy.cfg: "{\n\"kind\" : \"Policy\",\n\"apiVersion\" : \"v1\",\n\"predicates\" : [\n\t{\"name\" : \"PodFitsHostPorts\"},\n\t{\"name\" : \"PodFitsResources\"},\n\t{\"name\" : \"NoDiskConflict\"},\n\t{\"name\" : \"NoVolumeZoneConflict\"},\n\t{\"name\" : \"MatchNodeSelector\"},\n\t{\"name\" : \"HostName\"}\n\t],\n\"priorities\" : [\n\t{\"name\" : \"LeastRequestedPriority\", \"weight\" : 10},\n\t{\"name\" : \"BalancedResourceAllocation\", \"weight\" : 1},\n\t{\"name\" : \"ServiceSpreadingPriority\", \"weight\" : 1},\n\t{\"name\" : \"EqualPriority\", \"weight\" : 1}\n\t]\n}\n"
+  policy.cfg: |
+    {
+    "kind" : "Policy",
+    "apiVersion" : "v1",
+    "predicates" : [
+          {"name" : "MaxGCEPDVolumeCount"},
+          {"name" : "GeneralPredicates"},
+          {"name" : "MaxAzureDiskVolumeCount"},
+          {"name" : "MaxCSIVolumeCountPred"},
+          {"name" : "CheckVolumeBinding"},
+          {"name" : "MaxEBSVolumeCount"},
+          {"name" : "PodFitsResources"},
+          {"name" : "MatchInterPodAffinity"},
+          {"name" : "CheckNodeUnschedulable"},
+          {"name" : "NoDiskConflict"},
+          {"name" : "CheckServiceAffinity"},
+          {"name" : "NoVolumeZoneConflict"},
+          {"name" : "MatchNodeSelector"},
+          {"name" : "PodToleratesNodeNoExecuteTaints"},
+          {"name" : "HostName"},
+          {"name" : "PodToleratesNodeTaints"}
+          ],
+    "priorities" : [
+          {"name" : "LeastRequestedPriority", "weight" : 1},
+          {"name" : "BalancedResourceAllocation", "weight" : 1},
+          {"name" : "ServiceSpreadingPriority", "weight" : 1},
+          {"name" : "NodePreferAvoidPodsPriority", "weight" : 1},
+          {"name" : "NodeAffinityPriority", "weight" : 1},
+          {"name" : "TaintTolerationPriority", "weight" : 1},
+          {"name" : "ImageLocalityPriority", "weight" : 1},
+          {"name" : "SelectorSpreadPriority", "weight" : 1},
+          {"name" : "InterPodAffinityPriority", "weight" : 1},
+          {"name" : "EqualPriority", "weight" : 1}
+          ]
+    }
 ----
 
 .Procedure
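
The procedure added by this patch ends at editing the Custom Resource and does not show a verification step. The following is a minimal sketch, not part of the patch, assuming the target nodes already carry the `node-role.kubernetes.io/infra: ''` label that the `nodeSelector` references: it lists those nodes, then shows which nodes the `openshift-logging` pods were scheduled onto after the Operator reconciles the change.

----
# List the nodes that carry the infra label referenced by the nodeSelector.
$ oc get nodes -l node-role.kubernetes.io/infra=

# Check which nodes the Elasticsearch, Kibana, and Curator pods landed on.
$ oc get pods -o wide -n openshift-logging
----

If a moved pod stays in the Pending state, the usual cause is that no schedulable node matches the selector, for example because no MachineSet provides nodes with that label.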