From 3c007c47a4095d7923d6b47ca11e8976fe790729 Mon Sep 17 00:00:00 2001 From: Michael Burke Date: Thu, 7 Aug 2025 08:13:27 -0400 Subject: [PATCH] OSDOCS 15410 CQA: Automatically scaling pods with the horizontal pod autoscaler --- modules/nodes-pods-autoscaling-about.adoc | 7 +- ...s-pods-autoscaling-best-practices-hpa.adoc | 4 + ...pods-autoscaling-creating-cpu-percent.adoc | 67 +++++ ...ods-autoscaling-creating-cpu-specific.adoc | 82 ++++++ .../nodes-pods-autoscaling-creating-cpu.adoc | 144 +---------- ...s-autoscaling-creating-memory-percent.adoc | 145 +++++++++++ ...-autoscaling-creating-memory-specific.adoc | 144 +++++++++++ ...odes-pods-autoscaling-creating-memory.adoc | 237 ------------------ ...autoscaling-creating-web-console-edit.adoc | 22 ++ ...toscaling-creating-web-console-remove.adoc | 17 ++ ...pods-autoscaling-creating-web-console.adoc | 20 +- ...s-autoscaling-requests-and-limits-hpa.adoc | 2 +- .../nodes-pods-autoscaling-status-about.adoc | 2 +- ...nodes-pods-autoscaling-status-viewing.adoc | 2 +- .../nodes-pods-autoscaling-workflow-hpa.adoc | 2 +- nodes/pods/nodes-pods-autoscaling.adoc | 19 +- nodes/pods/nodes-pods-configuring.adoc | 1 - ...pods-autoscaling-creating-cpu-prereqs.adoc | 43 ++++ 18 files changed, 555 insertions(+), 405 deletions(-) create mode 100644 modules/nodes-pods-autoscaling-creating-cpu-percent.adoc create mode 100644 modules/nodes-pods-autoscaling-creating-cpu-specific.adoc create mode 100644 modules/nodes-pods-autoscaling-creating-memory-percent.adoc create mode 100644 modules/nodes-pods-autoscaling-creating-memory-specific.adoc delete mode 100644 modules/nodes-pods-autoscaling-creating-memory.adoc create mode 100644 modules/nodes-pods-autoscaling-creating-web-console-edit.adoc create mode 100644 modules/nodes-pods-autoscaling-creating-web-console-remove.adoc create mode 100644 snippets/nodes-pods-autoscaling-creating-cpu-prereqs.adoc diff --git a/modules/nodes-pods-autoscaling-about.adoc b/modules/nodes-pods-autoscaling-about.adoc index 932ea15e0d..be35016dd6 100644 --- a/modules/nodes-pods-autoscaling-about.adoc +++ b/modules/nodes-pods-autoscaling-about.adoc @@ -1,6 +1,6 @@ // Module included in the following assemblies: // -// * nodes/nodes-pods-autoscaling-about.adoc +// * nodes/nodes-pods-autoscaling.adoc :_mod-docs-content-type: CONCEPT [id="nodes-pods-autoscaling-about_{context}"] @@ -18,12 +18,9 @@ ifdef::openshift-origin,openshift-enterprise,openshift-webscale[] To use horizontal pod autoscalers, your cluster administrator must have properly configured cluster metrics. endif::openshift-origin,openshift-enterprise,openshift-webscale[] -[id="supported-metrics_{context}"] -== Supported metrics - The following metrics are supported by horizontal pod autoscalers: -.Metrics +.Supported metrics [cols="3a,5a,5a",options="header"] |=== diff --git a/modules/nodes-pods-autoscaling-best-practices-hpa.adoc b/modules/nodes-pods-autoscaling-best-practices-hpa.adoc index a2a7db577c..147c5e1cb8 100644 --- a/modules/nodes-pods-autoscaling-best-practices-hpa.adoc +++ b/modules/nodes-pods-autoscaling-best-practices-hpa.adoc @@ -11,6 +11,8 @@ For optimal performance, configure resource requests for all pods. To prevent fr All pods must have resource requests configured:: The HPA makes a scaling decision based on the observed CPU or memory usage values of pods in an {product-title} cluster. Utilization values are calculated as a percentage of the resource requests of each pod. Missing resource request values can affect the optimal performance of the HPA. 
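+
+For example, a workload that the HPA scales might declare CPU and memory requests as in the following sketch. The deployment name, image, and request values shown here are illustrative only; use values that match your application:
+
+[source,yaml]
+----
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: example
+spec:
+  replicas: 3
+  selector:
+    matchLabels:
+      app: example
+  template:
+    metadata:
+      labels:
+        app: example
+    spec:
+      containers:
+      - name: example
+        image: registry.example.com/example:latest
+        resources:
+          requests:
+            cpu: 250m <1>
+            memory: 256Mi <2>
+----
+<1> The HPA calculates CPU utilization as a percentage of this request.
+<2> The HPA calculates memory utilization as a percentage of this request.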
+For more information, see "Understanding resource requests and limits". + Configure the cool down period:: During horizontal pod autoscaling, there might be a rapid scaling of events without a time gap. Configure the cool down period to prevent frequent replica fluctuations. You can specify a cool down period by configuring the `stabilizationWindowSeconds` field. The stabilization window is used to restrict the fluctuation of replicas count when the metrics used for scaling keep fluctuating. The autoscaling algorithm uses this window to infer a previous required state and avoid unwanted changes to workload scale. @@ -24,3 +26,5 @@ behavior: ---- In the previous example, all intended states for the past 5 minutes are considered. This approximates a rolling maximum, and avoids having the scaling algorithm often remove pods only to trigger recreating an equal pod just moments later. + +For more information, see "Scaling policies". diff --git a/modules/nodes-pods-autoscaling-creating-cpu-percent.adoc b/modules/nodes-pods-autoscaling-creating-cpu-percent.adoc new file mode 100644 index 0000000000..0bb0c90bc9 --- /dev/null +++ b/modules/nodes-pods-autoscaling-creating-cpu-percent.adoc @@ -0,0 +1,67 @@ +// Module included in the following assemblies: +// +// * nodes/nodes-pods-autoscaling.adoc + +:_mod-docs-content-type: PROCEDURE +[id="nodes-pods-autoscaling-creating-cpu-percent_{context}"] += Creating a horizontal pod autoscaler for a percent of CPU use + +Using the {product-title} CLI, you can create a horizontal pod autoscaler (HPA) to automatically scale an existing object based on percent of CPU use. The HPA scales the pods associated with that object to maintain the CPU use that you specify. + +When autoscaling for a percent of CPU use, you can use the `oc autoscale` command to specify the minimum and maximum number of pods that you want to run at any given time and the average CPU use your pods should target. If you do not specify a minimum, the pods are given default values from the {product-title} server. + +[NOTE] +==== +Use a `Deployment` object or `ReplicaSet` object unless you need a specific feature or behavior provided by other objects. +==== + +.Prerequisites + +include::snippets/nodes-pods-autoscaling-creating-cpu-prereqs.adoc[] + +.Procedure + +. Create a `HorizontalPodAutoscaler` object for an existing object: ++ +[source,terminal] +---- +$ oc autoscale / \// <1> + --min \// <2> + --max \// <3> + --cpu-percent= <4> +---- ++ +<1> Specify the type and name of the object to autoscale. The object must exist and be a `Deployment`, `DeploymentConfig`/`dc`, `ReplicaSet`/`rs`, `ReplicationController`/`rc`, or `StatefulSet`. +<2> Optional: Specify the minimum number of replicas when scaling down. +<3> Specify the maximum number of replicas when scaling up. +<4> Specify the target average CPU use over all the pods, represented as a percent of requested CPU. If not specified or negative, a default autoscaling policy is used. ++ +For example, the following command shows autoscaling for the `hello-node` deployment object. The initial deployment requires 3 pods. The HPA object increases the minimum to 5. If CPU usage on the pods reaches 75%, the pods will increase to 7: ++ +[source,terminal] +---- +$ oc autoscale deployment/hello-node --min=5 --max=7 --cpu-percent=75 +---- + +. 
Create the horizontal pod autoscaler: ++ +[source,terminal] +---- +$ oc create -f .yaml +---- + +.Verification + +* Ensure that the horizontal pod autoscaler was created: ++ +[source,terminal] +---- +$ oc get hpa cpu-autoscale +---- ++ +.Example output +[source,terminal] +---- +NAME REFERENCE TARGETS MINPODS MAXPODS REPLICAS AGE +cpu-autoscale Deployment/example 173m/500m 1 10 1 20m +---- diff --git a/modules/nodes-pods-autoscaling-creating-cpu-specific.adoc b/modules/nodes-pods-autoscaling-creating-cpu-specific.adoc new file mode 100644 index 0000000000..745baef116 --- /dev/null +++ b/modules/nodes-pods-autoscaling-creating-cpu-specific.adoc @@ -0,0 +1,82 @@ +// Module included in the following assemblies: +// +// * nodes/nodes-pods-autoscaling.adoc + +:_mod-docs-content-type: PROCEDURE +[id="nodes-pods-autoscaling-creating-cpu-specific_{context}"] += Creating a horizontal pod autoscaler for a specific CPU value + +Using the {product-title} CLI, you can create a horizontal pod autoscaler (HPA) to automatically scale an existing object based on a specific CPU value by creating a `HorizontalPodAutoscaler` object with the target CPU and pod limits. The HPA scales the pods associated with that object to maintain the CPU use that you specify. + +[NOTE] +==== +Use a `Deployment` object or `ReplicaSet` object unless you need a specific feature or behavior provided by other objects. +==== + +.Prerequisites + +include::snippets/nodes-pods-autoscaling-creating-cpu-prereqs.adoc[] + +.Procedure + +. Create a YAML file similar to the following for an existing object: ++ +[source,yaml,options="nowrap"] +---- +apiVersion: autoscaling/v2 <1> +kind: HorizontalPodAutoscaler +metadata: + name: cpu-autoscale <2> + namespace: default +spec: + scaleTargetRef: + apiVersion: apps/v1 <3> + kind: Deployment <4> + name: example <5> + minReplicas: 1 <6> + maxReplicas: 10 <7> + metrics: <8> + - type: Resource + resource: + name: cpu <9> + target: + type: AverageValue <10> + averageValue: 500m <11> +---- +<1> Use the `autoscaling/v2` API. +<2> Specify a name for this horizontal pod autoscaler object. +<3> Specify the API version of the object to scale: +* For a `Deployment`, `ReplicaSet`, `Statefulset` object, use `apps/v1`. +* For a `ReplicationController`, use `v1`. +* For a `DeploymentConfig`, use `apps.openshift.io/v1`. +<4> Specify the type of object. The object must be a `Deployment`, `DeploymentConfig`/`dc`, `ReplicaSet`/`rs`, `ReplicationController`/`rc`, or `StatefulSet`. +<5> Specify the name of the object to scale. The object must exist. +<6> Specify the minimum number of replicas when scaling down. +<7> Specify the maximum number of replicas when scaling up. +<8> Use the `metrics` parameter for memory use. +<9> Specify `cpu` for CPU usage. +<10> Set to `AverageValue`. +<11> Set to `averageValue` with the targeted CPU value. + +. 
Create the horizontal pod autoscaler: ++ +[source,terminal] +---- +$ oc create -f .yaml +---- + +.Verification + +* Check that the horizontal pod autoscaler was created: ++ +[source,terminal] +---- +$ oc get hpa cpu-autoscale +---- ++ +.Example output +[source,terminal] +---- +NAME REFERENCE TARGETS MINPODS MAXPODS REPLICAS AGE +cpu-autoscale Deployment/example 173m/500m 1 10 1 20m +---- diff --git a/modules/nodes-pods-autoscaling-creating-cpu.adoc b/modules/nodes-pods-autoscaling-creating-cpu.adoc index 85f0ec5b74..ede04941e7 100644 --- a/modules/nodes-pods-autoscaling-creating-cpu.adoc +++ b/modules/nodes-pods-autoscaling-creating-cpu.adoc @@ -1,147 +1,13 @@ // Module included in the following assemblies: // -// * nodes/nodes-pods-autoscaling-about.adoc +// * nodes/nodes-pods-autoscaling.adoc :_mod-docs-content-type: PROCEDURE [id="nodes-pods-autoscaling-creating-cpu_{context}"] -= Creating a horizontal pod autoscaler for CPU utilization by using the CLI += Creating a horizontal pod autoscaler by using the CLI -Using the {product-title} CLI, you can create a horizontal pod autoscaler (HPA) to automatically scale an existing `Deployment`, `DeploymentConfig`, `ReplicaSet`, `ReplicationController`, or `StatefulSet` object. The HPA scales the pods associated with that object to maintain the CPU usage you specify. +Using the {product-title} CLI, you can create a horizontal pod autoscaler (HPA) to automatically scale an existing `Deployment`, `DeploymentConfig`, `ReplicaSet`, `ReplicationController`, or `StatefulSet` object. The HPA scales the pods associated with that object to maintain the CPU or memory resources that you specify. -[NOTE] -==== -It is recommended to use a `Deployment` object or `ReplicaSet` object unless you need a specific feature or behavior provided by other objects. -==== - -The HPA increases and decreases the number of replicas between the minimum and maximum numbers to maintain the specified CPU utilization across all pods. - -When autoscaling for CPU utilization, you can use the `oc autoscale` command and specify the minimum and maximum number of pods you want to run at any given time and the average CPU utilization your pods should target. If you do not specify a minimum, the pods are given default values from the {product-title} server. - -To autoscale for a specific CPU value, create a `HorizontalPodAutoscaler` object with the target CPU and pod limits. - -.Prerequisites - -To use horizontal pod autoscalers, your cluster administrator must have properly configured cluster metrics. -You can use the `oc describe PodMetrics ` command to determine if metrics are configured. If metrics are -configured, the output appears similar to the following, with `Cpu` and `Memory` displayed under `Usage`. 
- -[source,terminal] ----- -$ oc describe PodMetrics openshift-kube-scheduler-ip-10-0-135-131.ec2.internal ----- - -.Example output -[source,text,options="nowrap"] ----- -Name: openshift-kube-scheduler-ip-10-0-135-131.ec2.internal -Namespace: openshift-kube-scheduler -Labels: -Annotations: -API Version: metrics.k8s.io/v1beta1 -Containers: - Name: wait-for-host-port - Usage: - Memory: 0 - Name: scheduler - Usage: - Cpu: 8m - Memory: 45440Ki -Kind: PodMetrics -Metadata: - Creation Timestamp: 2019-05-23T18:47:56Z - Self Link: /apis/metrics.k8s.io/v1beta1/namespaces/openshift-kube-scheduler/pods/openshift-kube-scheduler-ip-10-0-135-131.ec2.internal -Timestamp: 2019-05-23T18:47:56Z -Window: 1m0s -Events: ----- - -.Procedure - -To create a horizontal pod autoscaler for CPU utilization: - -. Perform one of the following: - -** To scale based on the percent of CPU utilization, create a `HorizontalPodAutoscaler` object for an existing object: -+ -[source,terminal] ----- -$ oc autoscale / \// <1> - --min \// <2> - --max \// <3> - --cpu-percent= <4> ----- -+ -<1> Specify the type and name of the object to autoscale. The object must exist and be a `Deployment`, `DeploymentConfig`/`dc`, `ReplicaSet`/`rs`, `ReplicationController`/`rc`, or `StatefulSet`. -<2> Optionally, specify the minimum number of replicas when scaling down. -<3> Specify the maximum number of replicas when scaling up. -<4> Specify the target average CPU utilization over all the pods, represented as a percent of requested CPU. If not specified or negative, a default autoscaling policy is used. -+ -For example, the following command shows autoscaling for the `hello-node` deployment object. The initial deployment requires 3 pods. The HPA object increases the minimum to 5. If CPU usage on the pods reaches 75%, the pods will increase to 7: -+ -[source,terminal] ----- -$ oc autoscale deployment/hello-node --min=5 --max=7 --cpu-percent=75 ----- - -** To scale for a specific CPU value, create a YAML file similar to the following for an existing object: -+ -.. Create a YAML file similar to the following: -+ -[source,yaml,options="nowrap"] ----- -apiVersion: autoscaling/v2 <1> -kind: HorizontalPodAutoscaler -metadata: - name: cpu-autoscale <2> - namespace: default -spec: - scaleTargetRef: - apiVersion: apps/v1 <3> - kind: Deployment <4> - name: example <5> - minReplicas: 1 <6> - maxReplicas: 10 <7> - metrics: <8> - - type: Resource - resource: - name: cpu <9> - target: - type: AverageValue <10> - averageValue: 500m <11> ----- -<1> Use the `autoscaling/v2` API. -<2> Specify a name for this horizontal pod autoscaler object. -<3> Specify the API version of the object to scale: -* For a `Deployment`, `ReplicaSet`, `Statefulset` object, use `apps/v1`. -* For a `ReplicationController`, use `v1`. -* For a `DeploymentConfig`, use `apps.openshift.io/v1`. -<4> Specify the type of object. The object must be a `Deployment`, `DeploymentConfig`/`dc`, `ReplicaSet`/`rs`, `ReplicationController`/`rc`, or `StatefulSet`. -<5> Specify the name of the object to scale. The object must exist. -<6> Specify the minimum number of replicas when scaling down. -<7> Specify the maximum number of replicas when scaling up. -<8> Use the `metrics` parameter for memory utilization. -<9> Specify `cpu` for CPU utilization. -<10> Set to `AverageValue`. -<11> Set to `averageValue` with the targeted CPU value. - -.. Create the horizontal pod autoscaler: -+ -[source,terminal] ----- -$ oc create -f .yaml ----- - -. 
Verify that the horizontal pod autoscaler was created: -+ -[source,terminal] ----- -$ oc get hpa cpu-autoscale ----- -+ -.Example output -[source,terminal] ----- -NAME REFERENCE TARGETS MINPODS MAXPODS REPLICAS AGE -cpu-autoscale Deployment/example 173m/500m 1 10 1 20m ----- +You can autoscale based on CPU or memory use by specifying a percentage of resource usage or a specific value, as described in the following sections. +The HPA increases and decreases the number of replicas between the minimum and maximum numbers to maintain the specified resource use across all pods. diff --git a/modules/nodes-pods-autoscaling-creating-memory-percent.adoc b/modules/nodes-pods-autoscaling-creating-memory-percent.adoc new file mode 100644 index 0000000000..2b4163bf80 --- /dev/null +++ b/modules/nodes-pods-autoscaling-creating-memory-percent.adoc @@ -0,0 +1,145 @@ +// Module included in the following assemblies: +// +// * nodes/nodes-pods-autoscaling.adoc + +:_mod-docs-content-type: PROCEDURE +[id="nodes-pods-autoscaling-creating-memory-percent_{context}"] + += Creating a horizontal pod autoscaler object for a percent of memory use + +Using the {product-title} CLI, you can create a horizontal pod autoscaler (HPA) to automatically scale an existing object based on a percent of memory use. The HPA scales the pods associated with that object to maintain the memory use that you specify. + +[NOTE] +==== +Use a `Deployment` object or `ReplicaSet` object unless you need a specific feature or behavior provided by other objects. +==== + +You can specify the minimum and maximum number of pods and the average memory use that your pods should target. If you do not specify a minimum, the pods are given default values from the {product-title} server. + +.Prerequisites + +include::snippets/nodes-pods-autoscaling-creating-cpu-prereqs.adoc[] + +.Procedure + +. Create a `HorizontalPodAutoscaler` object similar to the following for an existing object: ++ +[source,yaml,options="nowrap"] +---- +apiVersion: autoscaling/v2 <1> +kind: HorizontalPodAutoscaler +metadata: + name: memory-autoscale <2> + namespace: default +spec: + scaleTargetRef: + apiVersion: apps/v1 <3> + kind: Deployment <4> + name: example <5> + minReplicas: 1 <6> + maxReplicas: 10 <7> + metrics: <8> + - type: Resource + resource: + name: memory <9> + target: + type: Utilization <10> + averageUtilization: 50 <11> + behavior: <12> + scaleUp: + stabilizationWindowSeconds: 180 + policies: + - type: Pods + value: 6 + periodSeconds: 120 + - type: Percent + value: 10 + periodSeconds: 120 + selectPolicy: Max +---- +<1> Use the `autoscaling/v2` API. +<2> Specify a name for this horizontal pod autoscaler object. +<3> Specify the API version of the object to scale: +* For a ReplicationController, use `v1`. +* For a DeploymentConfig, use `apps.openshift.io/v1`. +* For a Deployment, ReplicaSet, Statefulset object, use `apps/v1`. +<4> Specify the type of object. The object must be a `Deployment`, `DeploymentConfig`, +`ReplicaSet`, `ReplicationController`, or `StatefulSet`. +<5> Specify the name of the object to scale. The object must exist. +<6> Specify the minimum number of replicas when scaling down. +<7> Specify the maximum number of replicas when scaling up. +<8> Use the `metrics` parameter for memory usage. +<9> Specify `memory` for memory usage. +<10> Set to `Utilization`. +<11> Specify `averageUtilization` and a target average memory usage over all the pods, +represented as a percent of requested memory. The target pods must have memory requests configured. 
+<12> Optional: Specify a scaling policy to control the rate of scaling up or down. + +. Create the horizontal pod autoscaler by using a command similar to the following: ++ +[source,terminal] +---- +$ oc create -f .yaml +---- ++ +For example: ++ +[source,terminal] +---- +$ oc create -f hpa.yaml +---- ++ +.Example output +[source,terminal] +---- +horizontalpodautoscaler.autoscaling/hpa-resource-metrics-memory created +---- + +.Verification + +* Check that the horizontal pod autoscaler was created by using a command similar to the following: ++ +[source,terminal] +---- +$ oc get hpa hpa-resource-metrics-memory +---- ++ +.Example output +[source,terminal] +---- +NAME REFERENCE TARGETS MINPODS MAXPODS REPLICAS AGE +hpa-resource-metrics-memory Deployment/example 2441216/500Mi 1 10 1 20m +---- + +* Check the details of the horizontal pod autoscaler by using a command similar to the following: ++ +[source,terminal] +---- +$ oc describe hpa hpa-resource-metrics-memory +---- ++ +.Example output +[source,text] +---- +Name: hpa-resource-metrics-memory +Namespace: default +Labels: +Annotations: +CreationTimestamp: Wed, 04 Mar 2020 16:31:37 +0530 +Reference: Deployment/example +Metrics: ( current / target ) + resource memory on pods: 2441216 / 500Mi +Min replicas: 1 +Max replicas: 10 +ReplicationController pods: 1 current / 1 desired +Conditions: + Type Status Reason Message + ---- ------ ------ ------- + AbleToScale True ReadyForNewScale recommended size matches current size + ScalingActive True ValidMetricFound the HPA was able to successfully calculate a replica count from memory resource + ScalingLimited False DesiredWithinRange the desired count is within the acceptable range +Events: + Type Reason Age From Message + ---- ------ ---- ---- ------- + Normal SuccessfulRescale 6m34s horizontal-pod-autoscaler New size: 1; reason: All metrics below target +---- diff --git a/modules/nodes-pods-autoscaling-creating-memory-specific.adoc b/modules/nodes-pods-autoscaling-creating-memory-specific.adoc new file mode 100644 index 0000000000..4c63910b7d --- /dev/null +++ b/modules/nodes-pods-autoscaling-creating-memory-specific.adoc @@ -0,0 +1,144 @@ +// Module included in the following assemblies: +// +// * nodes/nodes-pods-autoscaling.adoc + +:_mod-docs-content-type: PROCEDURE +[id="nodes-pods-autoscaling-creating-memory-specific_{context}"] + += Creating a horizontal pod autoscaler object for specific memory use + +Using the {product-title} CLI, you can create a horizontal pod autoscaler (HPA) to automatically scale an existing object. The HPA scales the pods associated with that object to maintain the average memory use that you specify. + +[NOTE] +==== +Use a `Deployment` object or `ReplicaSet` object unless you need a specific feature or behavior provided by other objects. +==== + +You can specify the minimum and maximum number of pods and the average memory use that your pods should target. If you do not specify a minimum, the pods are given default values from the {product-title} server. + +.Prerequisites + +include::snippets/nodes-pods-autoscaling-creating-cpu-prereqs.adoc[] + +.Procedure + +. 
Create a `HorizontalPodAutoscaler` object similar to the following for an existing object: ++ +[source,yaml,options="nowrap"] +---- +apiVersion: autoscaling/v2 <1> +kind: HorizontalPodAutoscaler +metadata: + name: hpa-resource-metrics-memory <2> + namespace: default +spec: + scaleTargetRef: + apiVersion: apps/v1 <3> + kind: Deployment <4> + name: example <5> + minReplicas: 1 <6> + maxReplicas: 10 <7> + metrics: <8> + - type: Resource + resource: + name: memory <9> + target: + type: AverageValue <10> + averageValue: 500Mi <11> + behavior: <12> + scaleDown: + stabilizationWindowSeconds: 300 + policies: + - type: Pods + value: 4 + periodSeconds: 60 + - type: Percent + value: 10 + periodSeconds: 60 + selectPolicy: Max +---- +<1> Use the `autoscaling/v2` API. +<2> Specify a name for this horizontal pod autoscaler object. +<3> Specify the API version of the object to scale: +* For a `Deployment`, `ReplicaSet`, or `Statefulset` object, use `apps/v1`. +* For a `ReplicationController`, use `v1`. +* For a `DeploymentConfig`, use `apps.openshift.io/v1`. +<4> Specify the type of object. The object must be a `Deployment`, `DeploymentConfig`, +`ReplicaSet`, `ReplicationController`, or `StatefulSet`. +<5> Specify the name of the object to scale. The object must exist. +<6> Specify the minimum number of replicas when scaling down. +<7> Specify the maximum number of replicas when scaling up. +<8> Use the `metrics` parameter for memory usage. +<9> Specify `memory` for memory usage. +<10> Set the type to `AverageValue`. +<11> Specify `averageValue` and a specific memory value. +<12> Optional: Specify a scaling policy to control the rate of scaling up or down. + +. Create the horizontal pod autoscaler by using a command similar to the following: ++ +[source,terminal] +---- +$ oc create -f .yaml +---- ++ +For example: ++ +[source,terminal] +---- +$ oc create -f hpa.yaml +---- ++ +.Example output +[source,terminal] +---- +horizontalpodautoscaler.autoscaling/hpa-resource-metrics-memory created +---- + +.Verification + +* Check that the horizontal pod autoscaler was created by using a command similar to the following: ++ +[source,terminal] +---- +$ oc get hpa hpa-resource-metrics-memory +---- ++ +.Example output +[source,terminal] +---- +NAME REFERENCE TARGETS MINPODS MAXPODS REPLICAS AGE +hpa-resource-metrics-memory Deployment/example 2441216/500Mi 1 10 1 20m +---- + +* Check the details of the horizontal pod autoscaler by using a command similar to the following: ++ +[source,terminal] +---- +$ oc describe hpa hpa-resource-metrics-memory +---- ++ +.Example output +[source,text] +---- +Name: hpa-resource-metrics-memory +Namespace: default +Labels: +Annotations: +CreationTimestamp: Wed, 04 Mar 2020 16:31:37 +0530 +Reference: Deployment/example +Metrics: ( current / target ) + resource memory on pods: 2441216 / 500Mi +Min replicas: 1 +Max replicas: 10 +ReplicationController pods: 1 current / 1 desired +Conditions: + Type Status Reason Message + ---- ------ ------ ------- + AbleToScale True ReadyForNewScale recommended size matches current size + ScalingActive True ValidMetricFound the HPA was able to successfully calculate a replica count from memory resource + ScalingLimited False DesiredWithinRange the desired count is within the acceptable range +Events: + Type Reason Age From Message + ---- ------ ---- ---- ------- + Normal SuccessfulRescale 6m34s horizontal-pod-autoscaler New size: 1; reason: All metrics below target +---- diff --git a/modules/nodes-pods-autoscaling-creating-memory.adoc 
b/modules/nodes-pods-autoscaling-creating-memory.adoc deleted file mode 100644 index ea886504b1..0000000000 --- a/modules/nodes-pods-autoscaling-creating-memory.adoc +++ /dev/null @@ -1,237 +0,0 @@ -// Module included in the following assemblies: -// -// * nodes/nodes-pods-autoscaling-about.adoc - -:_mod-docs-content-type: PROCEDURE -[id="nodes-pods-autoscaling-creating-memory_{context}"] - -= Creating a horizontal pod autoscaler object for memory utilization by using the CLI - -Using the {product-title} CLI, you can create a horizontal pod autoscaler (HPA) to automatically scale an existing -`Deployment`, `DeploymentConfig`, `ReplicaSet`, `ReplicationController`, or `StatefulSet` object. The HPA -scales the pods associated with that object to maintain the average memory utilization you specify, either a direct value or a percentage -of requested memory. - -[NOTE] -==== -It is recommended to use a `Deployment` object or `ReplicaSet` object unless you need a specific feature or behavior provided by other objects. -==== - -The HPA increases and decreases the number of replicas between the minimum and maximum numbers to maintain -the specified memory utilization across all pods. - -For memory utilization, you can specify the minimum and maximum number of pods and the average memory utilization -your pods should target. If you do not specify a minimum, the pods are given default values from the {product-title} server. - -.Prerequisites - -To use horizontal pod autoscalers, your cluster administrator must have properly configured cluster metrics. -You can use the `oc describe PodMetrics ` command to determine if metrics are configured. If metrics are -configured, the output appears similar to the following, with `Cpu` and `Memory` displayed under `Usage`. - -[source,terminal] ----- -$ oc describe PodMetrics openshift-kube-scheduler-ip-10-0-129-223.compute.internal -n openshift-kube-scheduler ----- - -.Example output -[source,text,options="nowrap"] ----- -Name: openshift-kube-scheduler-ip-10-0-129-223.compute.internal -Namespace: openshift-kube-scheduler -Labels: -Annotations: -API Version: metrics.k8s.io/v1beta1 -Containers: - Name: wait-for-host-port - Usage: - Cpu: 0 - Memory: 0 - Name: scheduler - Usage: - Cpu: 8m - Memory: 45440Ki -Kind: PodMetrics -Metadata: - Creation Timestamp: 2020-02-14T22:21:14Z - Self Link: /apis/metrics.k8s.io/v1beta1/namespaces/openshift-kube-scheduler/pods/openshift-kube-scheduler-ip-10-0-129-223.compute.internal -Timestamp: 2020-02-14T22:21:14Z -Window: 5m0s -Events: ----- - -.Procedure - -To create a horizontal pod autoscaler for memory utilization: - -. Create a YAML file for one of the following: - -** To scale for a specific memory value, create a `HorizontalPodAutoscaler` object similar to the following for an existing object: -+ -[source,yaml,options="nowrap"] ----- -apiVersion: autoscaling/v2 <1> -kind: HorizontalPodAutoscaler -metadata: - name: hpa-resource-metrics-memory <2> - namespace: default -spec: - scaleTargetRef: - apiVersion: apps/v1 <3> - kind: Deployment <4> - name: example <5> - minReplicas: 1 <6> - maxReplicas: 10 <7> - metrics: <8> - - type: Resource - resource: - name: memory <9> - target: - type: AverageValue <10> - averageValue: 500Mi <11> - behavior: <12> - scaleDown: - stabilizationWindowSeconds: 300 - policies: - - type: Pods - value: 4 - periodSeconds: 60 - - type: Percent - value: 10 - periodSeconds: 60 - selectPolicy: Max ----- -<1> Use the `autoscaling/v2` API. -<2> Specify a name for this horizontal pod autoscaler object. 
-<3> Specify the API version of the object to scale: -* For a `Deployment`, `ReplicaSet`, or `Statefulset` object, use `apps/v1`. -* For a `ReplicationController`, use `v1`. -* For a `DeploymentConfig`, use `apps.openshift.io/v1`. -<4> Specify the type of object. The object must be a `Deployment`, `DeploymentConfig`, -`ReplicaSet`, `ReplicationController`, or `StatefulSet`. -<5> Specify the name of the object to scale. The object must exist. -<6> Specify the minimum number of replicas when scaling down. -<7> Specify the maximum number of replicas when scaling up. -<8> Use the `metrics` parameter for memory utilization. -<9> Specify `memory` for memory utilization. -<10> Set the type to `AverageValue`. -<11> Specify `averageValue` and a specific memory value. -<12> Optional: Specify a scaling policy to control the rate of scaling up or down. - -** To scale for a percentage, create a `HorizontalPodAutoscaler` object similar to the following for an existing object: -+ -[source,yaml,options="nowrap"] ----- -apiVersion: autoscaling/v2 <1> -kind: HorizontalPodAutoscaler -metadata: - name: memory-autoscale <2> - namespace: default -spec: - scaleTargetRef: - apiVersion: apps/v1 <3> - kind: Deployment <4> - name: example <5> - minReplicas: 1 <6> - maxReplicas: 10 <7> - metrics: <8> - - type: Resource - resource: - name: memory <9> - target: - type: Utilization <10> - averageUtilization: 50 <11> - behavior: <12> - scaleUp: - stabilizationWindowSeconds: 180 - policies: - - type: Pods - value: 6 - periodSeconds: 120 - - type: Percent - value: 10 - periodSeconds: 120 - selectPolicy: Max ----- -<1> Use the `autoscaling/v2` API. -<2> Specify a name for this horizontal pod autoscaler object. -<3> Specify the API version of the object to scale: -* For a ReplicationController, use `v1`. -* For a DeploymentConfig, use `apps.openshift.io/v1`. -* For a Deployment, ReplicaSet, Statefulset object, use `apps/v1`. -<4> Specify the type of object. The object must be a `Deployment`, `DeploymentConfig`, -`ReplicaSet`, `ReplicationController`, or `StatefulSet`. -<5> Specify the name of the object to scale. The object must exist. -<6> Specify the minimum number of replicas when scaling down. -<7> Specify the maximum number of replicas when scaling up. -<8> Use the `metrics` parameter for memory utilization. -<9> Specify `memory` for memory utilization. -<10> Set to `Utilization`. -<11> Specify `averageUtilization` and a target average memory utilization over all the pods, -represented as a percent of requested memory. The target pods must have memory requests configured. -<12> Optional: Specify a scaling policy to control the rate of scaling up or down. - -. Create the horizontal pod autoscaler: -+ -[source,terminal] ----- -$ oc create -f .yaml ----- -+ -For example: -+ -[source,terminal] ----- -$ oc create -f hpa.yaml ----- -+ -.Example output -[source,terminal] ----- -horizontalpodautoscaler.autoscaling/hpa-resource-metrics-memory created ----- - -. 
Verify that the horizontal pod autoscaler was created: -+ -[source,terminal] ----- -$ oc get hpa hpa-resource-metrics-memory ----- -+ -.Example output -[source,terminal] ----- -NAME REFERENCE TARGETS MINPODS MAXPODS REPLICAS AGE -hpa-resource-metrics-memory Deployment/example 2441216/500Mi 1 10 1 20m ----- -+ -[source,terminal] ----- -$ oc describe hpa hpa-resource-metrics-memory ----- -+ -.Example output -[source,text] ----- -Name: hpa-resource-metrics-memory -Namespace: default -Labels: -Annotations: -CreationTimestamp: Wed, 04 Mar 2020 16:31:37 +0530 -Reference: Deployment/example -Metrics: ( current / target ) - resource memory on pods: 2441216 / 500Mi -Min replicas: 1 -Max replicas: 10 -ReplicationController pods: 1 current / 1 desired -Conditions: - Type Status Reason Message - ---- ------ ------ ------- - AbleToScale True ReadyForNewScale recommended size matches current size - ScalingActive True ValidMetricFound the HPA was able to successfully calculate a replica count from memory resource - ScalingLimited False DesiredWithinRange the desired count is within the acceptable range -Events: - Type Reason Age From Message - ---- ------ ---- ---- ------- - Normal SuccessfulRescale 6m34s horizontal-pod-autoscaler New size: 1; reason: All metrics below target ----- diff --git a/modules/nodes-pods-autoscaling-creating-web-console-edit.adoc b/modules/nodes-pods-autoscaling-creating-web-console-edit.adoc new file mode 100644 index 0000000000..0369ced9ea --- /dev/null +++ b/modules/nodes-pods-autoscaling-creating-web-console-edit.adoc @@ -0,0 +1,22 @@ +// Module included in the following assemblies: +// +// * nodes/nodes-pods-autoscaling.adoc + +:_mod-docs-content-type: PROCEDURE +[id="nodes-pods-autoscaling-creating-web-console-edit_{context}"] += Editing a horizontal pod autoscaler by using the web console + +From the web console, you can modify a horizontal pod autoscaler (HPA) that specifies the minimum and maximum number of pods you want to run on a `Deployment` or `DeploymentConfig` object. You can also define the amount of CPU or memory usage that your pods should target. + +.Procedure + +. In the *Topology* view, click the node to reveal the side pane. + +. From the *Actions* drop-down list, select *Edit HorizontalPodAutoscaler* to open the *Edit Horizontal Pod Autoscaler* form. + +. From the *Edit Horizontal Pod Autoscaler* form, edit the minimum and maximum pod limits and the CPU and memory usage, and click *Save*. + +[NOTE] +==== +While creating or editing the horizontal pod autoscaler in the web console, you can switch from *Form view* to *YAML view*. +==== diff --git a/modules/nodes-pods-autoscaling-creating-web-console-remove.adoc b/modules/nodes-pods-autoscaling-creating-web-console-remove.adoc new file mode 100644 index 0000000000..1378f0ac80 --- /dev/null +++ b/modules/nodes-pods-autoscaling-creating-web-console-remove.adoc @@ -0,0 +1,17 @@ +// Module included in the following assemblies: +// +// * nodes/nodes-pods-autoscaling.adoc + +:_mod-docs-content-type: PROCEDURE +[id="nodes-pods-autoscaling-creating-web-console-remove_{context}"] += Removing a horizontal pod autoscaler by using the web console + +You can remove a horizontal pod autoscaler (HPA) in the web console. + +.Procedure + +. In the *Topology* view, click the node to reveal the side panel. + +. From the *Actions* drop-down list, select *Remove HorizontalPodAutoscaler*. + +. In the confirmation window, click *Remove* to remove the HPA. 
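+
+[NOTE]
+====
+An HPA created from the web console is a standard `HorizontalPodAutoscaler` object, so you can also list or delete it from the CLI if you prefer. The `<hpa-name>` and `<namespace>` values in the following commands are placeholders:
+
+[source,terminal]
+----
+$ oc get hpa -n <namespace>
+----
+
+[source,terminal]
+----
+$ oc delete hpa <hpa-name> -n <namespace>
+----
+====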
diff --git a/modules/nodes-pods-autoscaling-creating-web-console.adoc b/modules/nodes-pods-autoscaling-creating-web-console.adoc index 2ae3a6bf08..6a0cb38216 100644 --- a/modules/nodes-pods-autoscaling-creating-web-console.adoc +++ b/modules/nodes-pods-autoscaling-creating-web-console.adoc @@ -1,6 +1,6 @@ // Module included in the following assemblies: // -// * nodes/nodes-pods-autoscaling-about.adoc +// * nodes/nodes-pods-autoscaling.adoc :_mod-docs-content-type: PROCEDURE [id="nodes-pods-autoscaling-creating-web-console_{context}"] @@ -18,6 +18,7 @@ An HPA cannot be added to deployments that are part of an Operator-backed servic To create an HPA in the web console: . In the *Topology* view, click the node to reveal the side pane. + . From the *Actions* drop-down list, select *Add HorizontalPodAutoscaler* to open the *Add HorizontalPodAutoscaler* form. + .Add HorizontalPodAutoscaler @@ -29,20 +30,3 @@ image::node-add-hpa-action.png[Add HorizontalPodAutoscaler form] ==== If any of the values for CPU and memory usage are missing, a warning is displayed. ==== - -To edit an HPA in the web console: - -. In the *Topology* view, click the node to reveal the side pane. -. From the *Actions* drop-down list, select *Edit HorizontalPodAutoscaler* to open the *Edit Horizontal Pod Autoscaler* form. -. From the *Edit Horizontal Pod Autoscaler* form, edit the minimum and maximum pod limits and the CPU and memory usage, and click *Save*. - -[NOTE] -==== -While creating or editing the horizontal pod autoscaler in the web console, you can switch from *Form view* to *YAML view*. -==== - -To remove an HPA in the web console: - -. In the *Topology* view, click the node to reveal the side panel. -. From the *Actions* drop-down list, select *Remove HorizontalPodAutoscaler*. -. In the confirmation pop-up window, click *Remove* to remove the HPA. 
diff --git a/modules/nodes-pods-autoscaling-requests-and-limits-hpa.adoc b/modules/nodes-pods-autoscaling-requests-and-limits-hpa.adoc index f4949f7e1d..6d3779a13a 100644 --- a/modules/nodes-pods-autoscaling-requests-and-limits-hpa.adoc +++ b/modules/nodes-pods-autoscaling-requests-and-limits-hpa.adoc @@ -1,6 +1,6 @@ // Module included in the following assemblies: // -// * nodes/nodes-pods-autoscaling-about.adoc +// * nodes/nodes-pods-autoscaling.adoc :_mod-docs-content-type: CONCEPT [id="nodes-pods-autoscaling-requests-and-limits-hpa_{context}"] diff --git a/modules/nodes-pods-autoscaling-status-about.adoc b/modules/nodes-pods-autoscaling-status-about.adoc index 71117fafca..0aeac56212 100644 --- a/modules/nodes-pods-autoscaling-status-about.adoc +++ b/modules/nodes-pods-autoscaling-status-about.adoc @@ -1,6 +1,6 @@ // Module included in the following assemblies: // -// * nodes/nodes-pods-autoscaling-about.adoc +// * nodes/nodes-pods-autoscaling.adoc :_mod-docs-content-type: CONCEPT [id="nodes-pods-autoscaling-status-about_{context}"] diff --git a/modules/nodes-pods-autoscaling-status-viewing.adoc b/modules/nodes-pods-autoscaling-status-viewing.adoc index ddbaee75d1..b8b7c1eda1 100644 --- a/modules/nodes-pods-autoscaling-status-viewing.adoc +++ b/modules/nodes-pods-autoscaling-status-viewing.adoc @@ -1,6 +1,6 @@ // Module included in the following assemblies: // -// * nodes/nodes-pods-autoscaling-about.adoc +// * nodes/nodes-pods-autoscaling.adoc :_mod-docs-content-type: PROCEDURE [id="nodes-pods-autoscaling-status-viewing_{context}"] diff --git a/modules/nodes-pods-autoscaling-workflow-hpa.adoc b/modules/nodes-pods-autoscaling-workflow-hpa.adoc index 9a15a781a5..0e9159c4f6 100644 --- a/modules/nodes-pods-autoscaling-workflow-hpa.adoc +++ b/modules/nodes-pods-autoscaling-workflow-hpa.adoc @@ -1,6 +1,6 @@ // Module included in the following assemblies: // -// * nodes/nodes-pods-autoscaling-about.adoc +// * nodes/nodes-pods-autoscaling.adoc :_mod-docs-content-type: CONCEPT [id="nodes-pods-autoscaling-workflow-hpa_{context}"] diff --git a/nodes/pods/nodes-pods-autoscaling.adoc b/nodes/pods/nodes-pods-autoscaling.adoc index cead3d5b9e..7acbf1d0a5 100644 --- a/nodes/pods/nodes-pods-autoscaling.adoc +++ b/nodes/pods/nodes-pods-autoscaling.adoc @@ -34,13 +34,30 @@ include::modules/nodes-pods-autoscaling-requests-and-limits-hpa.adoc[leveloffset include::modules/nodes-pods-autoscaling-best-practices-hpa.adoc[leveloffset=+1] +[role="_additional-resources"] +.Additional resources +* xref:../../nodes/pods/nodes-pods-using.adoc#nodes-pods-understanding-requests-limits_nodes-pods-using-ssy[Understanding resource requests and limits] +* xref:../../nodes/pods/nodes-pods-autoscaling.adoc#nodes-pods-autoscaling-policies_nodes-pods-autoscaling[Scaling policies] + include::modules/nodes-pods-autoscaling-policies.adoc[leveloffset=+2] include::modules/nodes-pods-autoscaling-creating-web-console.adoc[leveloffset=+1] +include::modules/nodes-pods-autoscaling-creating-web-console-edit.adoc[leveloffset=+2] + +include::modules/nodes-pods-autoscaling-creating-web-console-remove.adoc[leveloffset=+2] + include::modules/nodes-pods-autoscaling-creating-cpu.adoc[leveloffset=+1] -include::modules/nodes-pods-autoscaling-creating-memory.adoc[leveloffset=+1] +include::modules/nodes-pods-autoscaling-creating-cpu-percent.adoc[leveloffset=+2] + +include::modules/nodes-pods-autoscaling-creating-cpu-specific.adoc[leveloffset=+2] + +// include::modules/nodes-pods-autoscaling-creating-memory.adoc[leveloffset=+1] + 
+include::modules/nodes-pods-autoscaling-creating-memory-percent.adoc[leveloffset=+2]
+
+include::modules/nodes-pods-autoscaling-creating-memory-specific.adoc[leveloffset=+2]
 
 include::modules/nodes-pods-autoscaling-status-about.adoc[leveloffset=+1]
 
diff --git a/nodes/pods/nodes-pods-configuring.adoc b/nodes/pods/nodes-pods-configuring.adoc
index 418bace672..e94d481858 100644
--- a/nodes/pods/nodes-pods-configuring.adoc
+++ b/nodes/pods/nodes-pods-configuring.adoc
@@ -26,7 +26,6 @@ include::modules/nodes-pods-pod-disruption-about.adoc[leveloffset=+1]
 
 include::modules/nodes-pods-pod-disruption-configuring.adoc[leveloffset=+2]
 
-//tech preview feature
 ifndef::openshift-rosa,openshift-rosa-hcp,openshift-dedicated[]
 include::modules/pod-disruption-eviction-policy.adoc[leveloffset=+2]
 endif::openshift-rosa,openshift-rosa-hcp,openshift-dedicated[]
diff --git a/snippets/nodes-pods-autoscaling-creating-cpu-prereqs.adoc b/snippets/nodes-pods-autoscaling-creating-cpu-prereqs.adoc
new file mode 100644
index 0000000000..7f590eb0c3
--- /dev/null
+++ b/snippets/nodes-pods-autoscaling-creating-cpu-prereqs.adoc
@@ -0,0 +1,43 @@
+// Text snippet included in the following modules:
+//
+// * modules/nodes-pods-autoscaling-creating-cpu-specific.adoc
+// * modules/nodes-pods-autoscaling-creating-cpu-percent.adoc
+
+:_mod-docs-content-type: SNIPPET
+
+// Prereqs for creating an HPA by using the CLI
+
+To use horizontal pod autoscalers, your cluster administrator must have properly configured cluster metrics.
+You can use the `oc describe PodMetrics <pod-name>` command to determine if metrics are configured. If metrics are
+configured, the output appears similar to the following, with `Cpu` and `Memory` displayed under `Usage`.
+
+[source,terminal]
+----
+$ oc describe PodMetrics openshift-kube-scheduler-ip-10-0-135-131.ec2.internal
+----
+
+.Example output
+[source,text,options="nowrap"]
+----
+Name:         openshift-kube-scheduler-ip-10-0-135-131.ec2.internal
+Namespace:    openshift-kube-scheduler
+Labels:       <none>
+Annotations:  <none>
+API Version:  metrics.k8s.io/v1beta1
+Containers:
+  Name:  wait-for-host-port
+  Usage:
+    Memory:  0
+  Name:  scheduler
+  Usage:
+    Cpu:     8m
+    Memory:  45440Ki
+Kind:         PodMetrics
+Metadata:
+  Creation Timestamp:  2019-05-23T18:47:56Z
+  Self Link:           /apis/metrics.k8s.io/v1beta1/namespaces/openshift-kube-scheduler/pods/openshift-kube-scheduler-ip-10-0-135-131.ec2.internal
+Timestamp:    2019-05-23T18:47:56Z
+Window:       1m0s
+Events:       <none>
+----
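+
+As an additional check, you can confirm that the metrics pipeline is serving data by requesting the current usage for the pods in a namespace. The namespace shown in the following command is only an example:
+
+[source,terminal]
+----
+$ oc adm top pods -n openshift-kube-scheduler
+----
+
+If metrics are available, the command prints the current CPU and memory usage for each pod in the namespace.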