OCP content port to ROSA and OSD: Nodes
Committed by: openshift-cherrypick-robot
Parent commit: 8bf6818d5a
Commit: 48e73088ac
@@ -611,6 +611,48 @@ Name: Nodes
Dir: nodes
Distros: openshift-dedicated
Topics:
- Name: Overview of nodes
File: index
- Name: Working with pods
Dir: pods
Topics:
- Name: About pods
File: nodes-pods-using
- Name: Viewing pods
File: nodes-pods-viewing
- Name: Configuring a cluster for pods
File: nodes-pods-configuring
Distros: openshift-dedicated
# Cannot create namespace to install VPA; revisit after Operator book converted
# - Name: Automatically adjust pod resource levels with the vertical pod autoscaler
# File: nodes-pods-vertical-autoscaler
- Name: Providing sensitive data to pods
File: nodes-pods-secrets
- Name: Creating and using config maps
File: nodes-pods-configmaps
# Cannot create required "kubeletconfigs"
# - Name: Using Device Manager to make devices available to nodes
# File: nodes-pods-plugins
# Distros: openshift-dedicated
- Name: Including pod priority in pod scheduling decisions
File: nodes-pods-priority
Distros: openshift-dedicated
- Name: Placing pods on specific nodes using node selectors
File: nodes-pods-node-selectors
Distros: openshift-dedicated
# Cannot create namespace to install Run Once; revisit after Operator book converted
# - Name: Run Once Duration Override Operator
# Dir: run_once_duration_override
# Distros: openshift-dedicated
# Topics:
# - Name: Run Once Duration Override Operator overview
# File: index
# - Name: Run Once Duration Override Operator release notes
# File: run-once-duration-override-release-notes
# - Name: Overriding the active deadline for run-once pods
# File: run-once-duration-override-install
# - Name: Uninstalling the Run Once Duration Override Operator
# File: run-once-duration-override-uninstall
- Name: Automatically scaling pods with the Custom Metrics Autoscaler Operator
Dir: cma
Distros: openshift-dedicated
@@ -637,6 +679,161 @@ Topics:
File: nodes-cma-autoscaling-custom-adding
- Name: Removing the Custom Metrics Autoscaler Operator
File: nodes-cma-autoscaling-custom-removing
- Name: Controlling pod placement onto nodes (scheduling)
Dir: scheduling
Distros: openshift-dedicated
Topics:
- Name: About pod placement using the scheduler
File: nodes-scheduler-about
- Name: Placing pods relative to other pods using pod affinity and anti-affinity rules
File: nodes-scheduler-pod-affinity
- Name: Controlling pod placement on nodes using node affinity rules
File: nodes-scheduler-node-affinity
- Name: Placing pods onto overcommitted nodes
File: nodes-scheduler-overcommit
- Name: Controlling pod placement using node taints
File: nodes-scheduler-taints-tolerations
- Name: Placing pods on specific nodes using node selectors
File: nodes-scheduler-node-selectors
- Name: Controlling pod placement using pod topology spread constraints
File: nodes-scheduler-pod-topology-spread-constraints
# - Name: Placing a pod on a specific node by name
# File: nodes-scheduler-node-names
# - Name: Placing a pod in a specific project
# File: nodes-scheduler-node-projects
# - Name: Keeping your cluster balanced using the descheduler
# File: nodes-scheduler-descheduler
- Name: Evicting pods using the descheduler
File: nodes-descheduler
- Name: Secondary scheduler
Dir: secondary_scheduler
Distros: openshift-enterprise
Topics:
- Name: Secondary scheduler overview
File: index
- Name: Secondary Scheduler Operator release notes
File: nodes-secondary-scheduler-release-notes
- Name: Scheduling pods using a secondary scheduler
File: nodes-secondary-scheduler-configuring
- Name: Uninstalling the Secondary Scheduler Operator
File: nodes-secondary-scheduler-uninstalling
- Name: Using Jobs and DaemonSets
Dir: jobs
Topics:
- Name: Running background tasks on nodes automatically with daemonsets
File: nodes-pods-daemonsets
Distros: openshift-dedicated
- Name: Running tasks in pods using jobs
File: nodes-nodes-jobs
- Name: Working with nodes
Dir: nodes
Distros: openshift-dedicated
Topics:
- Name: Viewing and listing the nodes in your cluster
File: nodes-nodes-viewing
# cannot use oc adm cordon; cannot patch resource "machinesets"; cannot patch resource "nodes"
# - Name: Working with nodes
# File: nodes-nodes-working
# cannot create resource "kubeletconfigs", "schedulers", "machineconfigs", "kubeletconfigs"
# - Name: Managing nodes
# File: nodes-nodes-managing
# cannot create resource "kubeletconfigs"
# - Name: Managing graceful node shutdown
# File: nodes-nodes-graceful-shutdown
# cannot create resource "kubeletconfigs"
# - Name: Managing the maximum number of pods per node
# File: nodes-nodes-managing-max-pods
- Name: Using the Node Tuning Operator
File: nodes-node-tuning-operator
- Name: Remediating, fencing, and maintaining nodes
File: nodes-remediating-fencing-maintaining-rhwa
# Cannot create namespace needed to oc debug and reboot; revisit after Operator book converted
# - Name: Understanding node rebooting
# File: nodes-nodes-rebooting
# cannot create resource "kubeletconfigs"
# - Name: Freeing node resources using garbage collection
# File: nodes-nodes-garbage-collection
# cannot create resource "kubeletconfigs"
# - Name: Allocating resources for nodes
# File: nodes-nodes-resources-configuring
# cannot create resource "kubeletconfigs"
# - Name: Allocating specific CPUs for nodes in a cluster
# File: nodes-nodes-resources-cpus
# cannot create resource "kubeletconfigs"
# - Name: Configuring the TLS security profile for the kubelet
# File: nodes-nodes-tls
# Distros: openshift-dedicated
# - Name: Monitoring for problems in your nodes
# File: nodes-nodes-problem-detector
- Name: Machine Config Daemon metrics
File: nodes-nodes-machine-config-daemon-metrics
# cannot patch resource "nodes"
# - Name: Creating infrastructure nodes
# File: nodes-nodes-creating-infrastructure-nodes
- Name: Working with containers
Dir: containers
Topics:
- Name: Understanding containers
File: nodes-containers-using
- Name: Using Init Containers to perform tasks before a pod is deployed
File: nodes-containers-init
Distros: openshift-dedicated
- Name: Using volumes to persist container data
File: nodes-containers-volumes
- Name: Mapping volumes using projected volumes
File: nodes-containers-projected-volumes
- Name: Allowing containers to consume API objects
File: nodes-containers-downward-api
- Name: Copying files to or from a container
File: nodes-containers-copying-files
- Name: Executing remote commands in a container
File: nodes-containers-remote-commands
- Name: Using port forwarding to access applications in a container
File: nodes-containers-port-forwarding
# cannot patch resource "configmaps"
# - Name: Using sysctls in containers
# File: nodes-containers-sysctls
- Name: Working with clusters
Dir: clusters
Topics:
- Name: Viewing system event information in a cluster
File: nodes-containers-events
- Name: Analyzing cluster resource levels
File: nodes-cluster-resource-levels
Distros: openshift-dedicated
- Name: Setting limit ranges
File: nodes-cluster-limit-ranges
- Name: Configuring cluster memory to meet container memory and risk requirements
File: nodes-cluster-resource-configure
Distros: openshift-dedicated
- Name: Configuring your cluster to place pods on overcommitted nodes
File: nodes-cluster-overcommit
Distros: openshift-dedicated
- Name: Configuring the Linux cgroup version on your nodes
File: nodes-cluster-cgroups-2
Distros: openshift-enterprise
- Name: Configuring the Linux cgroup version on your nodes
File: nodes-cluster-cgroups-okd
Distros: openshift-origin
# The TechPreviewNoUpgrade Feature Gate is not allowed
# - Name: Enabling features using FeatureGates
# File: nodes-cluster-enabling-features
# Distros: openshift-rosa
# Error: nodes.config.openshift.io "cluster" could not be patched
# - Name: Improving cluster stability in high latency environments using worker latency profiles
# File: nodes-cluster-worker-latency-profiles
# Not supported per Michael McNeill
#- Name: Remote worker nodes on the network edge
# Dir: edge
# Topics:
# - Name: Using remote worker nodes at the network edge
# File: nodes-edge-remote-workers
# Not supported per Michael McNeill
#- Name: Worker nodes for single-node OpenShift clusters
# Dir: nodes
# Topics:
# - Name: Adding worker nodes to single-node OpenShift clusters
# File: nodes-sno-worker-nodes
---
Name: Logging
Dir: logging

@@ -36,7 +36,7 @@ Name: What's new
Dir: rosa_release_notes
Distros: openshift-rosa
Topics:
- Name: What's new with ROSA
- Name: What's new with Red Hat OpenShift Service on AWS
File: rosa-release-notes
---
Name: Introduction to ROSA
@@ -779,6 +779,48 @@ Name: Nodes
Dir: nodes
Distros: openshift-rosa
Topics:
- Name: Overview of nodes
File: index
- Name: Working with pods
Dir: pods
Topics:
- Name: About pods
File: nodes-pods-using
- Name: Viewing pods
File: nodes-pods-viewing
- Name: Configuring a cluster for pods
File: nodes-pods-configuring
Distros: openshift-rosa
# Cannot create namespace to install VPA; revisit after Operator book converted
# - Name: Automatically adjust pod resource levels with the vertical pod autoscaler
# File: nodes-pods-vertical-autoscaler
- Name: Providing sensitive data to pods
File: nodes-pods-secrets
- Name: Creating and using config maps
File: nodes-pods-configmaps
# Cannot create required kubeletconfigs
# - Name: Using Device Manager to make devices available to nodes
# File: nodes-pods-plugins
# Distros: openshift-rosa
- Name: Including pod priority in pod scheduling decisions
File: nodes-pods-priority
Distros: openshift-rosa
- Name: Placing pods on specific nodes using node selectors
File: nodes-pods-node-selectors
Distros: openshift-rosa
# Cannot create namespace to install Run Once; revisit after Operator book converted
# - Name: Run Once Duration Override Operator
# Dir: run_once_duration_override
# Distros: openshift-rosa
# Topics:
# - Name: Run Once Duration Override Operator overview
# File: index
# - Name: Run Once Duration Override Operator release notes
# File: run-once-duration-override-release-notes
# - Name: Overriding the active deadline for run-once pods
# File: run-once-duration-override-install
# - Name: Uninstalling the Run Once Duration Override Operator
# File: run-once-duration-override-uninstall
- Name: Automatically scaling pods with the Custom Metrics Autoscaler Operator
Dir: cma
Distros: openshift-rosa
@@ -805,6 +847,164 @@ Topics:
File: nodes-cma-autoscaling-custom-adding
- Name: Removing the Custom Metrics Autoscaler Operator
File: nodes-cma-autoscaling-custom-removing
- Name: Controlling pod placement onto nodes (scheduling)
Dir: scheduling
Distros: openshift-rosa
Topics:
- Name: About pod placement using the scheduler
File: nodes-scheduler-about
- Name: Placing pods relative to other pods using pod affinity and anti-affinity rules
File: nodes-scheduler-pod-affinity
- Name: Controlling pod placement on nodes using node affinity rules
File: nodes-scheduler-node-affinity
- Name: Placing pods onto overcommitted nodes
File: nodes-scheduler-overcommit
- Name: Controlling pod placement using node taints
File: nodes-scheduler-taints-tolerations
- Name: Placing pods on specific nodes using node selectors
File: nodes-scheduler-node-selectors
- Name: Controlling pod placement using pod topology spread constraints
File: nodes-scheduler-pod-topology-spread-constraints
# - Name: Placing a pod on a specific node by name
# File: nodes-scheduler-node-names
# - Name: Placing a pod in a specific project
# File: nodes-scheduler-node-projects
# - Name: Keeping your cluster balanced using the descheduler
# File: nodes-scheduler-descheduler
# Cannot create namespace to install Descheduler Operator; revisit after Operator book converted
# - Name: Evicting pods using the descheduler
# File: nodes-descheduler
# Cannot create namespace to install Secondary Scheduler Operator; revisit after Operator book converted
# - Name: Secondary scheduler
# Dir: secondary_scheduler
# Distros: openshift-enterprise
# Topics:
# - Name: Secondary scheduler overview
# File: index
# - Name: Secondary Scheduler Operator release notes
# File: nodes-secondary-scheduler-release-notes
# - Name: Scheduling pods using a secondary scheduler
# File: nodes-secondary-scheduler-configuring
# - Name: Uninstalling the Secondary Scheduler Operator
# File: nodes-secondary-scheduler-uninstalling
- Name: Using Jobs and DaemonSets
Dir: jobs
Topics:
- Name: Running background tasks on nodes automatically with daemonsets
File: nodes-pods-daemonsets
Distros: openshift-rosa
- Name: Running tasks in pods using jobs
File: nodes-nodes-jobs
- Name: Working with nodes
Dir: nodes
Distros: openshift-rosa
Topics:
- Name: Viewing and listing the nodes in your cluster
File: nodes-nodes-viewing
# cannot use oc adm cordon; cannot patch resource "machinesets"; cannot patch resource "nodes"
# - Name: Working with nodes
# File: nodes-nodes-working
# cannot create resource "kubeletconfigs", "schedulers", "machineconfigs", "kubeletconfigs"
# - Name: Managing nodes
# File: nodes-nodes-managing
# cannot create resource "kubeletconfigs"
# - Name: Managing graceful node shutdown
# File: nodes-nodes-graceful-shutdown
# cannot create resource "kubeletconfigs"
# - Name: Managing the maximum number of pods per node
# File: nodes-nodes-managing-max-pods
- Name: Using the Node Tuning Operator
File: nodes-node-tuning-operator
- Name: Remediating, fencing, and maintaining nodes
File: nodes-remediating-fencing-maintaining-rhwa
# Cannot create namespace needed to oc debug and reboot; revisit after Operator book converted
# - Name: Understanding node rebooting
# File: nodes-nodes-rebooting
# cannot create resource "kubeletconfigs"
# - Name: Freeing node resources using garbage collection
# File: nodes-nodes-garbage-collection
# cannot create resource "kubeletconfigs"
# - Name: Allocating resources for nodes
# File: nodes-nodes-resources-configuring
# cannot create resource "kubeletconfigs"
# - Name: Allocating specific CPUs for nodes in a cluster
# File: nodes-nodes-resources-cpus
# cannot create resource "kubeletconfigs"
# - Name: Configuring the TLS security profile for the kubelet
# File: nodes-nodes-tls
# Distros: openshift-rosa
# - Name: Monitoring for problems in your nodes
# File: nodes-nodes-problem-detector
- Name: Machine Config Daemon metrics
File: nodes-nodes-machine-config-daemon-metrics
# cannot patch resource "nodes"
# - Name: Creating infrastructure nodes
# File: nodes-nodes-creating-infrastructure-nodes
- Name: Working with containers
Dir: containers
Topics:
- Name: Understanding containers
File: nodes-containers-using
- Name: Using Init Containers to perform tasks before a pod is deployed
File: nodes-containers-init
Distros: openshift-rosa
- Name: Using volumes to persist container data
File: nodes-containers-volumes
- Name: Mapping volumes using projected volumes
File: nodes-containers-projected-volumes
- Name: Allowing containers to consume API objects
File: nodes-containers-downward-api
- Name: Copying files to or from a container
File: nodes-containers-copying-files
- Name: Executing remote commands in a container
File: nodes-containers-remote-commands
- Name: Using port forwarding to access applications in a container
File: nodes-containers-port-forwarding
# cannot patch resource "configmaps"
# - Name: Using sysctls in containers
# File: nodes-containers-sysctls
- Name: Working with clusters
Dir: clusters
Topics:
- Name: Viewing system event information in a cluster
File: nodes-containers-events
- Name: Analyzing cluster resource levels
File: nodes-cluster-resource-levels
Distros: openshift-rosa
- Name: Setting limit ranges
File: nodes-cluster-limit-ranges
- Name: Configuring cluster memory to meet container memory and risk requirements
File: nodes-cluster-resource-configure
Distros: openshift-rosa
- Name: Configuring your cluster to place pods on overcommitted nodes
File: nodes-cluster-overcommit
Distros: openshift-rosa
- Name: Configuring the Linux cgroup version on your nodes
File: nodes-cluster-cgroups-2
Distros: openshift-enterprise
- Name: Configuring the Linux cgroup version on your nodes
File: nodes-cluster-cgroups-okd
Distros: openshift-origin
# The TechPreviewNoUpgrade Feature Gate is not allowed
# - Name: Enabling features using FeatureGates
# File: nodes-cluster-enabling-features
# Distros: openshift-rosa
# Error: nodes.config.openshift.io "cluster" could not be patched
# - Name: Improving cluster stability in high latency environments using worker latency profiles
# File: nodes-cluster-worker-latency-profiles
# Not supported per Michael McNeill
#- Name: Remote worker nodes on the network edge
# Dir: edge
# Topics:
# - Name: Using remote worker nodes at the network edge
# File: nodes-edge-remote-workers
# Not supported per Michael McNeill
#- Name: Worker nodes for single-node OpenShift clusters
# Dir: nodes
# Distros: openshift-rosa
# Topics:
# - Name: Adding worker nodes to single-node OpenShift clusters
# File: nodes-sno-worker-nodes
---
Name: Logging
Dir: logging

@@ -13,7 +13,15 @@ When enabled, overcommitment can be disabled per-project. For example, you can a

To disable overcommitment in a project:

. Edit the namespace object to add the following annotation:
ifndef::openshift-rosa,openshift-dedicated[]
. Create or edit the namespace object file.
endif::openshift-rosa,openshift-dedicated[]
// Invalid value: "false": field is immutable, try updating the namespace
ifdef::openshift-rosa,openshift-dedicated[]
. Edit the namespace object file.
endif::openshift-rosa,openshift-dedicated[]

. Add the following annotation:
+
[source,yaml]
----
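# The annotation body itself falls outside this hunk. As a hedged sketch
# (an assumption for illustration, not text from this commit), the setting
# that typically disables overcommitment for a project is:
quota.openshift.io/cluster-resource-override-enabled: "false"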
@@ -30,7 +30,12 @@ The following Operators monitor the changes to the worker latency profiles and r
* The Kubernetes Controller Manager Operator updates the `node-monitor-grace-period` parameter on the control plane nodes.
* The Kubernetes API Server Operator updates the `default-not-ready-toleration-seconds` and `default-unreachable-toleration-seconds` parameters on the control plane nodes.

While the default configuration works in most cases, {product-title} offers two other worker latency profiles for situations where the network is experiencing higher latency than usual. The three worker latency profiles are described in the following sections:
ifndef::openshift-rosa,openshift-dedicated[]
Although the default configuration works in most cases, {product-title} offers two other worker latency profiles for situations where the network is experiencing higher latency than usual. The three worker latency profiles are described in the following sections:
endif::openshift-rosa,openshift-dedicated[]
ifdef::openshift-rosa,openshift-dedicated[]
Although the default configuration works in most cases, {product-title} offers a second worker latency profile for situations where the network is experiencing higher latency than usual. The two worker latency profiles are described in the following sections:
endif::openshift-rosa,openshift-dedicated[]

Default worker latency profile:: With the `Default` profile, each kubelet reports its node status to the Kubernetes Controller Manager Operator (kube controller) every 10 seconds. The Kubernetes Controller Manager Operator checks the kubelet for a status every 5 seconds.
+
@@ -88,6 +93,7 @@ The Kubernetes Controller Manager Operator waits for 2 minutes to consider a nod

|===

ifndef::openshift-rosa,openshift-dedicated[]
Low worker latency profile:: Use the `LowUpdateSlowReaction` profile if the network latency is extremely high.
+
The `LowUpdateSlowReaction` profile reduces the frequency of kubelet updates to 1 minute and changes the period that the Kubernetes Controller Manager Operator waits for those updates to 5 minutes. The pod eviction period for a pod on that node is reduced to 60 seconds. If the pod has the `tolerationSeconds` parameter, the eviction waits for the period specified by that parameter.
@@ -116,4 +122,4 @@ The Kubernetes Controller Manager Operator waits for 5 minutes to consider a nod
| 60s

|===

endif::openshift-rosa,openshift-dedicated[]

@@ -136,5 +136,5 @@ $ oc get KubeControllerManager -o yaml | grep -i workerlatency -A 5 -B 5
----
<1> Specifies that the profile is applied and active.

To change the low profile to medium or change the medium to low, edit the `node.config` object and set the `spec.workerLatencyProfile` parameter to the appropriate value.
To change the medium profile to default or change the default to medium, edit the `node.config` object and set the `spec.workerLatencyProfile` parameter to the appropriate value.

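As a hedged illustration of the `node.config` edit that both sentences above describe (a sketch with an assumed profile value, not text from this commit):

[source,yaml]
----
apiVersion: config.openshift.io/v1
kind: Node
metadata:
  name: cluster
spec:
  workerLatencyProfile: MediumUpdateAverageReaction # or Default, or LowUpdateSlowReaction
----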
@@ -100,6 +100,7 @@ to configure history limits so that old jobs and their pods are properly cleaned

* `.spec.failedJobsHistoryLimit`. The number of failed finished jobs to retain (defaults to 1).

ifndef::openshift-rosa,openshift-dedicated[]
[TIP]
====
* Delete cron jobs that you no longer need:
@@ -113,6 +114,7 @@ Doing this prevents them from generating unnecessary artifacts.

* You can suspend further executions by setting the `spec.suspend` to true. All subsequent executions are suspended until you reset to `false`.
====
endif::openshift-rosa,openshift-dedicated[]
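A hedged example of the suspend setting mentioned in the tip above (a sketch only; the cron job name `pi` is borrowed from the cron job example elsewhere in this diff and is an assumption here):

[source,terminal]
----
$ oc patch cronjob/pi --type=merge -p '{"spec":{"suspend":true}}'
----

Patching the field back to `false` with the same command resumes scheduling.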

[id="jobs-limits_{context}"]
== Known limitations

@@ -14,6 +14,7 @@ To create a cron job:

. Create a YAML file similar to the following:
+
ifndef::openshift-rosa,openshift-dedicated[]
[source,yaml]
----
apiVersion: batch/v1
@@ -61,6 +62,52 @@ all subsequent executions will be suspended.
<8> Job template. This is similar to the job example.
<9> Sets a label for jobs spawned by this cron job.
<10> The restart policy of the pod. This does not apply to the job controller.
endif::openshift-rosa,openshift-dedicated[]
ifdef::openshift-rosa,openshift-dedicated[]
[source,yaml]
----
apiVersion: batch/v1
kind: CronJob
metadata:
  name: pi
spec:
  schedule: "*/1 * * * *" <1>
  concurrencyPolicy: "Replace" <2>
  startingDeadlineSeconds: 200 <3>
  suspend: true <4>
  successfulJobsHistoryLimit: 3 <5>
  failedJobsHistoryLimit: 1 <6>
  jobTemplate: <7>
    spec:
      template:
        metadata:
          labels: <8>
            parent: "cronjobpi"
        spec:
          containers:
          - name: pi
            image: perl
            command: ["perl", "-Mbignum=bpi", "-wle", "print bpi(2000)"]
          restartPolicy: OnFailure <9>
----
+
<1> Schedule for the job specified in link:https://en.wikipedia.org/wiki/Cron[cron format]. In this example, the job will run every minute.
<2> An optional concurrency policy, specifying how to treat concurrent jobs within a cron job. Only one of the following concurrency policies may be specified. If not specified, this defaults to allowing concurrent executions.
* `Allow` allows cron jobs to run concurrently.
* `Forbid` forbids concurrent runs, skipping the next run if the previous has not finished yet.
* `Replace` cancels the currently running job and replaces it with a new one.
<3> An optional deadline (in seconds) for starting the job if it misses its scheduled time for any reason. Missed job executions will be counted as failed ones. If not specified, there is no deadline.
<4> An optional flag allowing the suspension of a cron job. If set to `true`, all subsequent executions will be suspended.
<5> The number of successful finished jobs to retain (defaults to 3).
<6> The number of failed finished jobs to retain (defaults to 1).
<7> Job template. This is similar to the job example.
<8> Sets a label for jobs spawned by this cron job.
<9> The restart policy of the pod. This does not apply to the job controller.
+
[NOTE]
====
@@ -69,6 +116,7 @@ These fields specify how many completed and failed jobs should be kept. By defa
set to `3` and `1` respectively. Setting a limit to `0` corresponds to keeping none of the corresponding kind of jobs after they finish.
====
endif::openshift-rosa,openshift-dedicated[]

. Create the cron job:
+

@@ -36,12 +36,14 @@ metadata:
----
====

ifndef::openshift-rosa,openshift-dedicated[]
* If you are creating a new project, overwrite the default node selector:
+
[source,terminal]
----
$ oc adm new-project <name> --node-selector=""
----
endif::openshift-rosa,openshift-dedicated[]

.Procedure

@@ -18,6 +18,7 @@ You cannot add a priority class directly to an existing scheduled pod.

To configure your cluster to use priority and preemption:

ifndef::openshift-rosa,openshift-dedicated[]
. Create one or more priority classes:

.. Create a YAML file similar to the following:
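The priority class YAML itself lies outside this hunk. As a hedged sketch of what such a file typically contains (an assumption for illustration, not text from this commit; the name `high-priority` matches the pod example further down):

[source,yaml]
----
apiVersion: scheduling.k8s.io/v1
kind: PriorityClass
metadata:
  name: high-priority       # referenced from a pod spec as priorityClassName
value: 1000000              # pods with higher values are scheduled, and preempt, first
globalDefault: false        # do not apply to pods that omit priorityClassName
description: "Priority class for important workloads."
----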
@@ -47,9 +48,10 @@ $ oc create -f <file-name>.yaml
----

. Create a pod spec to include the name of a priority class:

// ROSA/OSD cannot create new priority classes. Must use the defaults.
.. Create a YAML file similar to the following:
+
ifndef::openshift-rosa,openshift-dedicated[]
[source,yaml]
----
apiVersion: v1
@@ -66,6 +68,25 @@ spec:
  priorityClassName: high-priority <1>
----
<1> Specify the priority class to use with this pod.
endif::openshift-rosa,openshift-dedicated[]
ifdef::openshift-rosa,openshift-dedicated[]
[source,yaml]
----
apiVersion: v1
kind: Pod
metadata:
  name: nginx
  labels:
    env: test
spec:
  containers:
  - name: nginx
    image: nginx
    imagePullPolicy: IfNotPresent
  priorityClassName: system-cluster-critical <1>
----
<1> Specify the priority class to use with this pod.
endif::openshift-rosa,openshift-dedicated[]

.. Create the pod:
+
@@ -73,5 +94,35 @@ spec:
----
$ oc create -f <file-name>.yaml
----
endif::openshift-rosa,openshift-dedicated[]

ifdef::openshift-rosa,openshift-dedicated[]
// ROSA/OSD cannot create new priority classes. Must use the defaults.
. Define a pod spec to include the name of a priority class by creating a YAML file similar to the following:
+
[source,yaml]
----
apiVersion: v1
kind: Pod
metadata:
  name: nginx
  labels:
    env: test
spec:
  containers:
  - name: nginx
    image: nginx
    imagePullPolicy: IfNotPresent
  priorityClassName: system-cluster-critical <1>
----
<1> Specify the priority class to use with this pod.

. Create the pod:
+
[source,terminal]
----
$ oc create -f <file-name>.yaml
----
endif::openshift-rosa,openshift-dedicated[]
+
You can add the priority name directly to the pod configuration or to a pod template.

@@ -12,12 +12,14 @@ Preferred rules specify that, if the rule is met, the scheduler tries to enforce

The following steps demonstrate a simple configuration that creates a node and a pod that the scheduler tries to place on the node.

ifndef::openshift-rosa,openshift-dedicated[]
. Add a label to a node using the `oc label node` command:
+
[source,terminal]
----
$ oc label node node1 e2e-az-name=e2e-az3
----
endif::openshift-rosa,openshift-dedicated[]

. Create a pod with a specific label:
+

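The pod YAML for this step falls outside the hunk. A hedged sketch of a pod that prefers nodes carrying the `e2e-az-name=e2e-az3` label added above (illustrative only; the pod name and image are assumptions, not part of this commit):

[source,yaml]
----
apiVersion: v1
kind: Pod
metadata:
  name: s1
spec:
  affinity:
    nodeAffinity:
      preferredDuringSchedulingIgnoredDuringExecution: # soft rule: the scheduler tries, but is not required, to honor it
      - weight: 1
        preference:
          matchExpressions:
          - key: e2e-az-name
            operator: In
            values:
            - e2e-az3
  containers:
  - name: s1
    image: <image>
----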
@@ -12,6 +12,7 @@ Required rules *must* be met before a pod can be scheduled on a node.

The following steps demonstrate a simple configuration that creates a node and a pod that the scheduler is required to place on the node.

ifndef::openshift-rosa,openshift-dedicated[]
. Add a label to a node using the `oc label node` command:
+
[source,terminal]
@@ -34,6 +35,7 @@ metadata:
#...
----
====
endif::openshift-rosa,openshift-dedicated[]

. Create a pod with a specific label in the pod spec:
+

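As with the preferred example, the pod YAML is outside the hunk. A hedged sketch of a pod that can only be scheduled on nodes carrying a matching `e2e-az-name` label (illustrative only; the pod name, values, and image are assumptions, not part of this commit):

[source,yaml]
----
apiVersion: v1
kind: Pod
metadata:
  name: s1
spec:
  affinity:
    nodeAffinity:
      requiredDuringSchedulingIgnoredDuringExecution: # hard rule: the pod stays Pending until a node matches
        nodeSelectorTerms:
        - matchExpressions:
          - key: e2e-az-name
            operator: In
            values:
            - e2e-az3
  containers:
  - name: s1
    image: <image>
----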
@@ -35,7 +35,7 @@ $ oc describe pod router-default-66d5cf9464-7pwkc
kind: Pod
apiVersion: v1
metadata:
#...
# ...
Name: router-default-66d5cf9464-7pwkc
Namespace: openshift-ingress
# ...
@@ -64,6 +64,7 @@ metadata:

.Procedure

ifndef::openshift-rosa,openshift-dedicated[]
. Add labels to a node by using a compute machine set or editing the node directly:

* Use a `MachineSet` object to add labels to nodes managed by the compute machine set when a node is created:
@@ -100,7 +101,7 @@ spec:
      labels:
        region: "east"
        type: "user-node"
#...
# ...
----
====

@@ -164,7 +165,7 @@ metadata:
  labels:
    type: "user-node"
    region: "east"
#...
# ...
----
====

@@ -207,7 +208,7 @@ spec:
    kubernetes.io/os: linux
    node-role.kubernetes.io/worker: ''
    type: user-node <1>
#...
# ...
----
<1> Add the node selector.

@@ -220,15 +221,68 @@ apiVersion: v1
kind: Pod
metadata:
  name: hello-node-6fbccf8d9
#...
# ...
spec:
  nodeSelector:
    region: east
    type: user-node
#...
# ...
----
+
[NOTE]
====
You cannot add a node selector directly to an existing scheduled pod.
====
endif::openshift-rosa,openshift-dedicated[]
ifdef::openshift-rosa,openshift-dedicated[]
* Add the matching node selector to a pod:
+
** To add a node selector to existing and future pods, add a node selector to the controlling object for the pods:
+
.Example `ReplicaSet` object with labels
[source,yaml]
----
kind: ReplicaSet
apiVersion: apps/v1
metadata:
  name: hello-node-6fbccf8d9
# ...
spec:
# ...
  template:
    metadata:
      creationTimestamp: null
      labels:
        ingresscontroller.operator.openshift.io/deployment-ingresscontroller: default
        pod-template-hash: 66d5cf9464
    spec:
      nodeSelector:
        kubernetes.io/os: linux
        node-role.kubernetes.io/worker: ''
        type: user-node <1>
# ...
----
<1> Add the node selector.

** To add a node selector to a specific, new pod, add the selector to the `Pod` object directly:
+
.Example `Pod` object with a node selector
[source,yaml]
----
apiVersion: v1
kind: Pod
metadata:
  name: hello-node-6fbccf8d9
# ...
spec:
  nodeSelector:
    region: east
    type: user-node
# ...
----
+
[NOTE]
====
You cannot add a node selector directly to an existing scheduled pod.
====
endif::openshift-rosa,openshift-dedicated[]

@@ -24,12 +24,21 @@ The scheduler attempts to optimize the compute resource use across all nodes
in your cluster. It places pods onto specific nodes, taking the pods' compute
resource requests and nodes' available capacity into consideration.

ifndef::openshift-rosa,openshift-dedicated[]
{product-title} administrators can control the level of overcommit and manage
container density on nodes. You can configure cluster-level overcommit using
the xref:#nodes-cluster-resource-override_nodes-cluster-overcommit[ClusterResourceOverride Operator]
to override the ratio between requests and limits set on developer containers.
In conjunction with xref:#nodes-cluster-node-overcommit_nodes-cluster-overcommit[node overcommit] and
xref:../../applications/deployments/managing-deployment-processes.adoc#deployments-setting-resources_deployment-operations[project memory and CPU limits and defaults], you can adjust the resource limit and request to achieve the desired level of overcommit.
endif::openshift-rosa,openshift-dedicated[]
ifdef::openshift-rosa,openshift-dedicated[]
{product-title} administrators can control the level of overcommit and manage
container density on nodes. You can configure cluster-level overcommit using
the xref:#nodes-cluster-resource-override_nodes-cluster-overcommit[ClusterResourceOverride Operator]
to override the ratio between requests and limits set on developer containers.
In conjunction with xref:#nodes-cluster-node-overcommit_nodes-cluster-overcommit[node overcommit], you can adjust the resource limit and request to achieve the desired level of overcommit.
endif::openshift-rosa,openshift-dedicated[]

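A hedged sketch of the `ClusterResourceOverride` custom resource that the paragraphs above refer to (the percentages are illustrative assumptions, not text from this commit):

[source,yaml]
----
apiVersion: operator.autoscaling.openshift.io/v1
kind: ClusterResourceOverride
metadata:
  name: cluster
spec:
  podResourceOverride:
    spec:
      memoryRequestToLimitPercent: 50 # container memory request forced to 50% of its limit
      cpuRequestToLimitPercent: 25    # container CPU request forced to 25% of its limit
      limitCPUToMemoryPercent: 200    # CPU limit derived from the memory limit
----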
[NOTE]
====
@@ -73,9 +82,11 @@ include::modules/nodes-cluster-project-overcommit.adoc[leveloffset=+1]

include::modules/nodes-cluster-overcommit-project-disable.adoc[leveloffset=+2]

ifndef::openshift-rosa,openshift-dedicated[]
[role="_additional-resources"]
[id="nodes-cluster-overcommit-addtl-resources"]
== Additional resources

* xref:../../applications/deployments/managing-deployment-processes.adoc#deployments-triggers_deployment-operations[Setting deployment resources].
* xref:../../nodes/nodes/nodes-nodes-resources-configuring.adoc#nodes-nodes-resources-configuring-setting_nodes-nodes-resources-configuring[Allocating resources for nodes].
endif::openshift-rosa,openshift-dedicated[]

@@ -49,6 +49,7 @@ A _container engine_ is a piece of software that processes user requests, includ
The {product-title} documentation uses the term _container runtime_ to refer to the lower-level container runtime. Other documentation can refer to the container engine as the container runtime.
====

ifndef::openshift-rosa,openshift-dedicated[]
{product-title} uses CRI-O as the container engine and runC or crun as the container runtime. The default container runtime is runC. Both container runtimes adhere to the link:https://www.opencontainers.org/[Open Container Initiative (OCI)] runtime specifications.

include::snippets/about-crio-snippet.adoc[]
@@ -70,4 +71,8 @@ runC has some benefits over crun, including:
You can move between the two container runtimes as needed.

For information on setting which container runtime to use, see xref:../../post_installation_configuration/machine-configuration-tasks.adoc#create-a-containerruntimeconfig_post-install-machine-configuration-tasks[Creating a `ContainerRuntimeConfig` CR to edit CRI-O parameters].
endif::openshift-rosa,openshift-dedicated[]
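A hedged sketch of the `ContainerRuntimeConfig` CR that the xref above points to (the CR name, pool selector, and runtime choice are illustrative assumptions, not part of this commit):

[source,yaml]
----
apiVersion: machineconfiguration.openshift.io/v1
kind: ContainerRuntimeConfig
metadata:
  name: enable-crun-worker
spec:
  machineConfigPoolSelector:
    matchLabels:
      pools.operator.machineconfiguration.openshift.io/worker: "" # target the worker pool
  containerRuntimeConfig:
    defaultRuntime: crun # switch the container runtime from runC to crun
----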

ifdef::openshift-rosa,openshift-dedicated[]
{product-title} uses CRI-O as the container engine and runC or crun as the container runtime. The default container runtime is runC.
endif::openshift-rosa,openshift-dedicated[]

@@ -38,6 +38,7 @@ The read operations allow an administrator or a developer to get information abo
* Get information about a node, such as memory and CPU usage, health, status, and age.
* xref:../nodes/nodes/nodes-nodes-viewing.adoc#nodes-nodes-viewing-listing-pods_nodes-nodes-viewing[List pods running on a node].

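A hedged command-line illustration of those read operations (the node name is a placeholder and the command set is an assumption for illustration, not text from this commit):

[source,terminal]
----
$ oc get nodes                                      # status, roles, age, and version for every node
$ oc adm top node                                   # current CPU and memory usage per node
$ oc describe node <node_name>                      # capacity, conditions, and health for one node
$ oc get pods --all-namespaces --field-selector spec.nodeName=<node_name>   # pods running on that node
----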
ifndef::openshift-rosa,openshift-dedicated[]
[discrete]
=== Management operations

@@ -51,6 +52,7 @@ through several tasks:
* xref:../nodes/nodes/nodes-nodes-managing-max-pods.adoc#nodes-nodes-managing-max-pods-proc_nodes-nodes-managing-max-pods[Configure the number of pods that can run on a node] based on the number of processor cores on the node, a hard limit, or both.
* Reboot a node gracefully using xref:../nodes/nodes/nodes-nodes-rebooting.adoc#nodes-nodes-rebooting-affinity_nodes-nodes-rebooting[pod anti-affinity].
* xref:../nodes/nodes/nodes-nodes-working.adoc#deleting-nodes[Delete a node from a cluster] by scaling down the cluster using a compute machine set. To delete a node from a bare-metal cluster, you must first drain all pods on the node and then manually delete the node.
endif::openshift-rosa,openshift-dedicated[]

[discrete]
=== Enhancement operations
@@ -58,12 +60,15 @@ through several tasks:
{product-title} allows you to do more than just access and manage nodes; as an administrator, you can perform the following tasks on nodes to make the cluster more efficient, application-friendly, and to provide a better environment for your developers.

* Manage node-level tuning for high-performance applications that require some level of kernel tuning by xref:../nodes/nodes/nodes-node-tuning-operator.adoc#nodes-node-tuning-operator[using the Node Tuning Operator].
ifndef::openshift-rosa,openshift-dedicated[]
* Enable TLS security profiles on the node to protect communication between the kubelet and the Kubernetes API server.
endif::openshift-rosa,openshift-dedicated[]
* xref:../nodes/jobs/nodes-pods-daemonsets.adoc#nodes-pods-daemonsets[Run background tasks on nodes automatically with daemon sets]. You can create and use daemon sets to create shared storage, run a logging pod on every node, or deploy a monitoring agent on all nodes.
ifndef::openshift-rosa,openshift-dedicated[]
* xref:../nodes/nodes/nodes-nodes-garbage-collection.adoc#nodes-nodes-garbage-collection[Free node resources using garbage collection]. You can ensure that your nodes are running efficiently by removing terminated containers and the images not referenced by any running pods.
* xref:../nodes/nodes/nodes-nodes-managing.adoc#nodes-nodes-kernel-arguments_nodes-nodes-managing[Add kernel arguments to a set of nodes].
* Configure an {product-title} cluster to have worker nodes at the network edge (remote worker nodes). For information on the challenges of having remote worker nodes in an {product-title} cluster and some recommended approaches for managing pods on a remote worker node, see xref:../nodes/edge/nodes-edge-remote-workers.adoc#nodes-edge-remote-workers[Using remote worker nodes at the network edge].

endif::openshift-rosa,openshift-dedicated[]

[id="pods-overview"]
== About pods
@@ -88,8 +93,11 @@ The following list of tasks provides an overview of how an administrator can man
** xref:../nodes/scheduling/nodes-scheduler-node-selectors.adoc#nodes-scheduler-node-selectors[Node labels and selectors].
** xref:../nodes/scheduling/nodes-scheduler-taints-tolerations.adoc#nodes-scheduler-taints-tolerations[Taints and tolerations].
** xref:../nodes/scheduling/nodes-scheduler-pod-topology-spread-constraints.adoc#nodes-scheduler-pod-topology-spread-constraints[Pod topology spread constraints].
// Cannot create namespace to install Operator
ifndef::openshift-rosa,openshift-dedicated[]
** xref:../nodes/scheduling/secondary_scheduler/index.adoc#nodes-secondary-scheduler-about[Secondary scheduling].
* xref:../nodes/scheduling/nodes-descheduler.adoc#nodes-descheduler[Configure the descheduler to evict pods] based on specific strategies so that the scheduler reschedules the pods to more appropriate nodes.
endif::openshift-rosa,openshift-dedicated[]
* xref:../nodes/pods/nodes-pods-configuring.adoc#nodes-pods-configuring-restart_nodes-pods-configuring[Configure how pods behave after a restart using pod controllers and restart policies].
* xref:../nodes/pods/nodes-pods-configuring.adoc#nodes-pods-configuring-bandwidth_nodes-pods-configuring[Limit both egress and ingress traffic on a pod].
* xref:../nodes/containers/nodes-containers-volumes.adoc#nodes-containers-volumes[Add and remove volumes to and from any object that has a pod template]. A volume is a mounted file system available to all the containers in a pod. Container storage is ephemeral; you can use volumes to persist container data.
@@ -99,7 +107,7 @@ The following list of tasks provides an overview of how an administrator can man

You can work with pods more easily and efficiently with the help of various tools and features available in {product-title}. The following operations involve using those tools and features to better manage pods.


ifndef::openshift-rosa,openshift-dedicated[]
[cols="2,1,2"]
|===
|Operation |User |More information
@@ -124,6 +132,10 @@ As a developer, use a vertical pod autoscaler to ensure your pods stay up during


|===
endif::openshift-rosa,openshift-dedicated[]
ifdef::openshift-rosa,openshift-dedicated[]
* Secrets: Some applications need sensitive information, such as passwords and usernames. An administrator can use the `Secret` object to provide sensitive data to pods xref:../nodes/pods/nodes-pods-secrets.adoc#nodes-pods-secrets[using the `Secret` object].
endif::openshift-rosa,openshift-dedicated[]

[id="containers-overview"]
== About containers
@@ -142,6 +154,9 @@ As an administrator, you can perform various tasks on a Linux container, such as

Apart from performing specific tasks on nodes, pods, and containers, you can work with the overall {product-title} cluster to keep the cluster efficient and the application pods highly available.


//cannot create the required namespace for these operators
ifndef::openshift-rosa,openshift-dedicated[]
[id="nodes-about-autoscaling-pod_{context}"]
== About autoscaling pods on a node

@@ -161,6 +176,7 @@ Vertical Pod Autoscaler::
The Vertical Pod Autoscaler (VPA) can automatically review the historic and current CPU and memory resources for containers in pods and can update the resource limits and requests based on the usage values it learns.
+
For more information, see xref:../nodes/pods/nodes-pods-vertical-autoscaler.adoc#nodes-pods-vpa[Automatically adjust pod resource levels with the vertical pod autoscaler].
endif::openshift-rosa,openshift-dedicated[]

[id="commonterms-node"]
== Glossary of common terms for {product-title} nodes
@@ -187,10 +203,13 @@ The process of data sharing externally through a network’s outbound traffic fr
garbage collection::
The process of cleaning up cluster resources, such as terminated containers and images that are not referenced by any running pods.

//cannot create the required namespace for these operators
ifndef::openshift-rosa,openshift-dedicated[]
[discrete]
[id="commonterms-node-hpa"]
Horizontal Pod Autoscaler (HPA)::
Implemented as a Kubernetes API resource and a controller. You can use the HPA to specify the minimum and maximum number of pods that you want to run. You can also specify the CPU or memory utilization that your pods should target. The HPA scales out and scales in pods when a given CPU or memory threshold is crossed.
endif::openshift-rosa,openshift-dedicated[]
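A hedged one-line illustration of the HPA behavior described above (the deployment name and thresholds are assumptions, not part of this commit):

[source,terminal]
----
$ oc autoscale deployment/hello-node --min=2 --max=10 --cpu-percent=75
----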

[discrete]
[id="commonterms-node-ingress"]

@@ -11,5 +11,10 @@ include::modules/machine-config-daemon-metrics.adoc[leveloffset=+1]
[role="_additional-resources"]
.Additional resources

ifndef::openshift-rosa,openshift-dedicated[]
* xref:../../monitoring/monitoring-overview.adoc#monitoring-overview[Monitoring overview]
* xref:../../support/gathering-cluster-data.adoc#gathering-cluster-data[Gathering data about your cluster]
endif::openshift-rosa,openshift-dedicated[]
ifdef::openshift-rosa,openshift-dedicated[]
* xref:../../monitoring/monitoring-overview.adoc#monitoring-overview[Understanding the monitoring stack]
endif::openshift-rosa,openshift-dedicated[]

@@ -26,14 +26,20 @@ include::modules/nodes-pods-pod-disruption-about.adoc[leveloffset=+1]

include::modules/nodes-pods-pod-disruption-configuring.adoc[leveloffset=+2]

//tech preview feature
ifndef::openshift-rosa,openshift-dedicated[]
include::modules/pod-disruption-eviction-policy.adoc[leveloffset=+2]
endif::openshift-rosa,openshift-dedicated[]

//Unsupported

[role="_additional-resources"]
.Additional resources

ifndef::openshift-rosa,openshift-dedicated[]
* xref:../../nodes/clusters/nodes-cluster-enabling-features.adoc#nodes-cluster-enabling[Enabling features using feature gates]
* link:https://kubernetes.io/docs/tasks/run-application/configure-pdb/#unhealthy-pod-eviction-policy[Unhealthy Pod Eviction Policy] in the Kubernetes documentation

include::modules/nodes-pods-configuring-pod-critical.adoc[leveloffset=+1]

include::modules/nodes-pods-configuring-reducing.adoc[leveloffset=+1]
endif::openshift-rosa,openshift-dedicated[]

@@ -10,9 +10,12 @@ toc::[]
You can enable pod priority and preemption in your cluster. Pod priority indicates the importance of a pod relative to other pods and queues the pods based on that priority. Pod preemption allows the cluster to evict, or preempt, lower-priority pods so that higher-priority pods can be scheduled if there is no available space on a suitable node.
Pod priority also affects the scheduling order of pods and out-of-resource eviction ordering on the node.

ifndef::openshift-rosa,openshift-dedicated[]
To use priority and preemption, you create priority classes that define the relative weight of your pods. Then, reference a priority class in the pod specification to apply that weight for scheduling.


endif::openshift-rosa,openshift-dedicated[]
ifdef::openshift-rosa,openshift-dedicated[]
To use priority and preemption, reference a priority class in the pod specification to apply that weight for scheduling.
endif::openshift-rosa,openshift-dedicated[]

// The following include statements pull in the module files that comprise
// the assembly. Include any combination of concept, procedure, or reference

@@ -16,9 +16,10 @@ include::modules/service-account-auto-secret-removed.adoc[leveloffset=+2]

.Additional resources

* For information about requesting bound service account tokens, see xref:../../authentication/bound-service-account-tokens.adoc#bound-sa-tokens-configuring_bound-service-account-tokens[Using bound service account tokens]

* For information about creating a service account token secret, see xref:../../nodes/pods/nodes-pods-secrets.adoc#nodes-pods-secrets-creating-sa_nodes-pods-secrets[Creating a service account token secret].
ifndef::openshift-rosa,openshift-dedicated[]
* For information about requesting bound service account tokens, see xref:../../authentication/bound-service-account-tokens.adoc#bound-sa-tokens-configuring_bound-service-account-tokens[Using bound service account tokens]
endif::openshift-rosa,openshift-dedicated[]
* For information about creating a service account token secret, see xref:../../nodes/pods/nodes-pods-secrets.adoc#nodes-pods-secrets-creating-sa_nodes-pods-secrets[Creating a service account token secret].

include::modules/nodes-pods-secrets-creating.adoc[leveloffset=+1]

@@ -36,9 +37,11 @@ include::modules/nodes-pods-secrets-creating-sa.adoc[leveloffset=+2]

* For more information on using secrets in pods, see xref:../../nodes/pods/nodes-pods-secrets.adoc#nodes-pods-secrets-creating_nodes-pods-secrets[Understanding how to create secrets].

* For information on requesting bound service account tokens, see xref:../../authentication/bound-service-account-tokens.adoc#bound-sa-tokens-configuring_bound-service-account-tokens[Using bound service account tokens]
ifndef::openshift-rosa,openshift-dedicated[]
* For information on requesting bound service account tokens, see xref:../../authentication/bound-service-account-tokens.adoc#bound-sa-tokens-configuring_bound-service-account-tokens[Using bound service account tokens]

* For information on creating service accounts, see xref:../../authentication/understanding-and-creating-service-accounts.adoc#understanding-and-creating-service-accounts[Understanding and creating service accounts].
endif::openshift-rosa,openshift-dedicated[]

include::modules/nodes-pods-secrets-creating-basic.adoc[leveloffset=+2]


@@ -21,7 +21,9 @@ In situations where you might want more control over where new pods are placed,
+
You can control pod placement by using the following scheduling features:

ifndef::openshift-rosa,openshift-dedicated[]
* xref:../../nodes/scheduling/nodes-scheduler-profiles.adoc#nodes-scheduler-profiles[Scheduler profiles]
endif::openshift-rosa,openshift-dedicated[]
* xref:../../nodes/scheduling/nodes-scheduler-pod-affinity.adoc#nodes-scheduler-pod-affinity[Pod affinity and anti-affinity rules]
* xref:../../nodes/scheduling/nodes-scheduler-node-affinity.adoc#nodes-scheduler-node-affinity-about_nodes-scheduler-node-affinity[Node affinity]
* xref:../../nodes/scheduling/nodes-scheduler-node-selectors.adoc#nodes-scheduler-node-selectors[Node selectors]

@@ -26,10 +26,14 @@ include::modules/nodes-scheduler-node-affinity-configuring-preferred.adoc[levelo

include::modules/nodes-scheduler-node-affinity-example.adoc[leveloffset=+1]

ifndef::openshift-rosa,openshift-dedicated[]
include::modules/olm-overriding-operator-pod-affinity.adoc[leveloffset=+1]
endif::openshift-rosa,openshift-dedicated[]

ifndef::openshift-rosa,openshift-dedicated[]
[id="nodes-scheduler-node-affinity-addtl-resources_{context}"]
[role="_additional-resources"]
== Additional resources

* xref:../../nodes/nodes/nodes-nodes-working.adoc#nodes-nodes-working-updating_nodes-nodes-working[Understanding how to update labels on nodes]
endif::openshift-rosa,openshift-dedicated[]

@@ -22,9 +22,11 @@ include::modules/nodes-scheduler-node-selectors-about.adoc[leveloffset=+1]

include::modules/nodes-scheduler-node-selectors-pod.adoc[leveloffset=+1]

ifndef::openshift-rosa,openshift-dedicated[]
include::modules/nodes-scheduler-node-selectors-cluster.adoc[leveloffset=+1]

include::modules/nodes-scheduler-node-selectors-project.adoc[leveloffset=+1]
endif::openshift-rosa,openshift-dedicated[]

[role="_additional-resources"]
.Additional resources

@@ -16,4 +16,6 @@ include::modules/nodes-scheduler-pod-anti-affinity-configuring.adoc[leveloffset=

include::modules/nodes-scheduler-pod-affinity-example.adoc[leveloffset=+1]

ifndef::openshift-rosa,openshift-dedicated[]
include::modules/olm-overriding-operator-pod-affinity.adoc[leveloffset=+1]
endif::openshift-rosa,openshift-dedicated[]

@@ -17,7 +17,9 @@ include::modules/nodes-scheduler-pod-topology-spread-constraints-configuring.ado
// Sample pod topology spread constraints
include::modules/nodes-scheduler-pod-topology-spread-constraints-examples.adoc[leveloffset=+1]

ifndef::openshift-rosa,openshift-dedicated[]
[role="_additional-resources"]
== Additional resources

* xref:../../nodes/nodes/nodes-nodes-working.adoc#nodes-nodes-working-updating_nodes-nodes-working[Understanding how to update labels on nodes]
endif::openshift-rosa,openshift-dedicated[]

@@ -29,8 +29,10 @@ include::modules/nodes-scheduler-taints-tolerations-projects.adoc[leveloffset=+2
.Additional resources

* Adding taints and tolerations xref:../../nodes/scheduling/nodes-scheduler-taints-tolerations.adoc#nodes-scheduler-taints-tolerations-adding_nodes-scheduler-taints-tolerations[manually to nodes] or xref:../../nodes/scheduling/nodes-scheduler-taints-tolerations.adoc#nodes-scheduler-taints-tolerations-adding-machineset_nodes-scheduler-taints-tolerations[with compute machine sets]
ifndef::openshift-rosa,openshift-dedicated[]
* xref:../../nodes/scheduling/nodes-scheduler-node-selectors.adoc#nodes-scheduler-node-selectors-project_nodes-scheduler-node-selectors[Creating project-wide node selectors]
* xref:../../operators/admin/olm-adding-operators-to-cluster.adoc#olm-pod-placement_olm-adding-operators-to-a-cluster[Pod placement of Operator workloads]
endif::openshift-rosa,openshift-dedicated[]

include::modules/nodes-scheduler-taints-tolerations-special.adoc[leveloffset=+2]
