From 150e1cb1fd0af91c4ca5debfd02074655fc0f3ea Mon Sep 17 00:00:00 2001 From: Ben Hardesty Date: Thu, 26 Sep 2024 11:34:41 -0400 Subject: [PATCH] OSDOCS-11980: Migrate Nodes to ROSA HCP --- _topic_maps/_topic_map.yml | 4 +- _topic_maps/_topic_map_osd.yml | 4 +- _topic_maps/_topic_map_rosa.yml | 7 +- _topic_maps/_topic_map_rosa_hcp.yml | 335 +++++++++++++++++- ...er-node-tuning-operator-specification.adoc | 1 + ...-tuning-operator-default-profiles-set.adoc | 1 + modules/custom-tuning-specification.adoc | 3 +- ...rator-supported-tuned-daemon-plug-ins.adoc | 2 +- modules/node-tuning-operator.adoc | 2 + ...es-cluster-overcommit-configure-nodes.adoc | 2 +- .../nodes-cluster-resource-levels-job.adoc | 3 +- .../nodes-cma-autoscaling-custom-audit.adoc | 24 +- .../nodes-cma-autoscaling-custom-gather.adoc | 20 +- ...s-cma-autoscaling-custom-uninstalling.adoc | 24 +- modules/nodes-containers-volumes-subpath.adoc | 2 +- modules/nodes-nodes-jobs-about.adoc | 4 +- modules/nodes-nodes-jobs-creating-cron.adoc | 8 +- modules/nodes-nodes-viewing-listing.adoc | 4 +- modules/nodes-pods-daemonsets-creating.adoc | 4 +- modules/nodes-pods-priority-configuring.adoc | 16 +- .../nodes-pods-secrets-creating-basic.adoc | 2 +- .../nodes-pods-secrets-creating-docker.adoc | 2 +- .../nodes-pods-secrets-creating-opaque.adoc | 2 +- modules/nodes-pods-secrets-creating-sa.adoc | 2 +- ...r-node-affinity-configuring-preferred.adoc | 4 +- ...er-node-affinity-configuring-required.adoc | 4 +- .../nodes-scheduler-node-selectors-about.adoc | 2 +- .../nodes-scheduler-node-selectors-pod.adoc | 8 +- modules/rosa-creating-node-tuning.adoc | 4 +- modules/rosa-deleting-node-tuning.adoc | 2 +- modules/rosa-modifying-node-tuning.adoc | 2 +- nodes/clusters/nodes-cluster-overcommit.adoc | 8 +- .../nodes-cma-autoscaling-custom-adding.adoc | 4 +- ...odes-cma-autoscaling-custom-debugging.adoc | 8 +- .../nodes-cma-autoscaling-custom-install.adoc | 8 +- ...nodes-cma-autoscaling-custom-removing.adoc | 8 +- 
...s-cma-autoscaling-custom-trigger-auth.adoc | 4 +- nodes/containers/nodes-containers-using.adoc | 8 +- nodes/index.adoc | 73 ++-- nodes/nodes/nodes-node-tuning-operator.adoc | 18 + nodes/nodes/nodes-nodes-viewing.adoc | 6 +- nodes/nodes/nodes-nodes-working.adoc | 14 +- .../nodes}/rosa-tuning-config.adoc | 4 +- nodes/pods/nodes-pods-configuring.adoc | 8 +- nodes/pods/nodes-pods-priority.adoc | 8 +- nodes/pods/nodes-pods-secrets.adoc | 4 +- nodes/pods/nodes-pods-using.adoc | 3 + nodes/scheduling/nodes-scheduler-about.adoc | 8 +- .../nodes-scheduler-node-affinity.adoc | 8 +- .../nodes-scheduler-node-selectors.adoc | 8 +- .../nodes-scheduler-pod-affinity.adoc | 4 +- ...duler-pod-topology-spread-constraints.adoc | 4 +- 52 files changed, 523 insertions(+), 199 deletions(-) rename {rosa_hcp => nodes/nodes}/rosa-tuning-config.adoc (63%) diff --git a/_topic_maps/_topic_map.yml b/_topic_maps/_topic_map.yml index 0adfc3198e..f64110e69d 100644 --- a/_topic_maps/_topic_map.yml +++ b/_topic_maps/_topic_map.yml @@ -2666,10 +2666,10 @@ Topics: File: nodes-secondary-scheduler-configuring - Name: Uninstalling the Secondary Scheduler Operator File: nodes-secondary-scheduler-uninstalling -- Name: Using Jobs and DaemonSets +- Name: Using jobs and daemon sets Dir: jobs Topics: - - Name: Running background tasks on nodes automatically with daemonsets + - Name: Running background tasks on nodes automatically with daemon sets File: nodes-pods-daemonsets Distros: openshift-enterprise,openshift-origin - Name: Running tasks in pods using jobs diff --git a/_topic_maps/_topic_map_osd.yml b/_topic_maps/_topic_map_osd.yml index b054ced82e..78ce5931ea 100644 --- a/_topic_maps/_topic_map_osd.yml +++ b/_topic_maps/_topic_map_osd.yml @@ -1071,10 +1071,10 @@ Topics: # File: nodes-secondary-scheduler-configuring # - Name: Uninstalling the Secondary Scheduler Operator # File: nodes-secondary-scheduler-uninstalling -- Name: Using Jobs and DaemonSets +- Name: Using jobs and daemon sets Dir: jobs Topics: 
- - Name: Running background tasks on nodes automatically with daemonsets + - Name: Running background tasks on nodes automatically with daemon sets File: nodes-pods-daemonsets Distros: openshift-dedicated - Name: Running tasks in pods using jobs diff --git a/_topic_maps/_topic_map_rosa.yml b/_topic_maps/_topic_map_rosa.yml index 2146d02437..a910d4afdf 100644 --- a/_topic_maps/_topic_map_rosa.yml +++ b/_topic_maps/_topic_map_rosa.yml @@ -267,8 +267,6 @@ Topics: File: rosa-hcp-sts-creating-a-cluster-ext-auth - Name: Creating ROSA with HCP clusters without a CNI plugin File: rosa-hcp-cluster-no-cni -- Name: Using the Node Tuning Operator on ROSA with HCP - File: rosa-tuning-config - Name: Deleting a ROSA with HCP cluster File: rosa-hcp-deleting-cluster --- @@ -1305,7 +1303,6 @@ Topics: File: nodes-cma-autoscaling-custom-rn-past - Name: Custom Metrics Autoscaler Operator overview File: nodes-cma-autoscaling-custom - - Name: Installing the custom metrics autoscaler File: nodes-cma-autoscaling-custom-install - Name: Understanding the custom metrics autoscaler triggers @@ -1365,10 +1362,10 @@ Topics: # File: nodes-secondary-scheduler-configuring # - Name: Uninstalling the Secondary Scheduler Operator # File: nodes-secondary-scheduler-uninstalling -- Name: Using Jobs and DaemonSets +- Name: Using jobs and daemon sets Dir: jobs Topics: - - Name: Running background tasks on nodes automatically with daemonsets + - Name: Running background tasks on nodes automatically with daemon sets File: nodes-pods-daemonsets Distros: openshift-rosa - Name: Running tasks in pods using jobs diff --git a/_topic_maps/_topic_map_rosa_hcp.yml b/_topic_maps/_topic_map_rosa_hcp.yml index 42269d07a4..c10437bb6e 100644 --- a/_topic_maps/_topic_map_rosa_hcp.yml +++ b/_topic_maps/_topic_map_rosa_hcp.yml @@ -189,8 +189,6 @@ Topics: File: rosa-hcp-aws-private-creating-cluster - Name: Creating ROSA with HCP clusters with external authentication File: rosa-hcp-sts-creating-a-cluster-ext-auth -- Name: 
Using the Node Tuning Operator on ROSA with HCP - File: rosa-tuning-config --- Name: Cluster administration Dir: rosa_cluster_admin @@ -381,3 +379,336 @@ Distros: openshift-rosa-hcp Topics: - Name: Overview of nodes File: index +- Name: Working with pods + Dir: pods + Topics: + - Name: About pods + File: nodes-pods-using + - Name: Viewing pods + File: nodes-pods-viewing + - Name: Configuring a cluster for pods + File: nodes-pods-configuring +# Cannot create namespace to install VPA; revisit after Operator book converted +# - Name: Automatically adjust pod resource levels with the vertical pod autoscaler +# File: nodes-pods-vertical-autoscaler + - Name: Providing sensitive data to pods + File: nodes-pods-secrets + - Name: Creating and using config maps + File: nodes-pods-configmaps +# Cannot create required kubeletconfigs +# - Name: Using Device Manager to make devices available to nodes +# File: nodes-pods-plugins + - Name: Including pod priority in pod scheduling decisions + File: nodes-pods-priority + - Name: Placing pods on specific nodes using node selectors + File: nodes-pods-node-selectors +# Cannot create namespace to install Run Once; revisit after Operator book converted +# - Name: Run Once Duration Override Operator +# Dir: run_once_duration_override +# Topics: +# - Name: Run Once Duration Override Operator overview +# File: index +# - Name: Run Once Duration Override Operator release notes +# File: run-once-duration-override-release-notes +# - Name: Overriding the active deadline for run-once pods +# File: run-once-duration-override-install +# - Name: Uninstalling the Run Once Duration Override Operator +# File: run-once-duration-override-uninstall +- Name: Automatically scaling pods with the Custom Metrics Autoscaler Operator + Dir: cma + Topics: + - Name: Release notes + Dir: nodes-cma-rn + Topics: + - Name: Custom Metrics Autoscaler Operator release notes + File: nodes-cma-autoscaling-custom-rn + - Name: Past releases + File: 
nodes-cma-autoscaling-custom-rn-past + - Name: Custom Metrics Autoscaler Operator overview + File: nodes-cma-autoscaling-custom + - Name: Installing the custom metrics autoscaler + File: nodes-cma-autoscaling-custom-install + - Name: Understanding the custom metrics autoscaler triggers + File: nodes-cma-autoscaling-custom-trigger + - Name: Understanding the custom metrics autoscaler trigger authentications + File: nodes-cma-autoscaling-custom-trigger-auth + - Name: Pausing the custom metrics autoscaler + File: nodes-cma-autoscaling-custom-pausing + - Name: Gathering audit logs + File: nodes-cma-autoscaling-custom-audit-log + - Name: Gathering debugging data + File: nodes-cma-autoscaling-custom-debugging + - Name: Viewing Operator metrics + File: nodes-cma-autoscaling-custom-metrics + - Name: Understanding how to add custom metrics autoscalers + File: nodes-cma-autoscaling-custom-adding + - Name: Removing the Custom Metrics Autoscaler Operator + File: nodes-cma-autoscaling-custom-removing +- Name: Controlling pod placement onto nodes (scheduling) + Dir: scheduling + Topics: + - Name: About pod placement using the scheduler + File: nodes-scheduler-about + - Name: Placing pods relative to other pods using pod affinity and anti-affinity rules + File: nodes-scheduler-pod-affinity + - Name: Controlling pod placement on nodes using node affinity rules + File: nodes-scheduler-node-affinity + - Name: Placing pods onto overcommited nodes + File: nodes-scheduler-overcommit +# Per OSDOCS-9791, ROSA customers cannot add taints to individual nodes. 
+# - Name: Controlling pod placement using node taints +# File: nodes-scheduler-taints-tolerations + - Name: Placing pods on specific nodes using node selectors + File: nodes-scheduler-node-selectors + - Name: Controlling pod placement using pod topology spread constraints + File: nodes-scheduler-pod-topology-spread-constraints +# - Name: Placing a pod on a specific node by name +# File: nodes-scheduler-node-names +# - Name: Placing a pod in a specific project +# File: nodes-scheduler-node-projects +# - Name: Keeping your cluster balanced using the descheduler +# File: nodes-scheduler-descheduler +# Cannot create namespace to install Desceduler Operator; revisit after Operator book converted +# - Name: Evicting pods using the descheduler +# File: nodes-descheduler +# Cannot create namespace to install Secondary Scheduler Operator; revisit after Operator book converted +# - Name: Secondary scheduler +# Dir: secondary_scheduler +# Distros: openshift-enterprise +# Topics: +# - Name: Secondary scheduler overview +# File: index +# - Name: Secondary Scheduler Operator release notes +# File: nodes-secondary-scheduler-release-notes +# - Name: Scheduling pods using a secondary scheduler +# File: nodes-secondary-scheduler-configuring +# - Name: Uninstalling the Secondary Scheduler Operator +# File: nodes-secondary-scheduler-uninstalling +# - Name: Using Jobs and DaemonSets +# Dir: jobs +# Topics: +# - Name: Running background tasks on nodes automatically with daemonsets +# File: nodes-pods-daemonsets +# Distros: openshift-rosa-hcp +# - Name: Running tasks in pods using jobs +# File: nodes-nodes-jobs +# - Name: Working with nodes +# Dir: nodes +# Distros: openshift-rosa-hcp +# Topics: +# - Name: Viewing and listing the nodes in your cluster +# File: nodes-nodes-viewing +# cannot use oc adm cordon; cannot patch resource "machinesets"; cannot patch resource "nodes" +# - Name: Working with nodes +# File: nodes-nodes-working +# cannot create resource "kubeletconfigs", 
"schedulers", "machineconfigs", "kubeletconfigs" +# - Name: Managing nodes +# File: nodes-nodes-managing +# cannot create resource "kubeletconfigs" +# - Name: Managing graceful node shutdown +# File: nodes-nodes-graceful-shutdown +# cannot create resource "kubeletconfigs" +# - Name: Managing the maximum number of pods per node +# File: nodes-nodes-managing-max-pods +# - Name: Using the Node Tuning Operator +# File: nodes-node-tuning-operator +# - Name: Remediating, fencing, and maintaining nodes +# File: nodes-remediating-fencing-maintaining-rhwa +# Cannot create namespace needed to oc debug and reboot; revisit after Operator book converted +# - Name: Understanding node rebooting +# File: nodes-nodes-rebooting +# cannot create resource "kubeletconfigs" +# - Name: Freeing node resources using garbage collection +# File: nodes-nodes-garbage-collection +# cannot create resource "kubeletconfigs" +# - Name: Allocating resources for nodes +# File: nodes-nodes-resources-configuring +# cannot create resource "kubeletconfigs" +# - Name: Allocating specific CPUs for nodes in a cluster +# File: nodes-nodes-resources-cpus +# cannot create resource "kubeletconfigs" +# - Name: Configuring the TLS security profile for the kubelet +# File: nodes-nodes-tls +# Distros: openshift-rosa-hcp +# - Name: Monitoring for problems in your nodes +# File: nodes-nodes-problem-detector +# - Name: Machine Config Daemon metrics +# File: nodes-nodes-machine-config-daemon-metrics +# cannot patch resource "nodes" +# - Name: Creating infrastructure nodes +# File: nodes-nodes-creating-infrastructure-nodes +# - Name: Working with containers +# Dir: containers +# Topics: +# - Name: Understanding containers +# File: nodes-containers-using +# - Name: Using Init Containers to perform tasks before a pod is deployed +# File: nodes-containers-init +# Distros: openshift-rosa-hcp +# - Name: Using volumes to persist container data +# File: nodes-containers-volumes +# - Name: Mapping volumes using projected 
volumes +# File: nodes-containers-projected-volumes +# - Name: Allowing containers to consume API objects +# File: nodes-containers-downward-api +# - Name: Copying files to or from a container +# File: nodes-containers-copying-files +# - Name: Executing remote commands in a container +# File: nodes-containers-remote-commands +# - Name: Using port forwarding to access applications in a container +# File: nodes-containers-port-forwarding +# cannot patch resource "configmaps" +# - Name: Using sysctls in containers +# File: nodes-containers-sysctls +# - Name: Working with clusters +# Dir: clusters +# Topics: +# - Name: Viewing system event information in a cluster +# File: nodes-containers-events +# - Name: Analyzing cluster resource levels +# File: nodes-cluster-resource-levels +# Distros: openshift-rosa-hcp +# - Name: Setting limit ranges +# File: nodes-cluster-limit-ranges +# - Name: Configuring cluster memory to meet container memory and risk requirements +# File: nodes-cluster-resource-configure +# Distros: openshift-rosa-hcp +# - Name: Configuring your cluster to place pods on overcommited nodes +# File: nodes-cluster-overcommit +# Distros: openshift-rosa-hcp +# - Name: Configuring the Linux cgroup version on your nodes +# File: nodes-cluster-cgroups-2 +# Distros: openshift-enterprise +# - Name: Configuring the Linux cgroup version on your nodes +# File: nodes-cluster-cgroups-okd +# Distros: openshift-origin +# The TechPreviewNoUpgrade Feature Gate is not allowed +# - Name: Enabling features using FeatureGates +# File: nodes-cluster-enabling-features +# Distros: openshift-rosa-hcp +# Error: nodes.config.openshift.io "cluster" could not be patched +# - Name: Improving cluster stability in high latency environments using worker latency profiles +# File: nodes-cluster-worker-latency-profiles +# Not supported per Michael McNeill +# - Name: Remote worker nodes on the network edge +# Dir: edge +# Topics: +# - Name: Using remote worker node at the network edge +# File: 
nodes-edge-remote-workers +# Not supported per Michael McNeill +# - Name: Worker nodes for single-node OpenShift clusters +# Dir: nodes +# Distros: openshift-rosa-hcp +# Topics: +# - Name: Adding worker nodes to single-node OpenShift clusters +# File: nodes-sno-worker-nodes +- Name: Using jobs and daemon sets + Dir: jobs + Topics: + - Name: Running background tasks on nodes automatically with daemon sets + File: nodes-pods-daemonsets + - Name: Running tasks in pods using jobs + File: nodes-nodes-jobs +- Name: Working with nodes + Dir: nodes + Topics: + - Name: Viewing and listing the nodes in your cluster + File: nodes-nodes-viewing + - Name: Working with nodes + File: nodes-nodes-working +# cannot use oc adm cordon; cannot patch resource "machinesets"; cannot patch resource "nodes" +# - Name: Working with nodes +# File: nodes-nodes-working +# cannot create resource "kubeletconfigs", "schedulers", "machineconfigs", "kubeletconfigs" +# - Name: Managing nodes +# File: nodes-nodes-managing +# cannot create resource "kubeletconfigs" +# - Name: Managing graceful node shutdown +# File: nodes-nodes-graceful-shutdown +# cannot create resource "kubeletconfigs" +# - Name: Managing the maximum number of pods per node +# File: nodes-nodes-managing-max-pods + - Name: Using the Node Tuning Operator + File: nodes-node-tuning-operator +# - Name: Remediating, fencing, and maintaining nodes +# File: nodes-remediating-fencing-maintaining-rhwa +# Cannot create namespace needed to oc debug and reboot; revisit after Operator book converted +# - Name: Understanding node rebooting +# File: nodes-nodes-rebooting +# cannot create resource "kubeletconfigs" +# - Name: Freeing node resources using garbage collection +# File: nodes-nodes-garbage-collection +# cannot create resource "kubeletconfigs" +# - Name: Allocating resources for nodes +# File: nodes-nodes-resources-configuring +# cannot create resource "kubeletconfigs" +# - Name: Allocating specific CPUs for nodes in a cluster +# File: 
nodes-nodes-resources-cpus +# cannot create resource "kubeletconfigs" +# - Name: Configuring the TLS security profile for the kubelet +# File: nodes-nodes-tls +# Distros: openshift-rosa +# - Name: Monitoring for problems in your nodes +# File: nodes-nodes-problem-detector +# cannot patch resource "nodes" +# - Name: Creating infrastructure nodes +# File: nodes-nodes-creating-infrastructure-nodes +- Name: Working with containers + Dir: containers + Topics: + - Name: Understanding containers + File: nodes-containers-using + - Name: Using Init Containers to perform tasks before a pod is deployed + File: nodes-containers-init + - Name: Using volumes to persist container data + File: nodes-containers-volumes + - Name: Mapping volumes using projected volumes + File: nodes-containers-projected-volumes + - Name: Allowing containers to consume API objects + File: nodes-containers-downward-api + - Name: Copying files to or from a container + File: nodes-containers-copying-files + - Name: Executing remote commands in a container + File: nodes-containers-remote-commands + - Name: Using port forwarding to access applications in a container + File: nodes-containers-port-forwarding +# cannot patch resource "configmaps" +# - Name: Using sysctls in containers +# File: nodes-containers-sysctls +- Name: Working with clusters + Dir: clusters + Topics: + - Name: Viewing system event information in a cluster + File: nodes-containers-events + - Name: Analyzing cluster resource levels + File: nodes-cluster-resource-levels + - Name: Setting limit ranges + File: nodes-cluster-limit-ranges + - Name: Configuring cluster memory to meet container memory and risk requirements + File: nodes-cluster-resource-configure + - Name: Configuring your cluster to place pods on overcommited nodes + File: nodes-cluster-overcommit +# - Name: Configuring the Linux cgroup version on your nodes +# File: nodes-cluster-cgroups-2 +# - Name: Configuring the Linux cgroup version on your nodes +# File: 
nodes-cluster-cgroups-okd +# The TechPreviewNoUpgrade Feature Gate is not allowed +# - Name: Enabling features using FeatureGates +# File: nodes-cluster-enabling-features +# Distros: openshift-rosa +# Error: nodes.config.openshift.io "cluster" could not be patched +# - Name: Improving cluster stability in high latency environments using worker latency profiles +# File: nodes-cluster-worker-latency-profiles +# Not supported per Michael McNeill +#- Name: Remote worker nodes on the network edge +# Dir: edge +# Topics: +# - Name: Using remote worker node at the network edge +# File: nodes-edge-remote-workers +# Not supported per Michael McNeill +#- Name: Worker nodes for single-node OpenShift clusters +# Dir: nodes +# Distros: openshift-rosa +# Topics: +# - Name: Adding worker nodes to single-node OpenShift clusters +# File: nodes-sno-worker-nodes diff --git a/modules/accessing-an-example-cluster-node-tuning-operator-specification.adoc b/modules/accessing-an-example-cluster-node-tuning-operator-specification.adoc index 32934cfea1..6fd8c6eea2 100644 --- a/modules/accessing-an-example-cluster-node-tuning-operator-specification.adoc +++ b/modules/accessing-an-example-cluster-node-tuning-operator-specification.adoc @@ -2,6 +2,7 @@ // // * scalability_and_performance/using-node-tuning-operator.adoc // * post_installation_configuration/node-tasks.adoc +// * nodes/nodes/nodes-node-tuning-operator.adoc :_mod-docs-content-type: PROCEDURE [id="accessing-an-example-node-tuning-operator-specification_{context}"] diff --git a/modules/cluster-node-tuning-operator-default-profiles-set.adoc b/modules/cluster-node-tuning-operator-default-profiles-set.adoc index 85fba84495..c66f000c98 100644 --- a/modules/cluster-node-tuning-operator-default-profiles-set.adoc +++ b/modules/cluster-node-tuning-operator-default-profiles-set.adoc @@ -2,6 +2,7 @@ // // * scalability_and_performance/using-node-tuning-operator.adoc // * post_installation_configuration/node-tasks.adoc +// * 
nodes/nodes/nodes-node-tuning-operator.adoc [id="custom-tuning-default-profiles-set_{context}"] = Default profiles set on a cluster diff --git a/modules/custom-tuning-specification.adoc b/modules/custom-tuning-specification.adoc index d1af2d82fe..ce5bd53bb7 100644 --- a/modules/custom-tuning-specification.adoc +++ b/modules/custom-tuning-specification.adoc @@ -2,7 +2,8 @@ // // * scalability_and_performance/using-node-tuning-operator.adoc // * post_installation_configuration/node-tasks.adoc -// * rosa_hcp/rosa-tuning-config.adoc +// * nodes/nodes/nodes-node-tuning-operator.adoc +// * nodes/nodes/rosa-tuning-config.adoc ifeval::["{context}" == "rosa-tuning-config"] :rosa-hcp-tuning: diff --git a/modules/node-tuning-operator-supported-tuned-daemon-plug-ins.adoc b/modules/node-tuning-operator-supported-tuned-daemon-plug-ins.adoc index 785f46c3b1..0ecc1ec2f8 100644 --- a/modules/node-tuning-operator-supported-tuned-daemon-plug-ins.adoc +++ b/modules/node-tuning-operator-supported-tuned-daemon-plug-ins.adoc @@ -2,7 +2,7 @@ // // * scalability_and_performance/using-node-tuning-operator.adoc // * post_installation_configuration/node-tasks.adoc -// * nodes/nodes/nodes-node-tuning-operator +// * nodes/nodes/nodes-node-tuning-operator.adoc [id="supported-tuned-daemon-plug-ins_{context}"] = Supported TuneD daemon plugins diff --git a/modules/node-tuning-operator.adoc b/modules/node-tuning-operator.adoc index 92cebbe55b..85a0db78a7 100644 --- a/modules/node-tuning-operator.adoc +++ b/modules/node-tuning-operator.adoc @@ -3,6 +3,8 @@ // * scalability_and_performance/using-node-tuning-operator.adoc // * operators/operator-reference.adoc // * post_installation_configuration/node-tasks.adoc +// * nodes/nodes/nodes-node-tuning-operator.adoc +// * nodes/nodes/rosa-tuning-config.adoc ifeval::["{context}" == "cluster-operators-ref"] :operators: diff --git a/modules/nodes-cluster-overcommit-configure-nodes.adoc b/modules/nodes-cluster-overcommit-configure-nodes.adoc index 
7518a1d6be..e2920a7179 100644 --- a/modules/nodes-cluster-overcommit-configure-nodes.adoc +++ b/modules/nodes-cluster-overcommit-configure-nodes.adoc @@ -20,7 +20,7 @@ default operating system setting. {product-title} also configures the kernel not to panic when it runs out of memory by setting the `vm.panic_on_oom` parameter to `0`. A setting of 0 instructs the kernel to call oom_killer in an Out of Memory (OOM) condition, which kills -processes based on priority +processes based on priority. You can view the current setting by running the following commands on your nodes: diff --git a/modules/nodes-cluster-resource-levels-job.adoc b/modules/nodes-cluster-resource-levels-job.adoc index 36e5e69c35..6f1e1ce2f1 100644 --- a/modules/nodes-cluster-resource-levels-job.adoc +++ b/modules/nodes-cluster-resource-levels-job.adoc @@ -184,8 +184,7 @@ $ oc create -f cluster-capacity-job.yaml .Verification -. Check the job logs to find the number of pods that can be scheduled in the - cluster: +. Check the job logs to find the number of pods that can be scheduled in the cluster: + [source,terminal] ---- diff --git a/modules/nodes-cma-autoscaling-custom-audit.adoc b/modules/nodes-cma-autoscaling-custom-audit.adoc index ee4d72adae..7fed3dbc03 100644 --- a/modules/nodes-cma-autoscaling-custom-audit.adoc +++ b/modules/nodes-cma-autoscaling-custom-audit.adoc @@ -18,7 +18,7 @@ You can configure auditing for the Custom Metrics Autoscaler Operator by editing . 
Edit the `KedaController` custom resource to add the `auditConfig` stanza: + -ifndef::openshift-rosa,openshift-dedicated[] +ifndef::openshift-rosa,openshift-rosa-hcp,openshift-dedicated[] [source,yaml] ---- kind: KedaController @@ -43,8 +43,8 @@ spec: maxBackup: "1" maxSize: "50" ---- -endif::openshift-rosa,openshift-dedicated[] -ifdef::openshift-rosa,openshift-dedicated[] +endif::openshift-rosa,openshift-rosa-hcp,openshift-dedicated[] +ifdef::openshift-rosa,openshift-rosa-hcp,openshift-dedicated[] [source,yaml] ---- kind: KedaController @@ -69,7 +69,7 @@ spec: maxBackup: "1" maxSize: "50" ---- -endif::openshift-rosa,openshift-dedicated[] +endif::openshift-rosa,openshift-rosa-hcp,openshift-dedicated[] <1> Specifies the output format of the audit log, either `legacy` or `json`. <2> Specifies an existing persistent volume claim for storing the log data. All requests coming to the API server are logged to this persistent volume claim. If you leave this field empty, the log data is sent to stdout. <3> Specifies which events should be recorded and what data they should include: @@ -102,18 +102,18 @@ oc adm must-gather -- /usr/bin/gather_audit_logs .. Obtain the name of the `keda-metrics-apiserver-*` pod: + -ifndef::openshift-rosa,openshift-dedicated[] +ifndef::openshift-rosa,openshift-rosa-hcp,openshift-dedicated[] [source,terminal] ---- oc get pod -n openshift-keda ---- -endif::openshift-rosa,openshift-dedicated[] -ifdef::openshift-rosa,openshift-dedicated[] +endif::openshift-rosa,openshift-rosa-hcp,openshift-dedicated[] +ifdef::openshift-rosa,openshift-rosa-hcp,openshift-dedicated[] [source,terminal] ---- oc get pod -n keda ---- -endif::openshift-rosa,openshift-dedicated[] +endif::openshift-rosa,openshift-rosa-hcp,openshift-dedicated[] + .Example output + @@ -153,7 +153,7 @@ $ oc logs keda-metrics-apiserver-65c7cc44fd-rrl4r|grep -i metadata + .. 
Use a command similar to the following to log into the `keda-metrics-apiserver-*` pod: + -ifndef::openshift-rosa,openshift-dedicated[] +ifndef::openshift-rosa,openshift-rosa-hcp,openshift-dedicated[] [source,terminal] ---- $ oc rsh pod/keda-metrics-apiserver- -n openshift-keda @@ -165,8 +165,8 @@ For example: ---- $ oc rsh pod/keda-metrics-apiserver-65c7cc44fd-rrl4r -n openshift-keda ---- -endif::openshift-rosa,openshift-dedicated[] -ifdef::openshift-rosa,openshift-dedicated[] +endif::openshift-rosa,openshift-rosa-hcp,openshift-dedicated[] +ifdef::openshift-rosa,openshift-rosa-hcp,openshift-dedicated[] [source,terminal] ---- $ oc rsh pod/keda-metrics-apiserver- -n keda @@ -178,7 +178,7 @@ For example: ---- $ oc rsh pod/keda-metrics-apiserver-65c7cc44fd-rrl4r -n keda ---- -endif::openshift-rosa,openshift-dedicated[] +endif::openshift-rosa,openshift-rosa-hcp,openshift-dedicated[] .. Change to the `/var/audit-policy/` directory: + diff --git a/modules/nodes-cma-autoscaling-custom-gather.adoc b/modules/nodes-cma-autoscaling-custom-gather.adoc index 3e92dd8351..a6b3d9d211 100644 --- a/modules/nodes-cma-autoscaling-custom-gather.adoc +++ b/modules/nodes-cma-autoscaling-custom-gather.adoc @@ -23,19 +23,19 @@ The standard {product-title} `must-gather` command, `oc adm must-gather`, does n .Prerequisites -ifndef::openshift-rosa,openshift-dedicated[] +ifndef::openshift-rosa,openshift-rosa-hcp,openshift-dedicated[] * You are logged in to {product-title} as a user with the `cluster-admin` role. -endif::openshift-rosa,openshift-dedicated[] -ifdef::openshift-rosa,openshift-dedicated[] +endif::openshift-rosa,openshift-rosa-hcp,openshift-dedicated[] +ifdef::openshift-rosa,openshift-rosa-hcp,openshift-dedicated[] * You are logged in to {product-title} as a user with the `dedicated-admin` role. -endif::openshift-rosa,openshift-dedicated[] +endif::openshift-rosa,openshift-rosa-hcp,openshift-dedicated[] * The {product-title} CLI (`oc`) installed. 
.Procedure // Hide note from ROSA/OSD, as restricted is not supported. . Navigate to the directory where you want to store the `must-gather` data. -ifndef::openshift-rosa,openshift-dedicated[] +ifndef::openshift-rosa,openshift-rosa-hcp,openshift-dedicated[] + [NOTE] ==== @@ -46,7 +46,7 @@ If your cluster is using a restricted network, you must take additional steps. I $ oc import-image is/must-gather -n openshift ---- ==== -endif::openshift-rosa,openshift-dedicated[] +endif::openshift-rosa,openshift-rosa-hcp,openshift-dedicated[] . Perform one of the following: + @@ -82,7 +82,7 @@ $ oc adm must-gather --image-stream=openshift/must-gather --image=${IMAGE} -- + .Example must-gather output for the Custom Metric Autoscaler -ifndef::openshift-rosa,openshift-dedicated[] +ifndef::openshift-rosa,openshift-rosa-hcp,openshift-dedicated[] [%collapsible] ==== [source,terminal] @@ -166,8 +166,8 @@ ifndef::openshift-rosa,openshift-dedicated[] └── routes.yaml ---- ==== -endif::openshift-rosa,openshift-dedicated[] -ifdef::openshift-rosa,openshift-dedicated[] +endif::openshift-rosa,openshift-rosa-hcp,openshift-dedicated[] +ifdef::openshift-rosa,openshift-rosa-hcp,openshift-dedicated[] [%collapsible] ==== [source,terminal] @@ -251,7 +251,7 @@ ifdef::openshift-rosa,openshift-dedicated[] └── routes.yaml ---- ==== -endif::openshift-rosa,openshift-dedicated[] +endif::openshift-rosa,openshift-rosa-hcp,openshift-dedicated[] ifndef::openshift-origin[] . Create a compressed file from the `must-gather` directory that was created in your working directory. For example, on a computer that uses a Linux diff --git a/modules/nodes-cma-autoscaling-custom-uninstalling.adoc b/modules/nodes-cma-autoscaling-custom-uninstalling.adoc index c92d170bd7..4fe5e22b2e 100644 --- a/modules/nodes-cma-autoscaling-custom-uninstalling.adoc +++ b/modules/nodes-cma-autoscaling-custom-uninstalling.adoc @@ -16,12 +16,12 @@ Use the following procedure to remove the custom metrics autoscaler from your {p . 
In the {product-title} web console, click *Operators* -> *Installed Operators*. -ifndef::openshift-rosa,openshift-dedicated[] +ifndef::openshift-rosa,openshift-rosa-hcp,openshift-dedicated[] . Switch to the *openshift-keda* project. -endif::openshift-rosa,openshift-dedicated[] -ifdef::openshift-rosa,openshift-dedicated[] +endif::openshift-rosa,openshift-rosa-hcp,openshift-dedicated[] +ifdef::openshift-rosa,openshift-rosa-hcp,openshift-dedicated[] . Switch to the *keda* project. -endif::openshift-rosa,openshift-dedicated[] +endif::openshift-rosa,openshift-rosa-hcp,openshift-dedicated[] . Remove the `KedaController` custom resource. @@ -88,30 +88,30 @@ $ oc delete clusterrolebinding.keda.sh-v1alpha1-admin . Delete the custom metrics autoscaler project: + -ifndef::openshift-rosa,openshift-dedicated[] +ifndef::openshift-rosa,openshift-rosa-hcp,openshift-dedicated[] [source,terminal] ---- $ oc delete project openshift-keda ---- -endif::openshift-rosa,openshift-dedicated[] -ifdef::openshift-rosa,openshift-dedicated[] +endif::openshift-rosa,openshift-rosa-hcp,openshift-dedicated[] +ifdef::openshift-rosa,openshift-rosa-hcp,openshift-dedicated[] [source,terminal] ---- $ oc delete project keda ---- -endif::openshift-rosa,openshift-dedicated[] +endif::openshift-rosa,openshift-rosa-hcp,openshift-dedicated[] . 
Delete the Cluster Metric Autoscaler Operator: + -ifndef::openshift-rosa,openshift-dedicated[] +ifndef::openshift-rosa,openshift-rosa-hcp,openshift-dedicated[] [source,terminal] ---- $ oc delete operator/openshift-custom-metrics-autoscaler-operator.openshift-keda ---- -endif::openshift-rosa,openshift-dedicated[] -ifdef::openshift-rosa,openshift-dedicated[] +endif::openshift-rosa,openshift-rosa-hcp,openshift-dedicated[] +ifdef::openshift-rosa,openshift-rosa-hcp,openshift-dedicated[] [source,terminal] ---- $ oc delete operator/openshift-custom-metrics-autoscaler-operator.keda ---- -endif::openshift-rosa,openshift-dedicated[] \ No newline at end of file +endif::openshift-rosa,openshift-rosa-hcp,openshift-dedicated[] diff --git a/modules/nodes-containers-volumes-subpath.adoc b/modules/nodes-containers-volumes-subpath.adoc index 54921a604d..9636f943d7 100644 --- a/modules/nodes-containers-volumes-subpath.adoc +++ b/modules/nodes-containers-volumes-subpath.adoc @@ -6,7 +6,7 @@ [id="nodes-containers-volumes-subpath_{context}"] = Configuring volumes for multiple uses in a pod -You can configure a volume to allows you to share one volume for +You can configure a volume to share one volume for multiple uses in a single pod using the `volumeMounts.subPath` property to specify a `subPath` value inside a volume instead of the volume's root. diff --git a/modules/nodes-nodes-jobs-about.adoc b/modules/nodes-nodes-jobs-about.adoc index fc879db5cf..a9564c7709 100644 --- a/modules/nodes-nodes-jobs-about.adoc +++ b/modules/nodes-nodes-jobs-about.adoc @@ -100,7 +100,7 @@ to configure history limits so that old jobs and their pods are properly cleaned * `.spec.failedJobsHistoryLimit`. The number of failed finished jobs to retain (defaults to 1). 
-ifndef::openshift-rosa,openshift-dedicated[] +ifndef::openshift-rosa,openshift-rosa-hcp,openshift-dedicated[] [TIP] ==== * Delete cron jobs that you no longer need: @@ -114,7 +114,7 @@ Doing this prevents them from generating unnecessary artifacts. * You can suspend further executions by setting the `spec.suspend` to true. All subsequent executions are suspended until you reset to `false`. ==== -endif::openshift-rosa,openshift-dedicated[] +endif::openshift-rosa,openshift-rosa-hcp,openshift-dedicated[] [id="jobs-limits_{context}"] == Known limitations diff --git a/modules/nodes-nodes-jobs-creating-cron.adoc b/modules/nodes-nodes-jobs-creating-cron.adoc index f5f0fdb0b5..ab2ce9c3d4 100644 --- a/modules/nodes-nodes-jobs-creating-cron.adoc +++ b/modules/nodes-nodes-jobs-creating-cron.adoc @@ -14,7 +14,7 @@ To create a cron job: . Create a YAML file similar to the following: + -ifndef::openshift-rosa,openshift-dedicated[] +ifndef::openshift-rosa,openshift-rosa-hcp,openshift-dedicated[] [source,yaml] ---- apiVersion: batch/v1 @@ -62,8 +62,8 @@ all subsequent executions will be suspended. <8> Job template. This is similar to the job example. <9> Sets a label for jobs spawned by this cron job. <10> The restart policy of the pod. This does not apply to the job controller. -endif::openshift-rosa,openshift-dedicated[] -ifdef::openshift-rosa,openshift-dedicated[] +endif::openshift-rosa,openshift-rosa-hcp,openshift-dedicated[] +ifdef::openshift-rosa,openshift-rosa-hcp,openshift-dedicated[] [source,yaml] ---- apiVersion: batch/v1 @@ -116,7 +116,7 @@ These fields specify how many completed and failed jobs should be kept. By defa set to `3` and `1` respectively. Setting a limit to `0` corresponds to keeping none of the corresponding kind of jobs after they finish. ==== -endif::openshift-rosa,openshift-dedicated[] +endif::openshift-rosa,openshift-rosa-hcp,openshift-dedicated[] . 
Create the cron job: + diff --git a/modules/nodes-nodes-viewing-listing.adoc b/modules/nodes-nodes-viewing-listing.adoc index d82c77cb0a..8bb209e637 100644 --- a/modules/nodes-nodes-viewing-listing.adoc +++ b/modules/nodes-nodes-viewing-listing.adoc @@ -211,14 +211,14 @@ Events: <11> <10> The pods on the node. <11> The events reported by the node. -ifndef::openshift-rosa,openshift-dedicated[] +ifndef::openshift-rosa,openshift-rosa-hcp,openshift-dedicated[] [NOTE] ==== The control plane label is not automatically added to newly created or updated master nodes. If you want to use the control plane label for your nodes, you can manually configure the label. For more information, see _Understanding how to update labels on nodes_ in the _Additional resources_ section. ==== -endif::openshift-rosa,openshift-dedicated[] +endif::openshift-rosa,openshift-rosa-hcp,openshift-dedicated[] Among the information shown for nodes, the following node conditions appear in the output of the commands shown in this section: diff --git a/modules/nodes-pods-daemonsets-creating.adoc b/modules/nodes-pods-daemonsets-creating.adoc index 9a5ee927f7..a6011d1c59 100644 --- a/modules/nodes-pods-daemonsets-creating.adoc +++ b/modules/nodes-pods-daemonsets-creating.adoc @@ -36,14 +36,14 @@ metadata: ---- ==== -ifndef::openshift-rosa,openshift-dedicated[] +ifndef::openshift-rosa,openshift-rosa-hcp,openshift-dedicated[] * If you are creating a new project, overwrite the default node selector: + [source,terminal] ---- $ oc adm new-project --node-selector="" ---- -endif::openshift-rosa,openshift-dedicated[] +endif::openshift-rosa,openshift-rosa-hcp,openshift-dedicated[] .Procedure diff --git a/modules/nodes-pods-priority-configuring.adoc b/modules/nodes-pods-priority-configuring.adoc index 3badec0a66..cc0c83acbf 100644 --- a/modules/nodes-pods-priority-configuring.adoc +++ b/modules/nodes-pods-priority-configuring.adoc @@ -18,7 +18,7 @@ You cannot add a priority class directly to an existing scheduled 
pod. To configure your cluster to use priority and preemption: -ifndef::openshift-rosa,openshift-dedicated[] +ifndef::openshift-rosa,openshift-rosa-hcp,openshift-dedicated[] . Create one or more priority classes: .. Create a YAML file similar to the following: @@ -51,7 +51,7 @@ $ oc create -f .yaml // ROSA/OSD cannot create new priority classes. Must use the defaults. .. Create a YAML file similar to the following: + -ifndef::openshift-rosa,openshift-dedicated[] +ifndef::openshift-rosa,openshift-rosa-hcp,openshift-dedicated[] [source,yaml] ---- apiVersion: v1 @@ -76,8 +76,8 @@ spec: priorityClassName: high-priority <1> ---- <1> Specify the priority class to use with this pod. -endif::openshift-rosa,openshift-dedicated[] -ifdef::openshift-rosa,openshift-dedicated[] +endif::openshift-rosa,openshift-rosa-hcp,openshift-dedicated[] +ifdef::openshift-rosa,openshift-rosa-hcp,openshift-dedicated[] [source,yaml] ---- apiVersion: v1 @@ -94,7 +94,7 @@ spec: priorityClassName: system-cluster-critical <1> ---- <1> Specify the priority class to use with this pod. -endif::openshift-rosa,openshift-dedicated[] +endif::openshift-rosa,openshift-rosa-hcp,openshift-dedicated[] .. Create the pod: + @@ -102,9 +102,9 @@ endif::openshift-rosa,openshift-dedicated[] ---- $ oc create -f .yaml ---- -endif::openshift-rosa,openshift-dedicated[] +endif::openshift-rosa,openshift-rosa-hcp,openshift-dedicated[] -ifdef::openshift-rosa,openshift-dedicated[] +ifdef::openshift-rosa,openshift-rosa-hcp,openshift-dedicated[] // ROSA/OSD cannot create new priority classes. Must use the defaults. . Define a pod spec to include the name of a priority class by creating a YAML file similar to the following: + @@ -131,6 +131,6 @@ spec: ---- $ oc create -f .yaml ---- -endif::openshift-rosa,openshift-dedicated[] +endif::openshift-rosa,openshift-rosa-hcp,openshift-dedicated[] + You can add the priority name directly to the pod configuration or to a pod template. 
diff --git a/modules/nodes-pods-secrets-creating-basic.adoc b/modules/nodes-pods-secrets-creating-basic.adoc index 6c702e6ed5..2f11fbefbf 100644 --- a/modules/nodes-pods-secrets-creating-basic.adoc +++ b/modules/nodes-pods-secrets-creating-basic.adoc @@ -18,7 +18,7 @@ You can use the `stringData` parameter to use clear text content. .Procedure -. Create a `Secret` object in a YAML file on a control plane node: +. Create a `Secret` object in a YAML file: + .Example `secret` object [source,yaml] diff --git a/modules/nodes-pods-secrets-creating-docker.adoc b/modules/nodes-pods-secrets-creating-docker.adoc index e621840a33..8668acc156 100644 --- a/modules/nodes-pods-secrets-creating-docker.adoc +++ b/modules/nodes-pods-secrets-creating-docker.adoc @@ -14,7 +14,7 @@ As an administrator, you can create a Docker configuration secret, which allows .Procedure -. Create a `Secret` object in a YAML file on a control plane node. +. Create a `Secret` object in a YAML file. + -- .Example Docker configuration `secret` object diff --git a/modules/nodes-pods-secrets-creating-opaque.adoc b/modules/nodes-pods-secrets-creating-opaque.adoc index 4d890941d2..d0f56ccdfc 100644 --- a/modules/nodes-pods-secrets-creating-opaque.adoc +++ b/modules/nodes-pods-secrets-creating-opaque.adoc @@ -10,7 +10,7 @@ As an administrator, you can create an opaque secret, which allows you to store .Procedure -. Create a `Secret` object in a YAML file on a control plane node. +. Create a `Secret` object in a YAML file. + For example: + diff --git a/modules/nodes-pods-secrets-creating-sa.adoc b/modules/nodes-pods-secrets-creating-sa.adoc index 2da2c55a88..d35a03bf0f 100644 --- a/modules/nodes-pods-secrets-creating-sa.adoc +++ b/modules/nodes-pods-secrets-creating-sa.adoc @@ -25,7 +25,7 @@ For more information, see "Configuring bound service account tokens using volume .Procedure -. Create a `Secret` object in a YAML file on a control plane node: +. 
Create a `Secret` object in a YAML file: + .Example `Secret` object [source,yaml] diff --git a/modules/nodes-scheduler-node-affinity-configuring-preferred.adoc b/modules/nodes-scheduler-node-affinity-configuring-preferred.adoc index 3520b52036..8b44402f25 100644 --- a/modules/nodes-scheduler-node-affinity-configuring-preferred.adoc +++ b/modules/nodes-scheduler-node-affinity-configuring-preferred.adoc @@ -12,14 +12,14 @@ Preferred rules specify that, if the rule is met, the scheduler tries to enforce The following steps demonstrate a simple configuration that creates a node and a pod that the scheduler tries to place on the node. -ifndef::openshift-rosa,openshift-dedicated[] +ifndef::openshift-rosa,openshift-rosa-hcp,openshift-dedicated[] . Add a label to a node using the `oc label node` command: + [source,terminal] ---- $ oc label node node1 e2e-az-name=e2e-az3 ---- -endif::openshift-rosa,openshift-dedicated[] +endif::openshift-rosa,openshift-rosa-hcp,openshift-dedicated[] . Create a pod with a specific label: + diff --git a/modules/nodes-scheduler-node-affinity-configuring-required.adoc b/modules/nodes-scheduler-node-affinity-configuring-required.adoc index 2ef700549b..3c412ddadb 100644 --- a/modules/nodes-scheduler-node-affinity-configuring-required.adoc +++ b/modules/nodes-scheduler-node-affinity-configuring-required.adoc @@ -12,7 +12,7 @@ Required rules *must* be met before a pod can be scheduled on a node. The following steps demonstrate a simple configuration that creates a node and a pod that the scheduler is required to place on the node. -ifndef::openshift-rosa,openshift-dedicated[] +ifndef::openshift-rosa,openshift-rosa-hcp,openshift-dedicated[] . Add a label to a node using the `oc label node` command: + [source,terminal] @@ -35,7 +35,7 @@ metadata: #... ---- ==== -endif::openshift-rosa,openshift-dedicated[] +endif::openshift-rosa,openshift-rosa-hcp,openshift-dedicated[] . 
Create a pod with a specific label in the pod spec: + diff --git a/modules/nodes-scheduler-node-selectors-about.adoc b/modules/nodes-scheduler-node-selectors-about.adoc index 9287f268f5..c139eb9ba7 100644 --- a/modules/nodes-scheduler-node-selectors-about.adoc +++ b/modules/nodes-scheduler-node-selectors-about.adoc @@ -239,7 +239,7 @@ NAME READY STATUS RESTARTS AGE IP NODE pod-s1 1/1 Running 0 20s 10.131.2.6 ci-ln-qg1il3k-f76d1-hlmhl-worker-b-df2s4 ---- + -A pod in the project is not created or scheduled if the pod contains different node selectors. For example, if you deploy the following pod into the example project, it is not be created: +A pod in the project is not created or scheduled if the pod contains different node selectors. For example, if you deploy the following pod into the example project, it is not created: + .Example `Pod` object with an invalid node selector [source,yaml] diff --git a/modules/nodes-scheduler-node-selectors-pod.adoc b/modules/nodes-scheduler-node-selectors-pod.adoc index 394e60a1b2..2379467087 100644 --- a/modules/nodes-scheduler-node-selectors-pod.adoc +++ b/modules/nodes-scheduler-node-selectors-pod.adoc @@ -64,7 +64,7 @@ metadata: .Procedure -ifndef::openshift-rosa,openshift-dedicated[] +ifndef::openshift-rosa,openshift-rosa-hcp,openshift-dedicated[] . Add labels to a node by using a compute machine set or editing the node directly: * Use a `MachineSet` object to add labels to nodes managed by the compute machine set when a node is created: @@ -233,8 +233,8 @@ spec: ==== You cannot add a node selector directly to an existing scheduled pod. 
==== -endif::openshift-rosa,openshift-dedicated[] -ifdef::openshift-rosa,openshift-dedicated[] +endif::openshift-rosa,openshift-rosa-hcp,openshift-dedicated[] +ifdef::openshift-rosa,openshift-rosa-hcp,openshift-dedicated[] * Add the matching node selector to a pod: + ** To add a node selector to existing and future pods, add a node selector to the controlling object for the pods: @@ -285,4 +285,4 @@ spec: ==== You cannot add a node selector directly to an existing scheduled pod. ==== -endif::openshift-rosa,openshift-dedicated[] +endif::openshift-rosa,openshift-rosa-hcp,openshift-dedicated[] diff --git a/modules/rosa-creating-node-tuning.adoc b/modules/rosa-creating-node-tuning.adoc index 48c90aba6b..8f7bfb3890 100644 --- a/modules/rosa-creating-node-tuning.adoc +++ b/modules/rosa-creating-node-tuning.adoc @@ -4,7 +4,7 @@ :_mod-docs-content-type: PROCEDURE [id="rosa-creating-node-tuning_{context}"] -= Creating node tuning configurations on {hcp-title} += Creating node tuning configurations You can create tuning configurations using the {product-title} (ROSA) CLI, `rosa`. @@ -84,4 +84,4 @@ The following JSON output has hard line-returns for the sake of reading clarity. } } ] ----- \ No newline at end of file +---- diff --git a/modules/rosa-deleting-node-tuning.adoc b/modules/rosa-deleting-node-tuning.adoc index f2908ea751..2f8dcfa09a 100644 --- a/modules/rosa-deleting-node-tuning.adoc +++ b/modules/rosa-deleting-node-tuning.adoc @@ -4,7 +4,7 @@ :_mod-docs-content-type: PROCEDURE [id="rosa-deleting-node-tuning_{context}"] -= Deleting node tuning configurations on {hcp-title} += Deleting node tuning configurations You can delete tuning configurations by using the {product-title} (ROSA) CLI, `rosa`. 
diff --git a/modules/rosa-modifying-node-tuning.adoc b/modules/rosa-modifying-node-tuning.adoc index 4a8e17bfdf..a47febdc42 100644 --- a/modules/rosa-modifying-node-tuning.adoc +++ b/modules/rosa-modifying-node-tuning.adoc @@ -4,7 +4,7 @@ :_mod-docs-content-type: PROCEDURE [id="rosa-modifying-node-tuning_{context}"] -= Modifying your node tuning configurations for {hcp-title} += Modifying your node tuning configurations You can view and update the node tuning configurations using the {product-title} (ROSA) CLI, `rosa`. diff --git a/nodes/clusters/nodes-cluster-overcommit.adoc b/nodes/clusters/nodes-cluster-overcommit.adoc index 9cae3b5600..28e4529cc0 100644 --- a/nodes/clusters/nodes-cluster-overcommit.adoc +++ b/nodes/clusters/nodes-cluster-overcommit.adoc @@ -24,8 +24,7 @@ ifndef::openshift-rosa,openshift-rosa-hcp,openshift-dedicated[] container density on nodes. You can configure cluster-level overcommit using the xref:#nodes-cluster-resource-override_nodes-cluster-overcommit[ClusterResourceOverride Operator] to override the ratio between requests and limits set on developer containers. -In conjunction with xref:#nodes-cluster-node-overcommit_nodes-cluster-overcommit[node overcommit] and -xref:../../applications/deployments/managing-deployment-processes.adoc#deployments-setting-resources_deployment-operations[project memory and CPU limits and defaults], you can adjust the resource limit and request to achieve the desired level of overcommit. +In conjunction with xref:#nodes-cluster-node-overcommit_nodes-cluster-overcommit[node overcommit], you can adjust the resource limit and request to achieve the desired level of overcommit. 
[NOTE] ==== @@ -110,5 +109,8 @@ ifndef::openshift-rosa,openshift-rosa-hcp,openshift-dedicated[] endif::openshift-rosa,openshift-rosa-hcp,openshift-dedicated[] ifdef::openshift-rosa,openshift-rosa-hcp,openshift-dedicated[] * xref:../../nodes/clusters/nodes-cluster-limit-ranges.adoc#nodes-cluster-limit-ranges[Restrict resource consumption with limit ranges] +endif::openshift-rosa,openshift-rosa-hcp,openshift-dedicated[] +// TODO: Add this xref to ROSA HCP when the Support book is added. +ifndef::openshift-rosa-hcp,openshift-enterprise[] * xref:../../support/troubleshooting/sd-managed-resources.adoc#sd-managed-resources[Red Hat Managed resources] -endif::openshift-rosa,openshift-rosa-hcp,openshift-dedicated[] \ No newline at end of file +endif::openshift-rosa-hcp,openshift-enterprise[] diff --git a/nodes/cma/nodes-cma-autoscaling-custom-adding.adoc b/nodes/cma/nodes-cma-autoscaling-custom-adding.adoc index 0ea901bb06..cede8ee188 100644 --- a/nodes/cma/nodes-cma-autoscaling-custom-adding.adoc +++ b/nodes/cma/nodes-cma-autoscaling-custom-adding.adoc @@ -16,9 +16,9 @@ You can create only one scaled object for each workload that you want to scale. include::modules/nodes-cma-autoscaling-custom-creating-workload.adoc[leveloffset=+1] //Scaling by using a scaled job is a Technology Preview feature. 
TP not supported in ROSA/OSD -ifndef::openshift-rosa,openshift-dedicated[] +ifndef::openshift-rosa,openshift-rosa-hcp,openshift-dedicated[] include::modules/nodes-cma-autoscaling-custom-creating-job.adoc[leveloffset=+1] -endif::openshift-rosa,openshift-dedicated[] +endif::openshift-rosa,openshift-rosa-hcp,openshift-dedicated[] [role="_additional-resources"] [id="nodes-cma-autoscaling-custom-adding-additional-resources"] diff --git a/nodes/cma/nodes-cma-autoscaling-custom-debugging.adoc b/nodes/cma/nodes-cma-autoscaling-custom-debugging.adoc index bf3ef55131..4295238136 100644 --- a/nodes/cma/nodes-cma-autoscaling-custom-debugging.adoc +++ b/nodes/cma/nodes-cma-autoscaling-custom-debugging.adoc @@ -17,12 +17,12 @@ endif::openshift-origin[] You can use the `must-gather` tool to collect data about the Custom Metrics Autoscaler Operator and its components, including the following items: -ifndef::openshift-rosa,openshift-dedicated[] +ifndef::openshift-rosa,openshift-rosa-hcp,openshift-dedicated[] * The `openshift-keda` namespace and its child objects. -endif::openshift-rosa,openshift-dedicated[] -ifdef::openshift-rosa,openshift-dedicated[] +endif::openshift-rosa,openshift-rosa-hcp,openshift-dedicated[] +ifdef::openshift-rosa,openshift-rosa-hcp,openshift-dedicated[] * The `keda` namespace and its child objects. -endif::openshift-rosa,openshift-dedicated[] +endif::openshift-rosa,openshift-rosa-hcp,openshift-dedicated[] * The Custom Metric Autoscaler Operator installation objects. * The Custom Metric Autoscaler Operator CRD objects. 
diff --git a/nodes/cma/nodes-cma-autoscaling-custom-install.adoc b/nodes/cma/nodes-cma-autoscaling-custom-install.adoc index 66ff6c62fc..451e2dd11f 100644 --- a/nodes/cma/nodes-cma-autoscaling-custom-install.adoc +++ b/nodes/cma/nodes-cma-autoscaling-custom-install.adoc @@ -17,10 +17,10 @@ The installation creates the following five CRDs: * `ScaledObject` * `TriggerAuthentication` -ifndef::openshift-rosa,openshift-dedicated[] +ifndef::openshift-rosa,openshift-rosa-hcp,openshift-dedicated[] include::modules/nodes-cma-autoscaling-custom-install.adoc[leveloffset=+1] -endif::openshift-rosa,openshift-dedicated[] +endif::openshift-rosa,openshift-rosa-hcp,openshift-dedicated[] -ifdef::openshift-rosa,openshift-dedicated[] +ifdef::openshift-rosa,openshift-rosa-hcp,openshift-dedicated[] include::modules/sd-nodes-cma-autoscaling-custom-install.adoc[leveloffset=+1] -endif::openshift-rosa,openshift-dedicated[] +endif::openshift-rosa,openshift-rosa-hcp,openshift-dedicated[] diff --git a/nodes/cma/nodes-cma-autoscaling-custom-removing.adoc b/nodes/cma/nodes-cma-autoscaling-custom-removing.adoc index cc98a26a3d..3c1af80d31 100644 --- a/nodes/cma/nodes-cma-autoscaling-custom-removing.adoc +++ b/nodes/cma/nodes-cma-autoscaling-custom-removing.adoc @@ -8,17 +8,17 @@ toc::[] You can remove the custom metrics autoscaler from your {product-title} cluster. After removing the Custom Metrics Autoscaler Operator, remove other components associated with the Operator to avoid potential issues. -ifndef::openshift-rosa,openshift-dedicated[] +ifndef::openshift-rosa,openshift-rosa-hcp,openshift-dedicated[] [NOTE] ==== Delete the `KedaController` custom resource (CR) first. If you do not delete the `KedaController` CR, {product-title} can hang when you delete the `openshift-keda` project. If you delete the Custom Metrics Autoscaler Operator before deleting the CR, you are not able to delete the CR. 
==== -endif::openshift-rosa,openshift-dedicated[] -ifdef::openshift-rosa,openshift-dedicated[] +endif::openshift-rosa,openshift-rosa-hcp,openshift-dedicated[] +ifdef::openshift-rosa,openshift-rosa-hcp,openshift-dedicated[] [NOTE] ==== Delete the `KedaController` custom resource (CR) first. If you do not delete the `KedaController` CR, {product-title} can hang when you delete the `keda` project. If you delete the Custom Metrics Autoscaler Operator before deleting the CR, you are not able to delete the CR. ==== -endif::openshift-rosa,openshift-dedicated[] +endif::openshift-rosa,openshift-rosa-hcp,openshift-dedicated[] include::modules/nodes-cma-autoscaling-custom-uninstalling.adoc[leveloffset=+1] diff --git a/nodes/cma/nodes-cma-autoscaling-custom-trigger-auth.adoc b/nodes/cma/nodes-cma-autoscaling-custom-trigger-auth.adoc index 435e24a514..1fc10d4e1b 100644 --- a/nodes/cma/nodes-cma-autoscaling-custom-trigger-auth.adoc +++ b/nodes/cma/nodes-cma-autoscaling-custom-trigger-auth.adoc @@ -186,10 +186,10 @@ spec: <3> Specifies a pod identity. Supported values are `none`, `azure`, `gcp`, `aws-eks`, or `aws-kiam`. The default is `none`. // Remove ifdef after https://github.com/openshift/openshift-docs/pull/62147 merges -ifndef::openshift-rosa,openshift-dedicated[] +// ifndef::openshift-rosa,openshift-dedicated[] .Additional resources * For information about {product-title} secrets, see xref:../../nodes/pods/nodes-pods-secrets.adoc#nodes-pods-secrets[Providing sensitive data to pods]. 
-endif::openshift-rosa,openshift-dedicated[] +// endif::openshift-rosa,openshift-dedicated[] include::modules/nodes-cma-autoscaling-custom-trigger-auth-using.adoc[leveloffset=+1] diff --git a/nodes/containers/nodes-containers-using.adoc b/nodes/containers/nodes-containers-using.adoc index 31acf09126..e0ca0da94b 100644 --- a/nodes/containers/nodes-containers-using.adoc +++ b/nodes/containers/nodes-containers-using.adoc @@ -49,7 +49,7 @@ A _container engine_ is a piece of software that processes user requests, includ The {product-title} documentation uses the term _container runtime_ to refer to the lower-level container runtime. Other documentation can refer to the container engine as the container runtime. ==== -ifndef::openshift-rosa,openshift-dedicated[] +ifndef::openshift-rosa,openshift-rosa-hcp,openshift-dedicated[] {product-title} uses CRI-O as the container engine and runC or crun as the container runtime. The default container runtime is runC. Both container runtimes adhere to the link:https://www.opencontainers.org/[Open Container Initiative (OCI)] runtime specifications. include::snippets/about-crio-snippet.adoc[] @@ -71,8 +71,8 @@ runC has some benefits over crun, including: You can move between the two container runtimes as needed. For information on setting which container runtime to use, see xref:../../machine_configuration/machine-configs-custom.adoc#create-a-containerruntimeconfig_machine-configs-custom[Creating a `ContainerRuntimeConfig` CR to edit CRI-O parameters]. -endif::openshift-rosa,openshift-dedicated[] +endif::openshift-rosa,openshift-rosa-hcp,openshift-dedicated[] -ifdef::openshift-rosa,openshift-dedicated[] +ifdef::openshift-rosa,openshift-rosa-hcp,openshift-dedicated[] {product-title} uses CRI-O as the container engine and runC or crun as the container runtime. The default container runtime is runC. 
-endif::openshift-rosa,openshift-dedicated[] +endif::openshift-rosa,openshift-rosa-hcp,openshift-dedicated[] diff --git a/nodes/index.adoc b/nodes/index.adoc index 070fd468c9..48abd52955 100644 --- a/nodes/index.adoc +++ b/nodes/index.adoc @@ -11,7 +11,13 @@ toc::[] [id="nodes-overview"] == About nodes -A node is a virtual or bare-metal machine in a Kubernetes cluster. Worker nodes host your application containers, grouped as pods. The control plane nodes run services that are required to control the Kubernetes cluster. In {product-title}, the control plane nodes contain more than just the Kubernetes services for managing the {product-title} cluster. +A node is a virtual or bare-metal machine in a Kubernetes cluster. Worker nodes host your application containers, grouped as pods. The control plane nodes run services that are required to control the Kubernetes cluster. +ifndef::openshift-rosa-hcp[] +In {product-title}, the control plane nodes contain more than just the Kubernetes services for managing the {product-title} cluster. +endif::openshift-rosa-hcp[] +ifdef::openshift-rosa-hcp[] +In {product-title}, the control plane nodes are hosted in a Red{nbsp}Hat-owned AWS account. Red{nbsp}Hat fully manages the control plane infrastructure for you. +endif::openshift-rosa-hcp[] Having stable and healthy nodes in a cluster is fundamental to the smooth functioning of your hosted application. In {product-title}, you can access, manage, and monitor a node through the `Node` object representing the node. @@ -27,23 +33,18 @@ Kube-proxy:: Kube-proxy runs on every node in the cluster and maintains the netw DNS:: Cluster DNS is a DNS server which serves DNS records for Kubernetes services. Containers started by Kubernetes automatically include this DNS server in their DNS searches. 
+ifndef::openshift-rosa-hcp[] image::295_OpenShift_Nodes_Overview_1222.png[Overview of control plane and worker node] +endif::openshift-rosa-hcp[] [discrete] === Read operations The read operations allow an administrator or a developer to get information about nodes in an {product-title} cluster. -ifdef::openshift-rosa-hcp[] -* List all the nodes in a cluster. -* Get information about a node, such as memory and CPU usage, health, status, and age. -* List pods running on a node. -endif::openshift-rosa-hcp[] -ifndef::openshift-rosa-hcp[] * xref:../nodes/nodes/nodes-nodes-viewing.adoc#nodes-nodes-viewing-listing_nodes-nodes-viewing[List all the nodes in a cluster]. * Get information about a node, such as memory and CPU usage, health, status, and age. * xref:../nodes/nodes/nodes-nodes-viewing.adoc#nodes-nodes-viewing-listing-pods_nodes-nodes-viewing[List pods running on a node]. -endif::openshift-rosa-hcp[] ifndef::openshift-rosa,openshift-rosa-hcp,openshift-dedicated[] [discrete] @@ -66,29 +67,23 @@ endif::openshift-rosa,openshift-rosa-hcp,openshift-dedicated[] {product-title} allows you to do more than just access and manage nodes; as an administrator, you can perform the following tasks on nodes to make the cluster more efficient, application-friendly, and to provide a better environment for your developers. -ifndef::openshift-rosa-hcp[] -* Manage node-level tuning for high-performance applications that require some level of kernel tuning by using the Node Tuning Operator. -* Run background tasks on nodes automatically with daemon sets. You can create and use daemon sets to create shared storage, run a logging pod on every node, or deploy a monitoring agent on all nodes. -endif::openshift-rosa-hcp[] -ifndef::openshift-rosa-hcp[] * Manage node-level tuning for high-performance applications that require some level of kernel tuning by xref:../nodes/nodes/nodes-node-tuning-operator.adoc#nodes-node-tuning-operator[using the Node Tuning Operator]. 
-ifndef::openshift-rosa,openshift-dedicated[]
-* Enable TLS security profiles on the node to protect communication between the kubelet and the Kubernetes API server.
-endif::openshift-rosa,openshift-dedicated[]
 * xref:../nodes/jobs/nodes-pods-daemonsets.adoc#nodes-pods-daemonsets[Run background tasks on nodes automatically with daemon sets]. You can create and use daemon sets to create shared storage, run a logging pod on every node, or deploy a monitoring agent on all nodes.
-ifndef::openshift-rosa,openshift-dedicated[]
+ifndef::openshift-rosa,openshift-rosa-hcp,openshift-dedicated[]
+* Enable TLS security profiles on the node to protect communication between the kubelet and the Kubernetes API server.
+endif::openshift-rosa,openshift-rosa-hcp,openshift-dedicated[]
+// The daemon set bullet above already applies to all distributions; do not repeat it here.
+ifndef::openshift-rosa,openshift-rosa-hcp,openshift-dedicated[]
 * xref:../nodes/nodes/nodes-nodes-garbage-collection.adoc#nodes-nodes-garbage-collection[Free node resources using garbage collection]. You can ensure that your nodes are running efficiently by removing terminated containers and the images not referenced by any running pods.
 * xref:../nodes/nodes/nodes-nodes-managing.adoc#nodes-nodes-kernel-arguments_nodes-nodes-managing[Add kernel arguments to a set of nodes].
 * Configure an {product-title} cluster to have worker nodes at the network edge (remote worker nodes). For information on the challenges of having remote worker nodes in an {product-title} cluster and some recommended approaches for managing pods on a remote worker node, see xref:../nodes/edge/nodes-edge-remote-workers.adoc#nodes-edge-remote-workers[Using remote worker nodes at the network edge].
-endif::openshift-rosa,openshift-dedicated[] -endif::openshift-rosa-hcp[] +endif::openshift-rosa,openshift-rosa-hcp,openshift-dedicated[] [id="pods-overview"] == About pods A pod is one or more containers deployed together on a node. As a cluster administrator, you can define a pod, assign it to run on a healthy node that is ready for scheduling, and manage. A pod runs as long as the containers are running. You cannot change a pod once it is defined and is running. Some operations you can perform when working with pods are: -ifndef::openshift-rosa-hcp[] [discrete] === Read operations @@ -96,39 +91,27 @@ As an administrator, you can get information about pods in a project through the * xref:../nodes/pods/nodes-pods-viewing.adoc#nodes-pods-viewing-project_nodes-pods-viewing[List pods associated with a project], including information such as the number of replicas and restarts, current status, and age. * xref:../nodes/pods/nodes-pods-viewing.adoc#nodes-pods-viewing-usage_nodes-pods-viewing[View pod usage statistics] such as CPU, memory, and storage consumption. -endif::openshift-rosa-hcp[] [discrete] === Management operations The following list of tasks provides an overview of how an administrator can manage pods in an {product-title} cluster. -ifdef::openshift-rosa-hcp[] -* Control scheduling of pods using the advanced scheduling features available in {product-title}: -** Node-to-pod binding rules such as pod affinity, node affinity, and anti-affinity. -** Node labels and selectors. -** Pod topology spread constraints. -* Configure how pods behave after a restart using pod controllers and restart policies. -* Limit both egress and ingress traffic on a pod. -* Add and remove volumes to and from any object that has a pod template. A volume is a mounted file system available to all the containers in a pod. Container storage is ephemeral; you can use volumes to persist container data. 
-endif::openshift-rosa-hcp[] -ifndef::openshift-rosa-hcp[] * Control scheduling of pods using the advanced scheduling features available in {product-title}: ** Node-to-pod binding rules such as xref:../nodes/scheduling/nodes-scheduler-pod-affinity.adoc#nodes-scheduler-pod-affinity-example-affinity_nodes-scheduler-pod-affinity[pod affinity], xref:../nodes/scheduling/nodes-scheduler-node-affinity.adoc#nodes-scheduler-node-affinity[node affinity], and xref:../nodes/scheduling/nodes-scheduler-pod-affinity.adoc#nodes-scheduler-pod-anti-affinity-configuring_nodes-scheduler-pod-affinity[anti-affinity]. ** xref:../nodes/scheduling/nodes-scheduler-node-selectors.adoc#nodes-scheduler-node-selectors[Node labels and selectors]. -ifndef::openshift-dedicated,openshift-rosa[] +ifndef::openshift-rosa,openshift-rosa-hcp,openshift-dedicated[] ** xref:../nodes/scheduling/nodes-scheduler-taints-tolerations.adoc#nodes-scheduler-taints-tolerations[Taints and tolerations]. -endif::openshift-dedicated,openshift-rosa[] +endif::openshift-rosa,openshift-rosa-hcp,openshift-dedicated[] ** xref:../nodes/scheduling/nodes-scheduler-pod-topology-spread-constraints.adoc#nodes-scheduler-pod-topology-spread-constraints[Pod topology spread constraints]. // Cannot create namespace to install Operator -ifndef::openshift-rosa,openshift-dedicated[] +ifndef::openshift-rosa,openshift-rosa-hcp,openshift-dedicated[] ** xref:../nodes/scheduling/secondary_scheduler/index.adoc#nodes-secondary-scheduler-about[Secondary scheduling]. * xref:../nodes/scheduling/descheduler/index.adoc#nodes-descheduler-about[Configure the descheduler to evict pods] based on specific strategies so that the scheduler reschedules the pods to more appropriate nodes. 
-endif::openshift-rosa,openshift-dedicated[] +endif::openshift-rosa,openshift-rosa-hcp,openshift-dedicated[] * xref:../nodes/pods/nodes-pods-configuring.adoc#nodes-pods-configuring-restart_nodes-pods-configuring[Configure how pods behave after a restart using pod controllers and restart policies]. * xref:../nodes/pods/nodes-pods-configuring.adoc#nodes-pods-configuring-bandwidth_nodes-pods-configuring[Limit both egress and ingress traffic on a pod]. * xref:../nodes/containers/nodes-containers-volumes.adoc#nodes-containers-volumes[Add and remove volumes to and from any object that has a pod template]. A volume is a mounted file system available to all the containers in a pod. Container storage is ephemeral; you can use volumes to persist container data. -endif::openshift-rosa-hcp[] [discrete] === Enhancement operations @@ -161,27 +144,15 @@ As a developer, use a vertical pod autoscaler to ensure your pods stay up during |=== endif::openshift-rosa,openshift-rosa-hcp,openshift-dedicated[] -ifdef::openshift-rosa,openshift-dedicated[] +ifdef::openshift-rosa,openshift-rosa-hcp,openshift-dedicated[] * Secrets: Some applications need sensitive information, such as passwords and usernames. An administrator can use the `Secret` object to provide sensitive data to pods xref:../nodes/pods/nodes-pods-secrets.adoc#nodes-pods-secrets[using the `Secret` object]. -endif::openshift-rosa,openshift-dedicated[] +endif::openshift-rosa,openshift-rosa-hcp,openshift-dedicated[] [id="containers-overview"] == About containers A container is the basic unit of an {product-title} application, which comprises the application code packaged along with its dependencies, libraries, and binaries. Containers provide consistency across environments and multiple deployment targets: physical servers, virtual machines (VMs), and private or public cloud. 
-ifdef::openshift-rosa-hcp[] -Linux container technologies are lightweight mechanisms for isolating running processes and limiting access to only designated resources. -As an administrator, You can perform various tasks on a Linux container, such as: - -* Copy files to and from a container. -* Allow containers to consume API objects. -* Execute remote commands in a container. -* Use port forwarding to access applications in a container. - -{product-title} provides specialized containers called Init containers. Init containers run before application containers and can contain utilities or setup scripts not present in an application image. You can use an Init container to perform tasks before the rest of a pod is deployed. -endif::openshift-rosa-hcp[] -ifndef::openshift-rosa-hcp[] Linux container technologies are lightweight mechanisms for isolating running processes and limiting access to only designated resources. As an administrator, You can perform various tasks on a Linux container, such as: @@ -191,11 +162,9 @@ As an administrator, You can perform various tasks on a Linux container, such as * xref:../nodes/containers/nodes-containers-port-forwarding.adoc#nodes-containers-port-forwarding[Use port forwarding to access applications in a container]. {product-title} provides specialized containers called xref:../nodes/containers/nodes-containers-init.adoc#nodes-containers-init[Init containers]. Init containers run before application containers and can contain utilities or setup scripts not present in an application image. You can use an Init container to perform tasks before the rest of a pod is deployed. -endif::openshift-rosa-hcp[] Apart from performing specific tasks on nodes, pods, and containers, you can work with the overall {product-title} cluster to keep the cluster efficient and the application pods highly available. 
- //cannot create the required namespace for these operators ifndef::openshift-rosa,openshift-rosa-hcp,openshift-dedicated[] [id="nodes-about-autoscaling-pod_{context}"] diff --git a/nodes/nodes/nodes-node-tuning-operator.adoc b/nodes/nodes/nodes-node-tuning-operator.adoc index 2a229b345e..0d3940d320 100644 --- a/nodes/nodes/nodes-node-tuning-operator.adoc +++ b/nodes/nodes/nodes-node-tuning-operator.adoc @@ -6,15 +6,33 @@ include::_attributes/common-attributes.adoc[] toc::[] +ifndef::openshift-rosa-hcp[] Learn about the Node Tuning Operator and how you can use it to manage node-level tuning by orchestrating the tuned daemon. +endif::openshift-rosa-hcp[] + +ifdef::openshift-rosa-hcp[] +{product-title} supports the Node Tuning Operator to improve performance of your nodes on your clusters. Prior to creating a node tuning configuration, you must create a custom tuning specification. +endif::openshift-rosa-hcp[] include::modules/node-tuning-operator.adoc[leveloffset=+1] +ifndef::openshift-rosa-hcp[] include::modules/accessing-an-example-cluster-node-tuning-operator-specification.adoc[leveloffset=+1] +endif::openshift-rosa-hcp[] include::modules/custom-tuning-specification.adoc[leveloffset=+1] +ifndef::openshift-rosa-hcp[] include::modules/cluster-node-tuning-operator-default-profiles-set.adoc[leveloffset=+1] include::modules/node-tuning-operator-supported-tuned-daemon-plug-ins.adoc[leveloffset=+1] +endif::openshift-rosa-hcp[] + +ifdef::openshift-rosa-hcp[] +include::modules/rosa-creating-node-tuning.adoc[leveloffset=+1] + +include::modules/rosa-modifying-node-tuning.adoc[leveloffset=+1] + +include::modules/rosa-deleting-node-tuning.adoc[leveloffset=+1] +endif::openshift-rosa-hcp[] diff --git a/nodes/nodes/nodes-nodes-viewing.adoc b/nodes/nodes/nodes-nodes-viewing.adoc index 18acac0172..d7f21bf21f 100644 --- a/nodes/nodes/nodes-nodes-viewing.adoc +++ b/nodes/nodes/nodes-nodes-viewing.adoc @@ -18,15 +18,15 @@ The master uses the information from node objects to validate 
nodes with health include::modules/nodes-nodes-viewing-listing.adoc[leveloffset=+1] -ifndef::openshift-rosa,openshift-dedicated[] +ifndef::openshift-rosa,openshift-rosa-hcp,openshift-dedicated[] [role="_additional-resources"] .Additional resources * xref:../../nodes/nodes/nodes-nodes-working.adoc#nodes-nodes-working-updating_nodes-nodes-working[Understanding how to update labels on nodes] -endif::openshift-rosa,openshift-dedicated[] +endif::openshift-rosa,openshift-rosa-hcp,openshift-dedicated[] include::modules/nodes-nodes-viewing-listing-pods.adoc[leveloffset=+1] -include::modules/nodes-nodes-viewing-memory.adoc[leveloffset=+1] \ No newline at end of file +include::modules/nodes-nodes-viewing-memory.adoc[leveloffset=+1] diff --git a/nodes/nodes/nodes-nodes-working.adoc b/nodes/nodes/nodes-nodes-working.adoc index cf5aa6c8fd..40ecc3edfb 100644 --- a/nodes/nodes/nodes-nodes-working.adoc +++ b/nodes/nodes/nodes-nodes-working.adoc @@ -8,25 +8,25 @@ include::_attributes/common-attributes.adoc[] toc::[] As an administrator, you can perform several tasks to make your clusters more efficient. -ifdef::openshift-rosa[] -You can use the `oc adm` command to cordon, uncordon, and drain a specific node. This is available for both ROSA Classic and ROSA with HCP clusters. +ifdef::openshift-rosa,openshift-rosa-hcp[] +You can use the `oc adm` command to cordon, uncordon, and drain a specific node. [NOTE] ==== Cordoning and draining are only allowed on worker nodes that are part of {cluster-manager-first} machine pools. ==== -endif::openshift-rosa[] +endif::openshift-rosa,openshift-rosa-hcp[] // The following include statements pull in the module files that comprise // the assembly. Include any combination of concept, procedure, or reference // modules required to cover the user story. You can also include other // assemblies. 
-ifdef::openshift-enterprise,openshift-rosa[] +ifdef::openshift-enterprise,openshift-rosa,openshift-rosa-hcp[] include::modules/nodes-nodes-working-evacuating.adoc[leveloffset=+1] -endif::openshift-enterprise,openshift-rosa[] +endif::openshift-enterprise,openshift-rosa,openshift-rosa-hcp[] -ifndef::openshift-rosa[] +ifndef::openshift-rosa,openshift-rosa-hcp[] include::modules/nodes-nodes-working-updating.adoc[leveloffset=+1] include::modules/nodes-nodes-working-marking.adoc[leveloffset=+1] include::modules/sno-clusters-reboot-without-drain.adoc[leveloffset=+1] @@ -46,4 +46,4 @@ include::modules/nodes-nodes-working-deleting.adoc[leveloffset=+2] * xref:../../machine_management/manually-scaling-machineset.adoc#machineset-manually-scaling-manually-scaling-machineset[Manually scaling a compute machine set] include::modules/nodes-nodes-working-deleting-bare-metal.adoc[leveloffset=+2] -endif::openshift-rosa[] +endif::openshift-rosa,openshift-rosa-hcp[] diff --git a/rosa_hcp/rosa-tuning-config.adoc b/nodes/nodes/rosa-tuning-config.adoc similarity index 63% rename from rosa_hcp/rosa-tuning-config.adoc rename to nodes/nodes/rosa-tuning-config.adoc index c36dca6814..d0840976b5 100644 --- a/rosa_hcp/rosa-tuning-config.adoc +++ b/nodes/nodes/rosa-tuning-config.adoc @@ -6,7 +6,7 @@ include::_attributes/attributes-openshift-dedicated.adoc[] toc::[] -{hcp-title-first} supports the Node Tuning Operator to improve performance of your nodes on your {hcp-title} clusters. Prior to creating a node tuning configuration, you must create a custom tuning specification. +{product-title} supports the Node Tuning Operator to improve performance of your nodes on your clusters. Prior to creating a node tuning configuration, you must create a custom tuning specification. 
include::modules/node-tuning-operator.adoc[leveloffset=+1] @@ -16,4 +16,4 @@ include::modules/rosa-creating-node-tuning.adoc[leveloffset=+1] include::modules/rosa-modifying-node-tuning.adoc[leveloffset=+1] -include::modules/rosa-deleting-node-tuning.adoc[leveloffset=+1] \ No newline at end of file +include::modules/rosa-deleting-node-tuning.adoc[leveloffset=+1] diff --git a/nodes/pods/nodes-pods-configuring.adoc b/nodes/pods/nodes-pods-configuring.adoc index e505c89741..418bace672 100644 --- a/nodes/pods/nodes-pods-configuring.adoc +++ b/nodes/pods/nodes-pods-configuring.adoc @@ -27,19 +27,19 @@ include::modules/nodes-pods-pod-disruption-about.adoc[leveloffset=+1] include::modules/nodes-pods-pod-disruption-configuring.adoc[leveloffset=+2] //tech preview feature -ifndef::openshift-rosa,openshift-dedicated[] +ifndef::openshift-rosa,openshift-rosa-hcp,openshift-dedicated[] include::modules/pod-disruption-eviction-policy.adoc[leveloffset=+2] -endif::openshift-rosa,openshift-dedicated[] +endif::openshift-rosa,openshift-rosa-hcp,openshift-dedicated[] //Unsupported [role="_additional-resources"] .Additional resources -ifndef::openshift-rosa,openshift-dedicated[] +ifndef::openshift-rosa,openshift-rosa-hcp,openshift-dedicated[] * xref:../../nodes/clusters/nodes-cluster-enabling-features.adoc#nodes-cluster-enabling[Enabling features using feature gates] * link:https://kubernetes.io/docs/tasks/run-application/configure-pdb/#unhealthy-pod-eviction-policy[Unhealthy Pod Eviction Policy] in the Kubernetes documentation include::modules/nodes-pods-configuring-pod-critical.adoc[leveloffset=+1] include::modules/nodes-pods-configuring-reducing.adoc[leveloffset=+1] -endif::openshift-rosa,openshift-dedicated[] +endif::openshift-rosa,openshift-rosa-hcp,openshift-dedicated[] diff --git a/nodes/pods/nodes-pods-priority.adoc b/nodes/pods/nodes-pods-priority.adoc index 8b19901f24..9c820d6f38 100644 --- a/nodes/pods/nodes-pods-priority.adoc +++ b/nodes/pods/nodes-pods-priority.adoc @@ -10,12 
+10,12 @@ toc::[] You can enable pod priority and preemption in your cluster. Pod priority indicates the importance of a pod relative to other pods and queues the pods based on that priority. pod preemption allows the cluster to evict, or preempt, lower-priority pods so that higher-priority pods can be scheduled if there is no available space on a suitable node pod priority also affects the scheduling order of pods and out-of-resource eviction ordering on the node. -ifndef::openshift-rosa,openshift-dedicated[] +ifndef::openshift-rosa,openshift-rosa-hcp,openshift-dedicated[] To use priority and preemption, you create priority classes that define the relative weight of your pods. Then, reference a priority class in the pod specification to apply that weight for scheduling. -endif::openshift-rosa,openshift-dedicated[] -ifdef::openshift-rosa,openshift-dedicated[] +endif::openshift-rosa,openshift-rosa-hcp,openshift-dedicated[] +ifdef::openshift-rosa,openshift-rosa-hcp,openshift-dedicated[] To use priority and preemption, reference a priority class in the pod specification to apply that weight for scheduling. -endif::openshift-rosa,openshift-dedicated[] +endif::openshift-rosa,openshift-rosa-hcp,openshift-dedicated[] // The following include statements pull in the module files that comprise // the assembly. 
Include any combination of concept, procedure, or reference diff --git a/nodes/pods/nodes-pods-secrets.adoc b/nodes/pods/nodes-pods-secrets.adoc index 466e3f7297..5101cd73e9 100644 --- a/nodes/pods/nodes-pods-secrets.adoc +++ b/nodes/pods/nodes-pods-secrets.adoc @@ -30,12 +30,12 @@ include::modules/nodes-pods-secrets-creating-sa.adoc[leveloffset=+2] * xref:../../nodes/pods/nodes-pods-secrets.adoc#nodes-pods-secrets-creating_nodes-pods-secrets[Understanding how to create secrets] -ifndef::openshift-rosa,openshift-dedicated[] +ifndef::openshift-rosa,openshift-rosa-hcp,openshift-dedicated[] * xref:../../authentication/bound-service-account-tokens.adoc#bound-sa-tokens-configuring_bound-service-account-tokens[Configuring bound service account tokens using volume projection] * xref:../../authentication/understanding-and-creating-service-accounts.adoc#understanding-and-creating-service-accounts[Understanding and creating service accounts] -endif::openshift-rosa,openshift-dedicated[] +endif::openshift-rosa,openshift-rosa-hcp,openshift-dedicated[] include::modules/nodes-pods-secrets-creating-basic.adoc[leveloffset=+2] diff --git a/nodes/pods/nodes-pods-using.adoc b/nodes/pods/nodes-pods-using.adoc index 67b73fc3f9..79dbc2ecae 100644 --- a/nodes/pods/nodes-pods-using.adoc +++ b/nodes/pods/nodes-pods-using.adoc @@ -19,7 +19,10 @@ include::modules/nodes-pods-using-about.adoc[leveloffset=+1] include::modules/nodes-pods-using-example.adoc[leveloffset=+1] +// TODO: Add xrefs to ROSA HCP when distro is published. +ifndef::openshift-rosa-hcp[] [role="_additional-resources"] == Additional resources * For more information on pods and storage see xref:../../storage/understanding-persistent-storage.adoc#understanding-persistent-storage[Understanding persistent storage] and xref:../../storage/understanding-persistent-storage.adoc#understanding-ephemeral-storage[Understanding ephemeral storage]. 
+endif::openshift-rosa-hcp[] diff --git a/nodes/scheduling/nodes-scheduler-about.adoc b/nodes/scheduling/nodes-scheduler-about.adoc index b79c32acdd..8d6550aac1 100644 --- a/nodes/scheduling/nodes-scheduler-about.adoc +++ b/nodes/scheduling/nodes-scheduler-about.adoc @@ -21,15 +21,15 @@ In situations where you might want more control over where new pods are placed, + You can control pod placement by using the following scheduling features: -ifndef::openshift-rosa,openshift-dedicated[] +ifndef::openshift-rosa,openshift-rosa-hcp,openshift-dedicated[] * xref:../../nodes/scheduling/nodes-scheduler-profiles.adoc#nodes-scheduler-profiles[Scheduler profiles] -endif::openshift-rosa,openshift-dedicated[] +endif::openshift-rosa,openshift-rosa-hcp,openshift-dedicated[] * xref:../../nodes/scheduling/nodes-scheduler-pod-affinity.adoc#nodes-scheduler-pod-affinity[Pod affinity and anti-affinity rules] * xref:../../nodes/scheduling/nodes-scheduler-node-affinity.adoc#nodes-scheduler-node-affinity-about_nodes-scheduler-node-affinity[Node affinity] * xref:../../nodes/scheduling/nodes-scheduler-node-selectors.adoc#nodes-scheduler-node-selectors[Node selectors] -ifndef::openshift-dedicated,openshift-rosa[] +ifndef::openshift-dedicated,openshift-rosa-hcp,openshift-rosa[] * xref:../../nodes/scheduling/nodes-scheduler-taints-tolerations.adoc#nodes-scheduler-taints-tolerations[Taints and tolerations] -endif::openshift-dedicated,openshift-rosa[] +endif::openshift-dedicated,openshift-rosa-hcp,openshift-rosa[] * xref:../../nodes/scheduling/nodes-scheduler-overcommit.adoc#nodes-scheduler-overcommit[Node overcommitment] [id="about-default-scheduler"] diff --git a/nodes/scheduling/nodes-scheduler-node-affinity.adoc b/nodes/scheduling/nodes-scheduler-node-affinity.adoc index fa0322b058..de0155c706 100644 --- a/nodes/scheduling/nodes-scheduler-node-affinity.adoc +++ b/nodes/scheduling/nodes-scheduler-node-affinity.adoc @@ -26,14 +26,14 @@ 
include::modules/nodes-scheduler-node-affinity-configuring-preferred.adoc[levelo include::modules/nodes-scheduler-node-affinity-example.adoc[leveloffset=+1] -ifndef::openshift-rosa,openshift-dedicated[] +ifndef::openshift-rosa,openshift-rosa-hcp,openshift-dedicated[] include::modules/olm-overriding-operator-pod-affinity.adoc[leveloffset=+1] -endif::openshift-rosa,openshift-dedicated[] +endif::openshift-rosa,openshift-rosa-hcp,openshift-dedicated[] -ifndef::openshift-rosa,openshift-dedicated[] +ifndef::openshift-rosa,openshift-rosa-hcp,openshift-dedicated[] [id="nodes-scheduler-node-affinity-addtl-resources_{context}"] [role="_additional-resources"] == Additional resources * xref:../../nodes/nodes/nodes-nodes-working.adoc#nodes-nodes-working-updating_nodes-nodes-working[Understanding how to update labels on nodes] -endif::openshift-rosa,openshift-dedicated[] +endif::openshift-rosa,openshift-rosa-hcp,openshift-dedicated[] diff --git a/nodes/scheduling/nodes-scheduler-node-selectors.adoc b/nodes/scheduling/nodes-scheduler-node-selectors.adoc index 909b6e26f0..7b4dc37dbc 100644 --- a/nodes/scheduling/nodes-scheduler-node-selectors.adoc +++ b/nodes/scheduling/nodes-scheduler-node-selectors.adoc @@ -12,18 +12,18 @@ include::modules/nodes-scheduler-node-selectors-about.adoc[leveloffset=+1] include::modules/nodes-scheduler-node-selectors-pod.adoc[leveloffset=+1] -ifndef::openshift-rosa,openshift-dedicated[] +ifndef::openshift-rosa,openshift-rosa-hcp,openshift-dedicated[] include::modules/nodes-scheduler-node-selectors-cluster.adoc[leveloffset=+1] include::modules/nodes-scheduler-node-selectors-project.adoc[leveloffset=+1] -endif::openshift-rosa,openshift-dedicated[] +endif::openshift-rosa,openshift-rosa-hcp,openshift-dedicated[] // The following xref points to a topic not included in the OSD and ROSA docs. 
-ifndef::openshift-dedicated,openshift-rosa[] +ifndef::openshift-dedicated,openshift-rosa,openshift-rosa-hcp[] [role="_additional-resources"] .Additional resources * xref:../../nodes/scheduling/nodes-scheduler-taints-tolerations.adoc#nodes-scheduler-taints-tolerations-projects_nodes-scheduler-taints-tolerations[Creating a project with a node selector and toleration] -endif::openshift-dedicated,openshift-rosa[] +endif::openshift-dedicated,openshift-rosa,openshift-rosa-hcp[] // include::modules/nodes-scheduler-node-selectors-configuring.adoc[leveloffset=+1] diff --git a/nodes/scheduling/nodes-scheduler-pod-affinity.adoc b/nodes/scheduling/nodes-scheduler-pod-affinity.adoc index fcc183841a..fef94caeca 100644 --- a/nodes/scheduling/nodes-scheduler-pod-affinity.adoc +++ b/nodes/scheduling/nodes-scheduler-pod-affinity.adoc @@ -16,6 +16,6 @@ include::modules/nodes-scheduler-pod-anti-affinity-configuring.adoc[leveloffset= include::modules/nodes-scheduler-pod-affinity-example.adoc[leveloffset=+1] -ifndef::openshift-rosa,openshift-dedicated[] +ifndef::openshift-rosa,openshift-rosa-hcp,openshift-dedicated[] include::modules/olm-overriding-operator-pod-affinity.adoc[leveloffset=+1] -endif::openshift-rosa,openshift-dedicated[] +endif::openshift-rosa,openshift-rosa-hcp,openshift-dedicated[] diff --git a/nodes/scheduling/nodes-scheduler-pod-topology-spread-constraints.adoc b/nodes/scheduling/nodes-scheduler-pod-topology-spread-constraints.adoc index d5cd4c290d..4523f350f8 100644 --- a/nodes/scheduling/nodes-scheduler-pod-topology-spread-constraints.adoc +++ b/nodes/scheduling/nodes-scheduler-pod-topology-spread-constraints.adoc @@ -31,9 +31,9 @@ include::modules/pod-topology-spread-constraints-max-skew.adoc[leveloffset=+1] // Example pod topology spread constraints include::modules/nodes-scheduler-pod-topology-spread-constraints-examples.adoc[leveloffset=+1] -ifndef::openshift-rosa,openshift-dedicated[] +ifndef::openshift-rosa,openshift-rosa-hcp,openshift-dedicated[] 
[role="_additional-resources"] == Additional resources * xref:../../nodes/nodes/nodes-nodes-working.adoc#nodes-nodes-working-updating_nodes-nodes-working[Understanding how to update labels on nodes] -endif::openshift-rosa,openshift-dedicated[] +endif::openshift-rosa,openshift-rosa-hcp,openshift-dedicated[]