1
0
mirror of https://github.com/openshift/openshift-docs.git synced 2026-02-05 12:46:18 +01:00

OSDOCS#15257: Doc sweep updating examples from kube 1.32 to 1.33

This commit is contained in:
Andrea Hoffer
2025-09-11 12:06:09 -04:00
committed by openshift-cherrypick-robot
parent 559eda1766
commit d3d23e536d
54 changed files with 258 additions and 258 deletions

View File

@@ -156,12 +156,12 @@ $ oc get nodes
[source,terminal]
----
NAME STATUS ROLES AGE VERSION
ip-10-0-147-106.us-east-2.compute.internal Ready master 14h v1.32.3
ip-10-0-150-175.us-east-2.compute.internal Ready worker 14h v1.32.3
ip-10-0-175-23.us-east-2.compute.internal Ready master 14h v1.32.3
ip-10-0-189-6.us-east-2.compute.internal Ready worker 14h v1.32.3
ip-10-0-205-158.us-east-2.compute.internal Ready master 14h v1.32.3
ip-10-0-210-167.us-east-2.compute.internal Ready worker 14h v1.32.3
ip-10-0-147-106.us-east-2.compute.internal Ready master 14h v1.33.4
ip-10-0-150-175.us-east-2.compute.internal Ready worker 14h v1.33.4
ip-10-0-175-23.us-east-2.compute.internal Ready master 14h v1.33.4
ip-10-0-189-6.us-east-2.compute.internal Ready worker 14h v1.33.4
ip-10-0-205-158.us-east-2.compute.internal Ready master 14h v1.33.4
ip-10-0-210-167.us-east-2.compute.internal Ready worker 14h v1.33.4
----
--

View File

@@ -315,6 +315,6 @@ $ oc get nodes
[source,terminal]
----
NAME STATUS ROLES AGE VERSION
control-plane-1.example.com Ready master,worker 56m v1.32.3
compute-1.example.com Ready worker 11m v1.32.3
control-plane-1.example.com Ready master,worker 56m v1.33.4
compute-1.example.com Ready worker 11m v1.33.4
----

View File

@@ -64,9 +64,9 @@ $ oc get nodes -l <key_name>=<value>
[source,terminal]
----
NAME STATUS ROLES AGE VERSION
node1.example.com Ready worker 7h v1.32.3
node2.example.com Ready worker 7h v1.32.3
node3.example.com Ready worker 7h v1.32.3
node1.example.com Ready worker 7h v1.33.4
node2.example.com Ready worker 7h v1.33.4
node3.example.com Ready worker 7h v1.33.4
----
. Configure the Classic Load Balancer service by adding the cloud-based subnet information to the `annotations` field of the `Service` manifest:

View File

@@ -122,7 +122,7 @@ $ oc get nodes
+
----
NAME STATUS ROLES AGE VERSION
ci-ln-tkbxyft-f76d1-nvwhr-master-1 Ready, SchedulingDisabled master 133m v1.32.3
ci-ln-tkbxyft-f76d1-nvwhr-master-1 Ready, SchedulingDisabled master 133m v1.33.4
----
+
. Mark the node schedulable. You will know that the scheduling is enabled when `SchedulingDisabled` is no longer in status:
@@ -137,5 +137,5 @@ $ oc adm uncordon <node_name>
+
----
NAME STATUS ROLES AGE VERSION
ci-ln-tkbxyft-f76d1-nvwhr-master-1 Ready master 133m v1.32.3
ci-ln-tkbxyft-f76d1-nvwhr-master-1 Ready master 133m v1.33.4
----

View File

@@ -23,11 +23,11 @@ $ oc get nodes -n openshift-compliance
[source,terminal]
----
NAME STATUS ROLES AGE VERSION
ip-10-0-128-92.us-east-2.compute.internal Ready master 5h21m v1.32.3
ip-10-0-158-32.us-east-2.compute.internal Ready worker 5h17m v1.32.3
ip-10-0-166-81.us-east-2.compute.internal Ready worker 5h17m v1.32.3
ip-10-0-171-170.us-east-2.compute.internal Ready master 5h21m v1.32.3
ip-10-0-197-35.us-east-2.compute.internal Ready master 5h22m v1.32.3
ip-10-0-128-92.us-east-2.compute.internal Ready master 5h21m v1.33.4
ip-10-0-158-32.us-east-2.compute.internal Ready worker 5h17m v1.33.4
ip-10-0-166-81.us-east-2.compute.internal Ready worker 5h17m v1.33.4
ip-10-0-171-170.us-east-2.compute.internal Ready master 5h21m v1.33.4
ip-10-0-197-35.us-east-2.compute.internal Ready master 5h22m v1.33.4
----
. Add a label to nodes.

View File

@@ -44,10 +44,10 @@ $ oc get nodes
[source,terminal]
----
NAME STATUS ROLES AGE VERSION
ci-ln-47ltxtb-f76d1-mrffg-master-0 Ready master 42m v1.32.3
ci-ln-47ltxtb-f76d1-mrffg-master-1 Ready master 42m v1.32.3
ci-ln-47ltxtb-f76d1-mrffg-master-2 Ready master 42m v1.32.3
ci-ln-47ltxtb-f76d1-mrffg-worker-a-gsxbz Ready worker 35m v1.32.3
ci-ln-47ltxtb-f76d1-mrffg-worker-b-5qqdx Ready worker 35m v1.32.3
ci-ln-47ltxtb-f76d1-mrffg-worker-c-rjkpq Ready worker 34m v1.32.3
ci-ln-47ltxtb-f76d1-mrffg-master-0 Ready master 42m v1.33.4
ci-ln-47ltxtb-f76d1-mrffg-master-1 Ready master 42m v1.33.4
ci-ln-47ltxtb-f76d1-mrffg-master-2 Ready master 42m v1.33.4
ci-ln-47ltxtb-f76d1-mrffg-worker-a-gsxbz Ready worker 35m v1.33.4
ci-ln-47ltxtb-f76d1-mrffg-worker-b-5qqdx Ready worker 35m v1.33.4
ci-ln-47ltxtb-f76d1-mrffg-worker-c-rjkpq Ready worker 34m v1.33.4
----

View File

@@ -64,12 +64,12 @@ $ oc get nodes
[source,terminal]
----
NAME STATUS ROLES AGE VERSION
ip-10-0-148-79.us-west-1.compute.internal Ready worker 32m v1.32.3
ip-10-0-155-125.us-west-1.compute.internal Ready,SchedulingDisabled worker 35m v1.32.3
ip-10-0-170-47.us-west-1.compute.internal Ready control-plane,master 42m v1.32.3
ip-10-0-174-77.us-west-1.compute.internal Ready control-plane,master 42m v1.32.3
ip-10-0-211-49.us-west-1.compute.internal Ready control-plane,master 42m v1.32.3
ip-10-0-218-151.us-west-1.compute.internal Ready worker 31m v1.32.3
ip-10-0-148-79.us-west-1.compute.internal Ready worker 32m v1.33.4
ip-10-0-155-125.us-west-1.compute.internal Ready,SchedulingDisabled worker 35m v1.33.4
ip-10-0-170-47.us-west-1.compute.internal Ready control-plane,master 42m v1.33.4
ip-10-0-174-77.us-west-1.compute.internal Ready control-plane,master 42m v1.33.4
ip-10-0-211-49.us-west-1.compute.internal Ready control-plane,master 42m v1.33.4
ip-10-0-218-151.us-west-1.compute.internal Ready worker 31m v1.33.4
----
** When the node is back in the `Ready` state, check that the node is using the base image:

View File

@@ -165,12 +165,12 @@ $ oc get nodes
[source,terminal]
----
NAME STATUS ROLES AGE VERSION
ip-10-0-148-79.us-west-1.compute.internal Ready worker 32m v1.32.3
ip-10-0-155-125.us-west-1.compute.internal Ready,SchedulingDisabled worker 35m v1.32.3
ip-10-0-170-47.us-west-1.compute.internal Ready control-plane,master 42m v1.32.3
ip-10-0-174-77.us-west-1.compute.internal Ready control-plane,master 42m v1.32.3
ip-10-0-211-49.us-west-1.compute.internal Ready control-plane,master 42m v1.32.3
ip-10-0-218-151.us-west-1.compute.internal Ready worker 31m v1.32.3
ip-10-0-148-79.us-west-1.compute.internal Ready worker 32m v1.33.4
ip-10-0-155-125.us-west-1.compute.internal Ready,SchedulingDisabled worker 35m v1.33.4
ip-10-0-170-47.us-west-1.compute.internal Ready control-plane,master 42m v1.33.4
ip-10-0-174-77.us-west-1.compute.internal Ready control-plane,master 42m v1.33.4
ip-10-0-211-49.us-west-1.compute.internal Ready control-plane,master 42m v1.33.4
ip-10-0-218-151.us-west-1.compute.internal Ready worker 31m v1.33.4
----
. When the node is back in the `Ready` state, check that the node is using the custom layered image:

View File

@@ -52,12 +52,12 @@ $ oc get nodes
[source,terminal]
----
NAME STATUS ROLES AGE VERSION
ip-10-0-148-79.us-west-1.compute.internal Ready worker 32m v1.32.3
ip-10-0-155-125.us-west-1.compute.internal Ready,SchedulingDisabled worker 35m v1.32.3
ip-10-0-170-47.us-west-1.compute.internal Ready control-plane,master 42m v1.32.3
ip-10-0-174-77.us-west-1.compute.internal Ready control-plane,master 42m v1.32.3
ip-10-0-211-49.us-west-1.compute.internal Ready control-plane,master 42m v1.32.3
ip-10-0-218-151.us-west-1.compute.internal Ready worker 31m v1.32.3
ip-10-0-148-79.us-west-1.compute.internal Ready worker 32m v1.33.4
ip-10-0-155-125.us-west-1.compute.internal Ready,SchedulingDisabled worker 35m v1.33.4
ip-10-0-170-47.us-west-1.compute.internal Ready control-plane,master 42m v1.33.4
ip-10-0-174-77.us-west-1.compute.internal Ready control-plane,master 42m v1.33.4
ip-10-0-211-49.us-west-1.compute.internal Ready control-plane,master 42m v1.33.4
ip-10-0-218-151.us-west-1.compute.internal Ready worker 31m v1.33.4
----
. When the node is back in the `Ready` state, check that the node is using the base image:

View File

@@ -58,9 +58,9 @@ The control plane nodes are ready if the status is `Ready`, as shown in the foll
[source,terminal]
----
NAME STATUS ROLES AGE VERSION
ip-10-0-168-251.ec2.internal Ready control-plane,master 75m v1.32.3
ip-10-0-170-223.ec2.internal Ready control-plane,master 75m v1.32.3
ip-10-0-211-16.ec2.internal Ready control-plane,master 75m v1.32.3
ip-10-0-168-251.ec2.internal Ready control-plane,master 75m v1.33.4
ip-10-0-170-223.ec2.internal Ready control-plane,master 75m v1.33.4
ip-10-0-211-16.ec2.internal Ready control-plane,master 75m v1.33.4
----
. If the control plane nodes are _not_ ready, then check whether there are any pending certificate signing requests (CSRs) that must be approved.
@@ -99,9 +99,9 @@ The worker nodes are ready if the status is `Ready`, as shown in the following o
[source,terminal]
----
NAME STATUS ROLES AGE VERSION
ip-10-0-179-95.ec2.internal Ready worker 64m v1.32.3
ip-10-0-182-134.ec2.internal Ready worker 64m v1.32.3
ip-10-0-250-100.ec2.internal Ready worker 64m v1.32.3
ip-10-0-179-95.ec2.internal Ready worker 64m v1.33.4
ip-10-0-182-134.ec2.internal Ready worker 64m v1.33.4
ip-10-0-250-100.ec2.internal Ready worker 64m v1.33.4
----
. If the worker nodes are _not_ ready, then check whether there are any pending certificate signing requests (CSRs) that must be approved.
@@ -172,12 +172,12 @@ Check that the status for all nodes is `Ready`.
[source,terminal]
----
NAME STATUS ROLES AGE VERSION
ip-10-0-168-251.ec2.internal Ready control-plane,master 82m v1.32.3
ip-10-0-170-223.ec2.internal Ready control-plane,master 82m v1.32.3
ip-10-0-179-95.ec2.internal Ready worker 70m v1.32.3
ip-10-0-182-134.ec2.internal Ready worker 70m v1.32.3
ip-10-0-211-16.ec2.internal Ready control-plane,master 82m v1.32.3
ip-10-0-250-100.ec2.internal Ready worker 69m v1.32.3
ip-10-0-168-251.ec2.internal Ready control-plane,master 82m v1.33.4
ip-10-0-170-223.ec2.internal Ready control-plane,master 82m v1.33.4
ip-10-0-179-95.ec2.internal Ready worker 70m v1.33.4
ip-10-0-182-134.ec2.internal Ready worker 70m v1.33.4
ip-10-0-211-16.ec2.internal Ready control-plane,master 82m v1.33.4
ip-10-0-250-100.ec2.internal Ready worker 69m v1.33.4
----
+
If the cluster did not start properly, you might need to restore your cluster using an etcd backup.

View File

@@ -137,6 +137,6 @@ $ oc get nodes
[source, terminal]
----
NAME STATUS ROLES AGE VERSION
ip-10-0-132-74.ec2.internal Ready worker 17m v1.32.5
ip-10-0-134-183.ec2.internal Ready worker 4h5m v1.32.5
ip-10-0-132-74.ec2.internal Ready worker 17m v1.33.4
ip-10-0-134-183.ec2.internal Ready worker 4h5m v1.33.4
----

View File

@@ -36,12 +36,12 @@ $ oc get nodes
[source,terminal]
----
NAME STATUS ROLES AGE VERSION
ci-ln-812tb4k-72292-8bcj7-master-0 Ready control-plane,master 32m v1.32.3
ci-ln-812tb4k-72292-8bcj7-master-1 Ready control-plane,master 32m v1.32.3
ci-ln-812tb4k-72292-8bcj7-master-2 Ready control-plane,master 32m v1.32.3
Ci-ln-812tb4k-72292-8bcj7-worker-a-zhdvk Ready worker 19m v1.32.3
ci-ln-812tb4k-72292-8bcj7-worker-b-9hrmv Ready worker 19m v1.32.3
ci-ln-812tb4k-72292-8bcj7-worker-c-q8mw2 Ready worker 19m v1.32.3
ci-ln-812tb4k-72292-8bcj7-master-0 Ready control-plane,master 32m v1.33.4
ci-ln-812tb4k-72292-8bcj7-master-1 Ready control-plane,master 32m v1.33.4
ci-ln-812tb4k-72292-8bcj7-master-2 Ready control-plane,master 32m v1.33.4
ci-ln-812tb4k-72292-8bcj7-worker-a-zhdvk Ready worker 19m v1.33.4
ci-ln-812tb4k-72292-8bcj7-worker-b-9hrmv Ready worker 19m v1.33.4
ci-ln-812tb4k-72292-8bcj7-worker-c-q8mw2 Ready worker 19m v1.33.4
----
+
All nodes should show `Ready` in the `STATUS` column.

View File

@@ -79,12 +79,12 @@ $ oc get nodes
[source,terminal]
----
NAME STATUS ROLES AGE VERSION
ci-ln-812tb4k-72292-8bcj7-master-0 Ready control-plane,master 32m v1.32.3
ci-ln-812tb4k-72292-8bcj7-master-1 Ready control-plane,master 32m v1.32.3
ci-ln-812tb4k-72292-8bcj7-master-2 Ready control-plane,master 32m v1.32.3
Ci-ln-812tb4k-72292-8bcj7-worker-a-zhdvk Ready worker 19m v1.32.3
ci-ln-812tb4k-72292-8bcj7-worker-b-9hrmv Ready worker 19m v1.32.3
ci-ln-812tb4k-72292-8bcj7-worker-c-q8mw2 Ready worker 19m v1.32.3
ci-ln-812tb4k-72292-8bcj7-master-0 Ready control-plane,master 32m v1.33.4
ci-ln-812tb4k-72292-8bcj7-master-1 Ready control-plane,master 32m v1.33.4
ci-ln-812tb4k-72292-8bcj7-master-2 Ready control-plane,master 32m v1.33.4
ci-ln-812tb4k-72292-8bcj7-worker-a-zhdvk Ready worker 19m v1.33.4
ci-ln-812tb4k-72292-8bcj7-worker-b-9hrmv Ready worker 19m v1.33.4
ci-ln-812tb4k-72292-8bcj7-worker-c-q8mw2 Ready worker 19m v1.33.4
----
+
All nodes should show `Ready` in the `STATUS` column. It might take a few minutes for all nodes to become ready after approving the CSRs.

View File

@@ -224,5 +224,5 @@ $ oc get nodes
[source,terminal]
----
NAME STATUS ROLES AGE VERSION
node/sno-cluster-name.host.example.com Ready control-plane,master 5h15m v1.32.3
node/sno-cluster-name.host.example.com Ready control-plane,master 5h15m v1.33.4
----

View File

@@ -85,10 +85,10 @@ $ oc get nodes
[source,terminal]
----
NAME STATUS ROLES AGE VERSION
ip-10-0-137-182.us-east-2.compute.internal Ready,SchedulingDisabled worker 65m v1.32.3
ip-10-0-139-120.us-east-2.compute.internal Ready,SchedulingDisabled control-plane 74m v1.32.3
ip-10-0-176-102.us-east-2.compute.internal Ready control-plane 75m v1.32.3
ip-10-0-188-96.us-east-2.compute.internal Ready worker 65m v1.32.3
ip-10-0-200-59.us-east-2.compute.internal Ready worker 63m v1.32.3
ip-10-0-223-123.us-east-2.compute.internal Ready control-plane 73m v1.32.3
ip-10-0-137-182.us-east-2.compute.internal Ready,SchedulingDisabled worker 65m v1.33.4
ip-10-0-139-120.us-east-2.compute.internal Ready,SchedulingDisabled control-plane 74m v1.33.4
ip-10-0-176-102.us-east-2.compute.internal Ready control-plane 75m v1.33.4
ip-10-0-188-96.us-east-2.compute.internal Ready worker 65m v1.33.4
ip-10-0-200-59.us-east-2.compute.internal Ready worker 63m v1.33.4
ip-10-0-223-123.us-east-2.compute.internal Ready control-plane 73m v1.33.4
----

View File

@@ -127,7 +127,7 @@ $ oc get nodes
[source,terminal]
----
NAME STATUS ROLES AGE VERSION
ip-10-0-137-182.us-east-2.compute.internal Ready,SchedulingDisabled worker 65m v1.32.3
ip-10-0-188-96.us-east-2.compute.internal Ready worker 65m v1.32.3
ip-10-0-200-59.us-east-2.compute.internal Ready worker 63m v1.32.3
ip-10-0-137-182.us-east-2.compute.internal Ready,SchedulingDisabled worker 65m v1.33.4
ip-10-0-188-96.us-east-2.compute.internal Ready worker 65m v1.33.4
ip-10-0-200-59.us-east-2.compute.internal Ready worker 63m v1.33.4
----

View File

@@ -207,12 +207,12 @@ $ oc get node
[source,terminal]
----
NAME STATUS ROLES AGE VERSION
ip-10-0-137-44.ec2.internal Ready worker 7m v1.32.3
ip-10-0-138-148.ec2.internal Ready master 11m v1.32.3
ip-10-0-139-122.ec2.internal Ready master 11m v1.32.3
ip-10-0-147-35.ec2.internal Ready worker 7m v1.32.3
ip-10-0-153-12.ec2.internal Ready worker 7m v1.32.3
ip-10-0-154-10.ec2.internal Ready master 11m v1.32.3
ip-10-0-137-44.ec2.internal Ready worker 7m v1.33.4
ip-10-0-138-148.ec2.internal Ready master 11m v1.33.4
ip-10-0-139-122.ec2.internal Ready master 11m v1.33.4
ip-10-0-147-35.ec2.internal Ready worker 7m v1.33.4
ip-10-0-153-12.ec2.internal Ready worker 7m v1.33.4
ip-10-0-154-10.ec2.internal Ready master 11m v1.33.4
----
.. Start the debugging process to access the node:

View File

@@ -111,7 +111,7 @@ $ oc get node <node_name> <1>
[source,terminal]
----
NAME STATUS ROLES AGE VERSION
ip-10-0-217-226.ec2.internal Ready infra,worker 17h v1.32.3
ip-10-0-217-226.ec2.internal Ready infra,worker 17h v1.33.4
----
+
Because the role list includes `infra`, the pod is running on the correct node.

View File

@@ -66,7 +66,7 @@ ifndef::openshift-origin[]
[source,terminal]
----
NAME STATUS ROLES AGE VERSION
control-plane.example.com Ready master,worker 10m v1.32.3
control-plane.example.com Ready master,worker 10m v1.33.4
----
endif::openshift-origin[]
ifdef::openshift-origin[]

View File

@@ -65,9 +65,9 @@ $ oc get nodes
[source,terminal]
----
NAME STATUS ROLES AGE VERSION
master-0 Ready master 63m v1.32.3
master-1 Ready master 63m v1.32.3
master-2 Ready master 64m v1.32.3
master-0 Ready master 63m v1.33.4
master-1 Ready master 63m v1.33.4
master-2 Ready master 64m v1.33.4
----
+
The output lists all of the machines that you created.
@@ -193,21 +193,21 @@ endif::ibm-power[]
----
ifndef::ibm-power[]
NAME STATUS ROLES AGE VERSION
master-0 Ready master 73m v1.32.3
master-1 Ready master 73m v1.32.3
master-2 Ready master 74m v1.32.3
worker-0 Ready worker 11m v1.32.3
worker-1 Ready worker 11m v1.32.3
master-0 Ready master 73m v1.33.4
master-1 Ready master 73m v1.33.4
master-2 Ready master 74m v1.33.4
worker-0 Ready worker 11m v1.33.4
worker-1 Ready worker 11m v1.33.4
endif::ibm-power[]
ifdef::ibm-power[]
NAME STATUS ROLES AGE VERSION INTERNAL-IP EXTERNAL-IP OS-IMAGE KERNEL-VERSION CONTAINER-RUNTIME
worker-0-ppc64le Ready worker 42d v1.32.3 192.168.200.21 <none> Red Hat Enterprise Linux CoreOS 415.92.202309261919-0 (Plow) 5.14.0-284.34.1.el9_2.ppc64le cri-o://1.32.3-3.rhaos4.15.gitb36169e.el9
worker-1-ppc64le Ready worker 42d v1.32.3 192.168.200.20 <none> Red Hat Enterprise Linux CoreOS 415.92.202309261919-0 (Plow) 5.14.0-284.34.1.el9_2.ppc64le cri-o://1.32.3-3.rhaos4.15.gitb36169e.el9
master-0-x86 Ready control-plane,master 75d v1.32.3 10.248.0.38 10.248.0.38 Red Hat Enterprise Linux CoreOS 415.92.202309261919-0 (Plow) 5.14.0-284.34.1.el9_2.x86_64 cri-o://1.32.3-3.rhaos4.15.gitb36169e.el9
master-1-x86 Ready control-plane,master 75d v1.32.3 10.248.0.39 10.248.0.39 Red Hat Enterprise Linux CoreOS 415.92.202309261919-0 (Plow) 5.14.0-284.34.1.el9_2.x86_64 cri-o://1.32.3-3.rhaos4.15.gitb36169e.el9
master-2-x86 Ready control-plane,master 75d v1.32.3 10.248.0.40 10.248.0.40 Red Hat Enterprise Linux CoreOS 415.92.202309261919-0 (Plow) 5.14.0-284.34.1.el9_2.x86_64 cri-o://1.32.3-3.rhaos4.15.gitb36169e.el9
worker-0-x86 Ready worker 75d v1.32.3 10.248.0.43 10.248.0.43 Red Hat Enterprise Linux CoreOS 415.92.202309261919-0 (Plow) 5.14.0-284.34.1.el9_2.x86_64 cri-o://1.32.3-3.rhaos4.15.gitb36169e.el9
worker-1-x86 Ready worker 75d v1.32.3 10.248.0.44 10.248.0.44 Red Hat Enterprise Linux CoreOS 415.92.202309261919-0 (Plow) 5.14.0-284.34.1.el9_2.x86_64 cri-o://1.32.3-3.rhaos4.15.gitb36169e.el9
worker-0-ppc64le Ready worker 42d v1.33.4 192.168.200.21 <none> Red Hat Enterprise Linux CoreOS 415.92.202309261919-0 (Plow) 5.14.0-284.34.1.el9_2.ppc64le cri-o://1.33.4-3.rhaos4.15.gitb36169e.el9
worker-1-ppc64le Ready worker 42d v1.33.4 192.168.200.20 <none> Red Hat Enterprise Linux CoreOS 415.92.202309261919-0 (Plow) 5.14.0-284.34.1.el9_2.ppc64le cri-o://1.33.4-3.rhaos4.15.gitb36169e.el9
master-0-x86 Ready control-plane,master 75d v1.33.4 10.248.0.38 10.248.0.38 Red Hat Enterprise Linux CoreOS 415.92.202309261919-0 (Plow) 5.14.0-284.34.1.el9_2.x86_64 cri-o://1.33.4-3.rhaos4.15.gitb36169e.el9
master-1-x86 Ready control-plane,master 75d v1.33.4 10.248.0.39 10.248.0.39 Red Hat Enterprise Linux CoreOS 415.92.202309261919-0 (Plow) 5.14.0-284.34.1.el9_2.x86_64 cri-o://1.33.4-3.rhaos4.15.gitb36169e.el9
master-2-x86 Ready control-plane,master 75d v1.33.4 10.248.0.40 10.248.0.40 Red Hat Enterprise Linux CoreOS 415.92.202309261919-0 (Plow) 5.14.0-284.34.1.el9_2.x86_64 cri-o://1.33.4-3.rhaos4.15.gitb36169e.el9
worker-0-x86 Ready worker 75d v1.33.4 10.248.0.43 10.248.0.43 Red Hat Enterprise Linux CoreOS 415.92.202309261919-0 (Plow) 5.14.0-284.34.1.el9_2.x86_64 cri-o://1.33.4-3.rhaos4.15.gitb36169e.el9
worker-1-x86 Ready worker 75d v1.33.4 10.248.0.44 10.248.0.44 Red Hat Enterprise Linux CoreOS 415.92.202309261919-0 (Plow) 5.14.0-284.34.1.el9_2.x86_64 cri-o://1.33.4-3.rhaos4.15.gitb36169e.el9
endif::ibm-power[]
----
+

View File

@@ -32,7 +32,7 @@ stored the installation files in.
[source,terminal]
----
INFO Waiting up to 20m0s for the Kubernetes API at https://api.mycluster.example.com:6443...
INFO API v1.32.3 up
INFO API v1.33.4 up
INFO Waiting up to 30m0s for bootstrapping to complete...
INFO It is now safe to remove the bootstrap resources
INFO Time elapsed: 1s

View File

@@ -62,7 +62,7 @@ $ ./openshift-install --dir <installation_directory> wait-for bootstrap-complete
[source,terminal]
----
INFO Waiting up to 30m0s for the Kubernetes API at https://api.test.example.com:6443...
INFO API v1.32.3 up
INFO API v1.33.4 up
INFO Waiting up to 30m0s for bootstrapping to complete...
INFO It is now safe to remove the bootstrap resources
----

View File

@@ -40,7 +40,7 @@ You will see messages that confirm that the control plane machines are running a
+
[source,terminal]
----
INFO API v1.32.3 up
INFO API v1.33.4 up
INFO Waiting up to 30m0s for bootstrapping to complete...
...
INFO It is now safe to remove the bootstrap resources

View File

@@ -92,12 +92,12 @@ $ oc get nodes
[source,terminal]
----
NAME STATUS ROLES AGE VERSION
ip-10-0-139-200.us-east-2.compute.internal Ready master 111m v1.32.3
ip-10-0-143-147.us-east-2.compute.internal Ready worker 103m v1.32.3
ip-10-0-146-92.us-east-2.compute.internal Ready worker 101m v1.32.3
ip-10-0-156-255.us-east-2.compute.internal Ready master 111m v1.32.3
ip-10-0-164-74.us-east-2.compute.internal Ready master 111m v1.32.3
ip-10-0-169-2.us-east-2.compute.internal Ready worker 102m v1.32.3
ip-10-0-139-200.us-east-2.compute.internal Ready master 111m v1.33.4
ip-10-0-143-147.us-east-2.compute.internal Ready worker 103m v1.33.4
ip-10-0-146-92.us-east-2.compute.internal Ready worker 101m v1.33.4
ip-10-0-156-255.us-east-2.compute.internal Ready master 111m v1.33.4
ip-10-0-164-74.us-east-2.compute.internal Ready master 111m v1.33.4
ip-10-0-169-2.us-east-2.compute.internal Ready worker 102m v1.33.4
----
+
[source,terminal]

View File

@@ -35,11 +35,11 @@ $ oc get nodes
[source,terminal]
----
NAME STATUS ROLES AGE VERSION
openshift-master-1.openshift.example.com Ready master 30h v1.32.3
openshift-master-2.openshift.example.com Ready master 30h v1.32.3
openshift-master-3.openshift.example.com Ready master 30h v1.32.3
openshift-worker-0.openshift.example.com Ready worker 30h v1.32.3
openshift-worker-1.openshift.example.com Ready worker 30h v1.32.3
openshift-master-1.openshift.example.com Ready master 30h v1.33.4
openshift-master-2.openshift.example.com Ready master 30h v1.33.4
openshift-master-3.openshift.example.com Ready master 30h v1.33.4
openshift-worker-0.openshift.example.com Ready worker 30h v1.33.4
openshift-worker-1.openshift.example.com Ready worker 30h v1.33.4
----
. Get the compute machine set.
@@ -99,12 +99,12 @@ $ oc get nodes
[source,terminal]
----
NAME STATUS ROLES AGE VERSION
openshift-master-1.openshift.example.com Ready master 30h v1.32.3
openshift-master-2.openshift.example.com Ready master 30h v1.32.3
openshift-master-3.openshift.example.com Ready master 30h v1.32.3
openshift-worker-0.openshift.example.com Ready worker 30h v1.32.3
openshift-worker-1.openshift.example.com Ready worker 30h v1.32.3
openshift-worker-<num>.openshift.example.com Ready worker 3m27s v1.32.3
openshift-master-1.openshift.example.com Ready master 30h v1.33.4
openshift-master-2.openshift.example.com Ready master 30h v1.33.4
openshift-master-3.openshift.example.com Ready master 30h v1.33.4
openshift-worker-0.openshift.example.com Ready worker 30h v1.33.4
openshift-worker-1.openshift.example.com Ready worker 30h v1.33.4
openshift-worker-<num>.openshift.example.com Ready worker 3m27s v1.33.4
----
+
You can also check the kubelet.

View File

@@ -179,11 +179,11 @@ $ oc get nodes
[source,terminal]
----
NAME STATUS ROLES AGE VERSION
control-plane-1.example.com available master 4m2s v1.32.3
control-plane-2.example.com available master 141m v1.32.3
control-plane-3.example.com available master 141m v1.32.3
compute-1.example.com available worker 87m v1.32.3
compute-2.example.com available worker 87m v1.32.3
control-plane-1.example.com available master 4m2s v1.33.4
control-plane-2.example.com available master 141m v1.33.4
control-plane-3.example.com available master 141m v1.33.4
compute-1.example.com available worker 87m v1.33.4
compute-2.example.com available worker 87m v1.33.4
----
+
[NOTE]

View File

@@ -21,10 +21,10 @@ $ oc get nodes
[source,terminal]
----
NAME STATUS ROLES AGE VERSION
master-0.cloud.example.com Ready master 145m v1.32.3
master-1.cloud.example.com Ready master 135m v1.32.3
master-2.cloud.example.com Ready master 145m v1.32.3
worker-2.cloud.example.com Ready worker 100m v1.32.3
master-0.cloud.example.com Ready master 145m v1.33.4
master-1.cloud.example.com Ready master 135m v1.33.4
master-2.cloud.example.com Ready master 145m v1.33.4
worker-2.cloud.example.com Ready worker 100m v1.33.4
----
. Check for inconsistent timing delays due to clock drift. For example:

View File

@@ -21,9 +21,9 @@ $ oc get nodes
[source,terminal]
----
NAME STATUS ROLES AGE VERSION
master-0.example.com Ready master,worker 4h v1.32.3
master-1.example.com Ready master,worker 4h v1.32.3
master-2.example.com Ready master,worker 4h v1.32.3
master-0.example.com Ready master,worker 4h v1.33.4
master-1.example.com Ready master,worker 4h v1.33.4
master-2.example.com Ready master,worker 4h v1.33.4
----
. Confirm the installation program deployed all pods successfully. The following command

View File

@@ -236,13 +236,13 @@ $ oc get nodes
[source,terminal]
----
NAME STATUS ROLES AGE VERSION
ip-10-0-128-78.ec2.internal Ready worker 117m v1.32.3
ip-10-0-146-113.ec2.internal Ready master 127m v1.32.3
ip-10-0-153-35.ec2.internal Ready worker 118m v1.32.3
ip-10-0-176-58.ec2.internal Ready master 126m v1.32.3
ip-10-0-217-135.ec2.internal Ready worker 2m57s v1.32.3 <1>
ip-10-0-225-248.ec2.internal Ready master 127m v1.32.3
ip-10-0-245-59.ec2.internal Ready worker 116m v1.32.3
ip-10-0-128-78.ec2.internal Ready worker 117m v1.33.4
ip-10-0-146-113.ec2.internal Ready master 127m v1.33.4
ip-10-0-153-35.ec2.internal Ready worker 118m v1.33.4
ip-10-0-176-58.ec2.internal Ready master 126m v1.33.4
ip-10-0-217-135.ec2.internal Ready worker 2m57s v1.33.4 <1>
ip-10-0-225-248.ec2.internal Ready master 127m v1.33.4
ip-10-0-245-59.ec2.internal Ready worker 116m v1.33.4
----
<1> This is the new node.

View File

@@ -76,7 +76,7 @@ $ oc --certificate-authority ~/certs/ca.ca get node
----
oc get node
NAME STATUS ROLES AGE VERSION
dhcp-1-235-195.arm.example.com Ready control-plane,master,worker 76m v1.32.3
dhcp-1-235-195.arm.example.com Ready control-plane,master,worker 76m v1.33.4
----
.. Add the new CA file to the $KUBECONFIG environment variable by running the following command:

View File

@@ -32,15 +32,15 @@ clusterresourceoverride-operator-6b8b8b656b-lvr62 1/1 Running 0
[source,terminal]
----
NAME STATUS ROLES AGE VERSION
ip-10-0-14-183.us-west-2.compute.internal Ready control-plane,master 65m v1.32.3
ip-10-0-2-39.us-west-2.compute.internal Ready worker 58m v1.32.3
ip-10-0-20-140.us-west-2.compute.internal Ready control-plane,master 65m v1.32.3
ip-10-0-23-244.us-west-2.compute.internal Ready infra 55m v1.32.3
ip-10-0-77-153.us-west-2.compute.internal Ready control-plane,master 65m v1.32.3
ip-10-0-99-108.us-west-2.compute.internal Ready worker 24m v1.32.3
ip-10-0-24-233.us-west-2.compute.internal Ready infra 55m v1.32.3
ip-10-0-88-109.us-west-2.compute.internal Ready worker 24m v1.32.3
ip-10-0-67-453.us-west-2.compute.internal Ready infra 55m v1.32.3
ip-10-0-14-183.us-west-2.compute.internal Ready control-plane,master 65m v1.33.4
ip-10-0-2-39.us-west-2.compute.internal Ready worker 58m v1.33.4
ip-10-0-20-140.us-west-2.compute.internal Ready control-plane,master 65m v1.33.4
ip-10-0-23-244.us-west-2.compute.internal Ready infra 55m v1.33.4
ip-10-0-77-153.us-west-2.compute.internal Ready control-plane,master 65m v1.33.4
ip-10-0-99-108.us-west-2.compute.internal Ready worker 24m v1.33.4
ip-10-0-24-233.us-west-2.compute.internal Ready infra 55m v1.33.4
ip-10-0-88-109.us-west-2.compute.internal Ready worker 24m v1.33.4
ip-10-0-67-453.us-west-2.compute.internal Ready infra 55m v1.33.4
----
.Procedure

View File

@@ -128,12 +128,12 @@ $ oc get nodes
[source,terminal]
----
NAME STATUS ROLES AGE VERSION
ip-10-0-136-161.ec2.internal Ready worker 28m v1.32.3
ip-10-0-136-243.ec2.internal Ready master 34m v1.32.3
ip-10-0-141-105.ec2.internal Ready,SchedulingDisabled worker 28m v1.32.3
ip-10-0-142-249.ec2.internal Ready master 34m v1.32.3
ip-10-0-153-11.ec2.internal Ready worker 28m v1.32.3
ip-10-0-153-150.ec2.internal Ready master 34m v1.32.3
ip-10-0-136-161.ec2.internal Ready worker 28m v1.33.4
ip-10-0-136-243.ec2.internal Ready master 34m v1.33.4
ip-10-0-141-105.ec2.internal Ready,SchedulingDisabled worker 28m v1.33.4
ip-10-0-142-249.ec2.internal Ready master 34m v1.33.4
ip-10-0-153-11.ec2.internal Ready worker 28m v1.33.4
ip-10-0-153-150.ec2.internal Ready master 34m v1.33.4
----
+
You can see that scheduling on each worker node is disabled as the change is being applied.

View File

@@ -58,9 +58,9 @@ $ oc get nodes
[source,terminal]
----
NAME STATUS ROLES AGE VERSION
ip-10-0-143-147.us-east-2.compute.internal Ready worker 103m v1.32.3
ip-10-0-146-92.us-east-2.compute.internal Ready worker 101m v1.32.3
ip-10-0-169-2.us-east-2.compute.internal Ready worker 102m v1.32.3
ip-10-0-143-147.us-east-2.compute.internal Ready worker 103m v1.33.4
ip-10-0-146-92.us-east-2.compute.internal Ready worker 101m v1.33.4
ip-10-0-169-2.us-east-2.compute.internal Ready worker 102m v1.33.4
----
+
[source,terminal]

View File

@@ -26,9 +26,9 @@ $ oc get nodes
[source,terminal]
----
NAME STATUS ROLES AGE VERSION
master.example.com Ready master 7h v1.32.3
node1.example.com Ready worker 7h v1.32.3
node2.example.com Ready worker 7h v1.32.3
master.example.com Ready master 7h v1.33.4
node1.example.com Ready worker 7h v1.33.4
node2.example.com Ready worker 7h v1.33.4
----
+
The following example is a cluster with one unhealthy node:
@@ -42,9 +42,9 @@ $ oc get nodes
[source,terminal]
----
NAME STATUS ROLES AGE VERSION
master.example.com Ready master 7h v1.32.3
node1.example.com NotReady,SchedulingDisabled worker 7h v1.32.3
node2.example.com Ready worker 7h v1.32.3
master.example.com Ready master 7h v1.33.4
node1.example.com NotReady,SchedulingDisabled worker 7h v1.33.4
node2.example.com Ready worker 7h v1.33.4
----
+
The conditions that trigger a `NotReady` status are shown later in this section.
@@ -60,9 +60,9 @@ $ oc get nodes -o wide
[source,terminal]
----
NAME STATUS ROLES AGE VERSION INTERNAL-IP EXTERNAL-IP OS-IMAGE KERNEL-VERSION CONTAINER-RUNTIME
master.example.com Ready master 171m v1.32.3 10.0.129.108 <none> Red Hat Enterprise Linux CoreOS 48.83.202103210901-0 (Ootpa) 4.19.0-240.15.1.el8_3.x86_64 cri-o://1.32.3-30.rhaos4.10.gitf2f339d.el8-dev
node1.example.com Ready worker 72m v1.32.3 10.0.129.222 <none> Red Hat Enterprise Linux CoreOS 48.83.202103210901-0 (Ootpa) 4.19.0-240.15.1.el8_3.x86_64 cri-o://1.32.3-30.rhaos4.10.gitf2f339d.el8-dev
node2.example.com Ready worker 164m v1.32.3 10.0.142.150 <none> Red Hat Enterprise Linux CoreOS 48.83.202103210901-0 (Ootpa) 4.19.0-240.15.1.el8_3.x86_64 cri-o://1.32.3-30.rhaos4.10.gitf2f339d.el8-dev
master.example.com Ready master 171m v1.33.4 10.0.129.108 <none> Red Hat Enterprise Linux CoreOS 48.83.202103210901-0 (Ootpa) 4.19.0-240.15.1.el8_3.x86_64 cri-o://1.33.4-30.rhaos4.10.gitf2f339d.el8-dev
node1.example.com Ready worker 72m v1.33.4 10.0.129.222 <none> Red Hat Enterprise Linux CoreOS 48.83.202103210901-0 (Ootpa) 4.19.0-240.15.1.el8_3.x86_64 cri-o://1.33.4-30.rhaos4.10.gitf2f339d.el8-dev
node2.example.com Ready worker 164m v1.33.4 10.0.142.150 <none> Red Hat Enterprise Linux CoreOS 48.83.202103210901-0 (Ootpa) 4.19.0-240.15.1.el8_3.x86_64 cri-o://1.33.4-30.rhaos4.10.gitf2f339d.el8-dev
----
* The following command lists information about a single node:
@@ -83,7 +83,7 @@ $ oc get node node1.example.com
[source,terminal]
----
NAME STATUS ROLES AGE VERSION
node1.example.com Ready worker 7h v1.32.3
node1.example.com Ready worker 7h v1.33.4
----
* The following command provides more detailed information about a specific node, including the reason for
@@ -162,9 +162,9 @@ System Info: <9>
OS Image: Red Hat Enterprise Linux CoreOS 410.8.20190520.0 (Ootpa)
Operating System: linux
Architecture: amd64
Container Runtime Version: cri-o://1.32.3-0.6.dev.rhaos4.3.git9ad059b.el8-rc2
Kubelet Version: v1.32.3
Kube-Proxy Version: v1.32.3
Container Runtime Version: cri-o://1.33.4-0.6.dev.rhaos4.3.git9ad059b.el8-rc2
Kubelet Version: v1.33.4
Kube-Proxy Version: v1.33.4
PodCIDR: 10.128.4.0/24
ProviderID: aws:///us-east-2a/i-04e87b31dc6b3e171
Non-terminated Pods: (12 in total) <10>

View File

@@ -44,7 +44,7 @@ $ oc get node <node1>
[source,terminal]
----
NAME STATUS ROLES AGE VERSION
<node1> Ready,SchedulingDisabled worker 1d v1.32.3
<node1> Ready,SchedulingDisabled worker 1d v1.33.4
----
. Evacuate the pods using one of the following methods:

View File

@@ -145,7 +145,7 @@ $ oc get nodes -l type=user-node
[source,terminal]
----
NAME STATUS ROLES AGE VERSION
ci-ln-l8nry52-f76d1-hl7m7-worker-c-vmqzp Ready worker 61s v1.32.3
ci-ln-l8nry52-f76d1-hl7m7-worker-c-vmqzp Ready worker 61s v1.33.4
----
* Add labels directly to a node:
@@ -198,5 +198,5 @@ $ oc get nodes -l type=user-node,region=east
[source,terminal]
----
NAME STATUS ROLES AGE VERSION
ci-ln-l8nry52-f76d1-hl7m7-worker-b-tgq49 Ready worker 17m v1.32.3
ci-ln-l8nry52-f76d1-hl7m7-worker-b-tgq49 Ready worker 17m v1.33.4
----

View File

@@ -180,7 +180,7 @@ $ oc get nodes -l type=user-node,region=east
[source,terminal]
----
NAME STATUS ROLES AGE VERSION
ip-10-0-142-25.ec2.internal Ready worker 17m v1.32.3
ip-10-0-142-25.ec2.internal Ready worker 17m v1.33.4
----
. Add the matching node selector to a pod:

View File

@@ -161,7 +161,7 @@ $ oc get nodes -l type=user-node,region=east
[source,terminal]
----
NAME STATUS ROLES AGE VERSION
ci-ln-l8nry52-f76d1-hl7m7-worker-c-vmqzp Ready worker 61s v1.32.3
ci-ln-l8nry52-f76d1-hl7m7-worker-c-vmqzp Ready worker 61s v1.33.4
----
* Add labels directly to a node:
@@ -214,5 +214,5 @@ $ oc get nodes -l type=user-node,region=east
[source,terminal]
----
NAME STATUS ROLES AGE VERSION
ci-ln-l8nry52-f76d1-hl7m7-worker-b-tgq49 Ready worker 17m v1.32.3
ci-ln-l8nry52-f76d1-hl7m7-worker-b-tgq49 Ready worker 17m v1.33.4
----

View File

@@ -28,12 +28,12 @@ $ oc get nodes
[source,terminal]
----
NAME STATUS ROLES AGE VERSION
ip-10-0-52-50.us-east-2.compute.internal Ready worker 3d17h v1.32.3
ip-10-0-58-24.us-east-2.compute.internal Ready control-plane,master 3d17h v1.32.3
ip-10-0-68-148.us-east-2.compute.internal Ready worker 3d17h v1.32.3
ip-10-0-68-68.us-east-2.compute.internal Ready control-plane,master 3d17h v1.32.3
ip-10-0-72-170.us-east-2.compute.internal Ready control-plane,master 3d17h v1.32.3
ip-10-0-74-50.us-east-2.compute.internal Ready worker 3d17h v1.32.3
ip-10-0-52-50.us-east-2.compute.internal Ready worker 3d17h v1.33.4
ip-10-0-58-24.us-east-2.compute.internal Ready control-plane,master 3d17h v1.33.4
ip-10-0-68-148.us-east-2.compute.internal Ready worker 3d17h v1.33.4
ip-10-0-68-68.us-east-2.compute.internal Ready control-plane,master 3d17h v1.33.4
ip-10-0-72-170.us-east-2.compute.internal Ready control-plane,master 3d17h v1.33.4
ip-10-0-74-50.us-east-2.compute.internal Ready worker 3d17h v1.33.4
----
. View the machines and machine sets that exist in the `openshift-machine-api` namespace by running the following command. Each compute machine set is associated with a different availability zone within the AWS region. The installer automatically load balances compute machines across availability zones.

View File

@@ -346,13 +346,13 @@ $ oc get nodes
[source,terminal]
----
NAME STATUS ROLES AGE VERSION
myclustername-master-0 Ready control-plane,master 6h39m v1.32.3
myclustername-master-1 Ready control-plane,master 6h41m v1.32.3
myclustername-master-2 Ready control-plane,master 6h39m v1.32.3
myclustername-nc4ast4-gpu-worker-centralus1-w9bqn Ready worker 14m v1.32.3
myclustername-worker-centralus1-rbh6b Ready worker 6h29m v1.32.3
myclustername-worker-centralus2-dbz7w Ready worker 6h29m v1.32.3
myclustername-worker-centralus3-p9b8c Ready worker 6h31m v1.32.3
myclustername-master-0 Ready control-plane,master 6h39m v1.33.4
myclustername-master-1 Ready control-plane,master 6h41m v1.33.4
myclustername-master-2 Ready control-plane,master 6h39m v1.33.4
myclustername-nc4ast4-gpu-worker-centralus1-w9bqn Ready worker 14m v1.33.4
myclustername-worker-centralus1-rbh6b Ready worker 6h29m v1.33.4
myclustername-worker-centralus2-dbz7w Ready worker 6h29m v1.33.4
myclustername-worker-centralus3-p9b8c Ready worker 6h31m v1.33.4
----
. View the list of compute machine sets:

View File

@@ -156,13 +156,13 @@ $ oc get nodes
[source,terminal]
----
NAME STATUS ROLES AGE VERSION
myclustername-2pt9p-master-0.c.openshift-qe.internal Ready control-plane,master 8h v1.32.3
myclustername-2pt9p-master-1.c.openshift-qe.internal Ready control-plane,master 8h v1.32.3
myclustername-2pt9p-master-2.c.openshift-qe.internal Ready control-plane,master 8h v1.32.3
myclustername-2pt9p-worker-a-mxtnz.c.openshift-qe.internal Ready worker 8h v1.32.3
myclustername-2pt9p-worker-b-9pzzn.c.openshift-qe.internal Ready worker 8h v1.32.3
myclustername-2pt9p-worker-c-6pbg6.c.openshift-qe.internal Ready worker 8h v1.32.3
myclustername-2pt9p-worker-gpu-a-wxcr6.c.openshift-qe.internal Ready worker 4h35m v1.32.3
myclustername-2pt9p-master-0.c.openshift-qe.internal Ready control-plane,master 8h v1.33.4
myclustername-2pt9p-master-1.c.openshift-qe.internal Ready control-plane,master 8h v1.33.4
myclustername-2pt9p-master-2.c.openshift-qe.internal Ready control-plane,master 8h v1.33.4
myclustername-2pt9p-worker-a-mxtnz.c.openshift-qe.internal Ready worker 8h v1.33.4
myclustername-2pt9p-worker-b-9pzzn.c.openshift-qe.internal Ready worker 8h v1.33.4
myclustername-2pt9p-worker-c-6pbg6.c.openshift-qe.internal Ready worker 8h v1.33.4
myclustername-2pt9p-worker-gpu-a-wxcr6.c.openshift-qe.internal Ready worker 4h35m v1.33.4
----
. View the machines and machine sets that exist in the `openshift-machine-api` namespace by running the following command. Each compute machine set is associated with a different availability zone within the GCP region. The installer automatically load balances compute machines across availability zones.

View File

@@ -61,11 +61,11 @@ $ oc get nodes
[source,terminal]
----
NAME STATUS ROLES AGE VERSION
master-0 Ready master 2d v1.32.3
master-1 Ready master 2d v1.32.3
worker-0 Ready worker 2d v1.32.3
worker-1 Ready worker 2d v1.32.3
worker-2 Ready mcp-offloading,worker 47h v1.32.3
master-0 Ready master 2d v1.33.4
master-1 Ready master 2d v1.33.4
worker-0 Ready worker 2d v1.33.4
worker-1 Ready worker 2d v1.33.4
worker-2 Ready mcp-offloading,worker 47h v1.33.4
----
--

View File

@@ -18,14 +18,14 @@ During a cluster upgrade, the index image tag for the default Red Hat-provided c
[source,terminal]
----
registry.redhat.io/redhat/redhat-operator-index:v4.19
registry.redhat.io/redhat/redhat-operator-index:v4.20
----
to:
[source,terminal]
----
registry.redhat.io/redhat/redhat-operator-index:v4.19
registry.redhat.io/redhat/redhat-operator-index:v4.20
----
However, the CVO does not automatically update image tags for custom catalogs. To ensure users are left with a compatible and supported Operator installation after a cluster upgrade, custom catalogs should also be kept updated to reference an updated index image.
@@ -64,7 +64,7 @@ metadata:
"quay.io/example-org/example-catalog:v{kube_major_version}.{kube_minor_version}"
spec:
displayName: Example Catalog
image: quay.io/example-org/example-catalog:v1.32
image: quay.io/example-org/example-catalog:v1.33
priority: -400
publisher: Example Org
----
@@ -77,11 +77,11 @@ If the `spec.image` field and the `olm.catalogImageTemplate` annotation are both
If the `spec.image` field is not set and the annotation does not resolve to a usable pull spec, OLM stops reconciliation of the catalog source and sets it into a human-readable error condition.
====
For an {product-title} {product-version} cluster, which uses Kubernetes 1.32, the `olm.catalogImageTemplate` annotation in the preceding example resolves to the following image reference:
For an {product-title} {product-version} cluster, which uses Kubernetes 1.33, the `olm.catalogImageTemplate` annotation in the preceding example resolves to the following image reference:
[source,terminal]
----
quay.io/example-org/example-catalog:v1.32
quay.io/example-org/example-catalog:v1.33
----
For future releases of {product-title}, you can create updated index images for your custom catalogs that target the later Kubernetes version that is used by the later {product-title} version. With the `olm.catalogImageTemplate` annotation set before the upgrade, upgrading the cluster to the later {product-title} version would then automatically update the catalog's index image as well.

View File

@@ -26,12 +26,12 @@ $ oc get nodes
[source,terminal]
----
NAME STATUS ROLES AGE VERSION
compute-1.example.com Ready worker 33m v1.32.3
control-plane-1.example.com Ready master 41m v1.32.3
control-plane-2.example.com Ready master 45m v1.32.3
compute-2.example.com Ready worker 38m v1.32.3
compute-3.example.com Ready worker 33m v1.32.3
control-plane-3.example.com Ready master 41m v1.32.3
compute-1.example.com Ready worker 33m v1.33.4
control-plane-1.example.com Ready master 41m v1.33.4
control-plane-2.example.com Ready master 45m v1.33.4
compute-2.example.com Ready worker 38m v1.33.4
compute-3.example.com Ready worker 33m v1.33.4
control-plane-3.example.com Ready master 41m v1.33.4
----
. Review CPU and memory resource availability for each cluster node:

View File

@@ -72,7 +72,7 @@ $ oc get nodes -l node-role.kubernetes.io/master | grep "NotReady"
.Example output
[source,terminal]
----
ip-10-0-131-183.ec2.internal NotReady master 122m v1.32.3 <1>
ip-10-0-131-183.ec2.internal NotReady master 122m v1.33.4 <1>
----
<1> If the node is listed as `NotReady`, then the *node is not ready*.
@@ -96,9 +96,9 @@ $ oc get nodes -l node-role.kubernetes.io/master
[source,terminal]
----
NAME STATUS ROLES AGE VERSION
ip-10-0-131-183.ec2.internal Ready master 6h13m v1.32.3
ip-10-0-164-97.ec2.internal Ready master 6h13m v1.32.3
ip-10-0-154-204.ec2.internal Ready master 6h13m v1.32.3
ip-10-0-131-183.ec2.internal Ready master 6h13m v1.33.4
ip-10-0-164-97.ec2.internal Ready master 6h13m v1.33.4
ip-10-0-154-204.ec2.internal Ready master 6h13m v1.33.4
----
.. Check whether the status of an etcd pod is either `Error` or `CrashloopBackoff`:

View File

@@ -285,10 +285,10 @@ examplecluster-compute-1 Running 165m opens
$ oc get nodes
NAME STATUS ROLES AGE VERSION
openshift-control-plane-0 Ready master 3h24m v1.32.3
openshift-control-plane-1 Ready master 3h24m v1.32.3
openshift-compute-0 Ready worker 176m v1.32.3
openshift-compute-1 Ready worker 176m v1.32.3
openshift-control-plane-0 Ready master 3h24m v1.33.4
openshift-control-plane-1 Ready master 3h24m v1.33.4
openshift-compute-0 Ready worker 176m v1.33.4
openshift-compute-1 Ready worker 176m v1.33.4
----
. Create the new `BareMetalHost` object and the secret to store the BMC credentials:
@@ -413,11 +413,11 @@ $ oc get nodes
----
$ oc get nodes
NAME STATUS ROLES AGE VERSION
openshift-control-plane-0 Ready master 4h26m v1.32.3
openshift-control-plane-1 Ready master 4h26m v1.32.3
openshift-control-plane-2 Ready master 12m v1.32.3
openshift-compute-0 Ready worker 3h58m v1.32.3
openshift-compute-1 Ready worker 3h58m v1.32.3
openshift-control-plane-0 Ready master 4h26m v1.33.4
openshift-control-plane-1 Ready master 4h26m v1.33.4
openshift-control-plane-2 Ready master 12m v1.33.4
openshift-compute-0 Ready worker 3h58m v1.33.4
openshift-compute-1 Ready worker 3h58m v1.33.4
----
. Turn the quorum guard back on by entering the following command:

View File

@@ -105,7 +105,7 @@ $ oc get node | grep worker
[source,terminal]
----
NAME STATUS ROLES AGE VERSION
ip-10-0-169-2.us-east-2.compute.internal Ready worker 102m v1.32.3
ip-10-0-169-2.us-east-2.compute.internal Ready worker 102m v1.33.4
----
+
[source,terminal]

View File

@@ -111,12 +111,12 @@ $ oc get nodes
[source,terminal]
----
NAME STATUS ROLES AGE VERSION
ip-10-0-136-161.ec2.internal Ready worker 28m v1.32.3
ip-10-0-136-243.ec2.internal Ready master 34m v1.32.3
ip-10-0-141-105.ec2.internal Ready,SchedulingDisabled worker 28m v1.32.3
ip-10-0-142-249.ec2.internal Ready master 34m v1.32.3
ip-10-0-153-11.ec2.internal Ready worker 28m v1.32.3
ip-10-0-153-150.ec2.internal Ready master 34m v1.32.3
ip-10-0-136-161.ec2.internal Ready worker 28m v1.33.4
ip-10-0-136-243.ec2.internal Ready master 34m v1.33.4
ip-10-0-141-105.ec2.internal Ready,SchedulingDisabled worker 28m v1.33.4
ip-10-0-142-249.ec2.internal Ready master 34m v1.33.4
ip-10-0-153-11.ec2.internal Ready worker 28m v1.33.4
ip-10-0-153-150.ec2.internal Ready master 34m v1.33.4
----
+
You can see that scheduling on each worker node is disabled as the change is being applied.

View File

@@ -132,13 +132,13 @@ The `upgrade` playbook only updates the {product-title} packages. It does not up
[source,terminal]
----
NAME STATUS ROLES AGE VERSION
mycluster-control-plane-0 Ready master 145m v1.32.3
mycluster-control-plane-1 Ready master 145m v1.32.3
mycluster-control-plane-2 Ready master 145m v1.32.3
mycluster-rhel8-0 Ready worker 98m v1.32.3
mycluster-rhel8-1 Ready worker 98m v1.32.3
mycluster-rhel8-2 Ready worker 98m v1.32.3
mycluster-rhel8-3 Ready worker 98m v1.32.3
mycluster-control-plane-0 Ready master 145m v1.33.4
mycluster-control-plane-1 Ready master 145m v1.33.4
mycluster-control-plane-2 Ready master 145m v1.33.4
mycluster-rhel8-0 Ready worker 98m v1.33.4
mycluster-rhel8-1 Ready worker 98m v1.33.4
mycluster-rhel8-2 Ready worker 98m v1.33.4
mycluster-rhel8-3 Ready worker 98m v1.33.4
----
. Optional: Update the operating system packages that were not updated by the `upgrade` playbook. To update packages that are not on {product-version}, use the following command:

View File

@@ -214,6 +214,6 @@ $ oc get nodes
[source,terminal]
----
NAME STATUS ROLES AGE VERSION
control-plane-1.example.com Ready master,worker 56m v1.32.3
compute-1.example.com Ready worker 11m v1.32.3
control-plane-1.example.com Ready master,worker 56m v1.33.4
compute-1.example.com Ready worker 11m v1.33.4
----

View File

@@ -190,11 +190,11 @@ $ oc get nodes
[source,terminal]
----
NAME STATUS ROLES AGE VERSION
ip-10-0-168-251.ec2.internal Ready master 82m v1.32.3
ip-10-0-170-223.ec2.internal Ready master 82m v1.32.3
ip-10-0-179-95.ec2.internal Ready worker 70m v1.32.3
ip-10-0-182-134.ec2.internal Ready worker 70m v1.32.3
ip-10-0-211-16.ec2.internal Ready master 82m v1.32.3
ip-10-0-250-100.ec2.internal Ready worker 69m v1.32.3
ip-10-0-168-251.ec2.internal Ready master 82m v1.33.4
ip-10-0-170-223.ec2.internal Ready master 82m v1.33.4
ip-10-0-179-95.ec2.internal Ready worker 70m v1.33.4
ip-10-0-182-134.ec2.internal Ready worker 70m v1.33.4
ip-10-0-211-16.ec2.internal Ready master 82m v1.33.4
ip-10-0-250-100.ec2.internal Ready worker 69m v1.33.4
----

View File

@@ -31,9 +31,9 @@ $ oc get nodes -l node-role.kubernetes.io/worker
[source,terminal]
----
NAME STATUS ROLES AGE VERSION
compute-node-0 Ready worker 30m v1.32.3
compute-node-1 Ready worker 30m v1.32.3
compute-node-2 Ready worker 30m v1.32.3
compute-node-0 Ready worker 30m v1.33.4
compute-node-1 Ready worker 30m v1.33.4
compute-node-2 Ready worker 30m v1.33.4
----
+
Note the names of your compute nodes.

View File

@@ -26,9 +26,9 @@ $ oc get nodes -l node-role.kubernetes.io/master
[source,terminal]
----
NAME STATUS ROLES AGE VERSION
control-plane-node-0 Ready master 75m v1.32.3
control-plane-node-1 Ready master 75m v1.32.3
control-plane-node-2 Ready master 75m v1.32.3
control-plane-node-0 Ready master 75m v1.33.4
control-plane-node-1 Ready master 75m v1.33.4
control-plane-node-2 Ready master 75m v1.33.4
----
+
Note the names of your control plane nodes.

View File

@@ -102,9 +102,9 @@ $ oc get -l node.openshift.io/os_id=rhel
[source,text]
----
NAME STATUS ROLES AGE VERSION
rhel-node1.example.com Ready worker 7h v1.32.3
rhel-node2.example.com Ready worker 7h v1.32.3
rhel-node3.example.com Ready worker 7h v1.32.3
rhel-node1.example.com Ready worker 7h v1.33.4
rhel-node2.example.com Ready worker 7h v1.33.4
rhel-node3.example.com Ready worker 7h v1.33.4
----
. xref:../../nodes/nodes/nodes-nodes-working.adoc#nodes-nodes-working-deleting-bare-metal_nodes-nodes-working[Continue with the node removal process]. {op-system-base} nodes are not managed by the Machine API and have no compute machine sets associated with them. You must unschedule and drain the node before you manually delete it from the cluster.