1
0
mirror of https://github.com/openshift/openshift-docs.git synced 2026-02-05 12:46:18 +01:00

OSDOCS-11300: hardcoded 4.17 version number bump

This commit is contained in:
Andrew Taylor
2024-09-19 09:05:50 -04:00
committed by openshift-cherrypick-robot
parent 5ad54fc1f4
commit db07e5365c
91 changed files with 178 additions and 179 deletions

View File

@@ -38,5 +38,5 @@ include::modules/verifying-the-assumed-iam-role-in-your-pod.adoc[leveloffset=+2]
* For more information about installing and using the AWS Boto3 SDK for Python, see the link:https://boto3.amazonaws.com/v1/documentation/api/latest/index.html[AWS Boto3 documentation].
ifdef::openshift-rosa,openshift-dedicated[]
* For general information about webhook admission plugins for OpenShift, see link:https://docs.openshift.com/container-platform/4.16/architecture/admission-plug-ins.html#admission-webhooks-about_admission-plug-ins[Webhook admission plugins] in the OpenShift Container Platform documentation.
* For general information about webhook admission plugins for OpenShift, see link:https://docs.openshift.com/container-platform/4.17/architecture/admission-plug-ins.html#admission-webhooks-about_admission-plug-ins[Webhook admission plugins] in the OpenShift Container Platform documentation.
endif::openshift-rosa,openshift-dedicated[]

View File

@@ -24,6 +24,6 @@ Commands for multi-node deployments, projects, and developer tooling are not sup
== Additional resources
* xref:..//microshift_cli_ref/microshift-oc-cli-install.adoc#microshift-oc-cli-install[Getting started with the OpenShift CLI]
* link:https://docs.redhat.com/en/documentation/openshift_container_platform/4.16/html/cli_tools/openshift-cli-oc#cli-about-cli_cli-developer-commands[About the OpenShift CLI] (OpenShift Container Platform documentation)
* link:https://docs.redhat.com/en/documentation/openshift_container_platform/4.17/html/cli_tools/openshift-cli-oc#cli-about-cli_cli-developer-commands[About the OpenShift CLI] (OpenShift Container Platform documentation)
* link:https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/9[Red Hat Enterprise Linux (RHEL) documentation for specific use cases]
* xref:../microshift_configuring/microshift-cluster-access-kubeconfig.adoc#microshift-kubeconfig[Cluster access with kubeconfig]

View File

@@ -13,7 +13,7 @@ include::modules/microshift-low-latency-concept.adoc[leveloffset=+1]
//additional resources for the low latency concept
[role="_additional-resources"]
.Additional resources
* link:https://docs.redhat.com/en/documentation/openshift_container_platform/4.16/html/scalability_and_performance/low-latency-tuning#cnf-understanding-low-latency_cnf-understanding-low-latency[About low latency] (OpenShift Container Platform documentation)
* link:https://docs.redhat.com/en/documentation/openshift_container_platform/4.17/html/scalability_and_performance/low-latency-tuning#cnf-understanding-low-latency_cnf-understanding-low-latency[About low latency] (OpenShift Container Platform documentation)
include::modules/microshift-install-rpms-low-latency.adoc[leveloffset=+1]
@@ -83,11 +83,11 @@ include::modules/microshift-low-latency-prepare-workload.adoc[leveloffset=+1]
[id="additional-resources-prep-workload_{context}"]
.Additional resources
* link:https://docs.openshift.com/container-platform/4.16/scalability_and_performance/low_latency_tuning/cnf-provisioning-low-latency-workloads.html#cnf-configuring-high-priority-workload-pods_cnf-provisioning-low-latency[Disabling power saving mode for high priority pods] (Red Hat OpenShift Container Platform documentation)
* link:https://docs.openshift.com/container-platform/4.17/scalability_and_performance/low_latency_tuning/cnf-provisioning-low-latency-workloads.html#cnf-configuring-high-priority-workload-pods_cnf-provisioning-low-latency[Disabling power saving mode for high priority pods] (Red Hat OpenShift Container Platform documentation)
* link:https://docs.openshift.com/container-platform/4.16/scalability_and_performance/low_latency_tuning/cnf-provisioning-low-latency-workloads.html#cnf-disabling-cpu-cfs-quota_cnf-provisioning-low-latency[Disabling CPU CFS quota] (Red Hat OpenShift Container Platform documentation)
* link:https://docs.openshift.com/container-platform/4.17/scalability_and_performance/low_latency_tuning/cnf-provisioning-low-latency-workloads.html#cnf-disabling-cpu-cfs-quota_cnf-provisioning-low-latency[Disabling CPU CFS quota] (Red Hat OpenShift Container Platform documentation)
* link:https://docs.openshift.com/container-platform/4.16/scalability_and_performance/low_latency_tuning/cnf-provisioning-low-latency-workloads.html#cnf-disabling-interrupt-processing-for-individual-pods_cnf-provisioning-low-latency[Disabling interrupt processing for CPUs where pinned containers are running] (Red Hat OpenShift Container Platform documentation)
* link:https://docs.openshift.com/container-platform/4.17/scalability_and_performance/low_latency_tuning/cnf-provisioning-low-latency-workloads.html#cnf-disabling-interrupt-processing-for-individual-pods_cnf-provisioning-low-latency[Disabling interrupt processing for CPUs where pinned containers are running] (Red Hat OpenShift Container Platform documentation)
include::modules/microshift-low-latency-rhel-edge-blueprint-rtk.adoc[leveloffset=+1]

View File

@@ -26,4 +26,4 @@ include::modules/microshift-cni-multus-troubleshoot.adoc[leveloffset=+1]
== Additional resources
* xref:../../microshift_networking/microshift_multiple_networks/microshift-cni-multus.adoc#microshift-cni-multus[About using multiple networks]
* link:https://access.redhat.com/documentation/en-us/openshift_container_platform/4.16/html/networking/multiple-networks#nw-multus-ipam-object_configuring-additional-network[Configuration of IP address assignment for an additional network]
* link:https://access.redhat.com/documentation/en-us/openshift_container_platform/4.17/html/networking/multiple-networks#nw-multus-ipam-object_configuring-additional-network[Configuration of IP address assignment for an additional network]

View File

@@ -31,4 +31,4 @@ The *Developer* perspective provides workflows specific to developer use cases,
You can use the *Topology* view to display applications, components, and workloads of your project. If you have no workloads in the project, the *Topology* view will show some links to create or import them. You can also use the *Quick Search* to import components directly.
.Additional resources
See link:https://docs.openshift.com/container-platform/4.16/applications/odc-viewing-application-composition-using-topology-view.html[Viewing application composition using the Topology] view for more information on using the *Topology* view in *Developer* perspective.
See link:https://docs.openshift.com/container-platform/4.17/applications/odc-viewing-application-composition-using-topology-view.html[Viewing application composition using the Topology] view for more information on using the *Topology* view in *Developer* perspective.

View File

@@ -27,7 +27,7 @@ $ ./openshift-install version
.Example output
[source,terminal]
----
./openshift-install 4.16.0
./openshift-install 4.17.0
built from commit abc123def456
release image quay.io/openshift-release-dev/ocp-release@sha256:123abc456def789ghi012jkl345mno678pqr901stu234vwx567yz0
release architecture amd64

View File

@@ -43,7 +43,7 @@ providerSpec:
name: openstack-cloud-credentials
namespace: openshift-machine-api
flavor: m1.xlarge
image: rhcos-4.16
image: rhcos-4.17
kind: OpenstackProviderSpec
metadata:
creationTimestamp: null

View File

@@ -38,9 +38,9 @@ $ ./openshift-install version
.Example output for a shared registry binary
[source,terminal,subs="quotes"]
----
./openshift-install 4.16.0
./openshift-install 4.17.0
built from commit ae7977b7d1ca908674a0d45c5c243c766fa4b2ca
release image registry.ci.openshift.org/origin/release:4.16ocp-release@sha256:0da6316466d60a3a4535d5fed3589feb0391989982fba59d47d4c729912d6363
release image registry.ci.openshift.org/origin/release:4.17ocp-release@sha256:0da6316466d60a3a4535d5fed3589feb0391989982fba59d47d4c729912d6363
release architecture amd64
----
====

View File

@@ -22,7 +22,7 @@ You can host different versions of control planes on the same management cluster
----
apiVersion: v1
data:
supported-versions: '{"versions":["4.16"]}'
supported-versions: '{"versions":["4.17"]}'
kind: ConfigMap
metadata:
labels:

View File

@@ -20,7 +20,7 @@ You must run a gather operation to create an Insights Operator archive.
+
[source,yaml]
----
include::https://raw.githubusercontent.com/openshift/insights-operator/release-4.16/docs/gather-job.yaml[]
include::https://raw.githubusercontent.com/openshift/insights-operator/release-4.17/docs/gather-job.yaml[]
----
. Copy your `insights-operator` image version:
+

View File

@@ -20,10 +20,10 @@ bootstrap machine that you need for your {product-title} cluster:
[source,json]
----
ifndef::ash[]
include::https://raw.githubusercontent.com/openshift/installer/release-4.16/upi/azure/04_bootstrap.json[]
include::https://raw.githubusercontent.com/openshift/installer/release-4.17/upi/azure/04_bootstrap.json[]
endif::ash[]
ifdef::ash[]
include::https://raw.githubusercontent.com/openshift/installer/release-4.16/upi/azurestack/04_bootstrap.json[]
include::https://raw.githubusercontent.com/openshift/installer/release-4.17/upi/azurestack/04_bootstrap.json[]
endif::ash[]
----
====

View File

@@ -20,10 +20,10 @@ control plane machines that you need for your {product-title} cluster:
[source,json]
----
ifndef::ash[]
include::https://raw.githubusercontent.com/openshift/installer/release-4.16/upi/azure/05_masters.json[]
include::https://raw.githubusercontent.com/openshift/installer/release-4.17/upi/azure/05_masters.json[]
endif::ash[]
ifdef::ash[]
include::https://raw.githubusercontent.com/openshift/installer/release-4.16/upi/azurestack/05_masters.json[]
include::https://raw.githubusercontent.com/openshift/installer/release-4.17/upi/azurestack/05_masters.json[]
endif::ash[]
----
====

View File

@@ -21,10 +21,10 @@ cluster:
[source,json]
----
ifndef::ash[]
include::https://raw.githubusercontent.com/openshift/installer/release-4.16/upi/azure/03_infra.json[]
include::https://raw.githubusercontent.com/openshift/installer/release-4.17/upi/azure/03_infra.json[]
endif::ash[]
ifdef::ash[]
include::https://raw.githubusercontent.com/openshift/installer/release-4.16/upi/azurestack/03_infra.json[]
include::https://raw.githubusercontent.com/openshift/installer/release-4.17/upi/azurestack/03_infra.json[]
endif::ash[]
----
====

View File

@@ -20,10 +20,10 @@ stored {op-system-first} image that you need for your {product-title} cluster:
[source,json]
----
ifndef::ash[]
include::https://raw.githubusercontent.com/openshift/installer/release-4.16/upi/azure/02_storage.json[]
include::https://raw.githubusercontent.com/openshift/installer/release-4.17/upi/azure/02_storage.json[]
endif::ash[]
ifdef::ash[]
include::https://raw.githubusercontent.com/openshift/installer/release-4.16/upi/azurestack/02_storage.json[]
include::https://raw.githubusercontent.com/openshift/installer/release-4.17/upi/azurestack/02_storage.json[]
endif::ash[]
----
====

View File

@@ -20,10 +20,10 @@ VNet that you need for your {product-title} cluster:
[source,json]
----
ifndef::ash[]
include::https://raw.githubusercontent.com/openshift/installer/release-4.16/upi/azure/01_vnet.json[]
include::https://raw.githubusercontent.com/openshift/installer/release-4.17/upi/azure/01_vnet.json[]
endif::ash[]
ifdef::ash[]
include::https://raw.githubusercontent.com/openshift/installer/release-4.16/upi/azurestack/01_vnet.json[]
include::https://raw.githubusercontent.com/openshift/installer/release-4.17/upi/azurestack/01_vnet.json[]
endif::ash[]
----
====

View File

@@ -20,10 +20,10 @@ worker machines that you need for your {product-title} cluster:
[source,json]
----
ifndef::ash[]
include::https://raw.githubusercontent.com/openshift/installer/release-4.16/upi/azure/06_workers.json[]
include::https://raw.githubusercontent.com/openshift/installer/release-4.17/upi/azure/06_workers.json[]
endif::ash[]
ifdef::ash[]
include::https://raw.githubusercontent.com/openshift/installer/release-4.16/upi/azurestack/06_workers.json[]
include::https://raw.githubusercontent.com/openshift/installer/release-4.17/upi/azurestack/06_workers.json[]
endif::ash[]
----
====

View File

@@ -13,6 +13,6 @@ You can use the following CloudFormation template to deploy the bootstrap machin
====
[source,yaml]
----
include::https://raw.githubusercontent.com/openshift/installer/release-4.16/upi/aws/cloudformation/04_cluster_bootstrap.yaml[]
include::https://raw.githubusercontent.com/openshift/installer/release-4.17/upi/aws/cloudformation/04_cluster_bootstrap.yaml[]
----
====

View File

@@ -14,6 +14,6 @@ machines that you need for your {product-title} cluster.
====
[source,yaml]
----
include::https://raw.githubusercontent.com/openshift/installer/release-4.16/upi/aws/cloudformation/05_cluster_master_nodes.yaml[]
include::https://raw.githubusercontent.com/openshift/installer/release-4.17/upi/aws/cloudformation/05_cluster_master_nodes.yaml[]
----
====

View File

@@ -14,7 +14,7 @@ objects and load balancers that you need for your {product-title} cluster.
====
[source,yaml]
----
include::https://raw.githubusercontent.com/openshift/installer/release-4.16/upi/aws/cloudformation/02_cluster_infra.yaml[]
include::https://raw.githubusercontent.com/openshift/installer/release-4.17/upi/aws/cloudformation/02_cluster_infra.yaml[]
----
====

View File

@@ -14,6 +14,6 @@ that you need for your {product-title} cluster.
====
[source,yaml]
----
include::https://raw.githubusercontent.com/openshift/installer/release-4.16/upi/aws/cloudformation/03_cluster_security.yaml[]
include::https://raw.githubusercontent.com/openshift/installer/release-4.17/upi/aws/cloudformation/03_cluster_security.yaml[]
----
====

View File

@@ -14,6 +14,6 @@ you need for your {product-title} cluster.
====
[source,yaml]
----
include::https://raw.githubusercontent.com/openshift/installer/release-4.16/upi/aws/cloudformation/01_vpc.yaml[]
include::https://raw.githubusercontent.com/openshift/installer/release-4.17/upi/aws/cloudformation/01_vpc.yaml[]
----
====

View File

@@ -14,6 +14,6 @@ that you need for your {product-title} cluster.
====
[source,yaml]
----
include::https://raw.githubusercontent.com/openshift/installer/release-4.16/upi/aws/cloudformation/06_cluster_worker_node.yaml[]
include::https://raw.githubusercontent.com/openshift/installer/release-4.17/upi/aws/cloudformation/06_cluster_worker_node.yaml[]
----
====

View File

@@ -14,6 +14,6 @@ machine that you need for your {product-title} cluster:
====
[source,python]
----
include::https://raw.githubusercontent.com/openshift/installer/release-4.16/upi/gcp/04_bootstrap.py[]
include::https://raw.githubusercontent.com/openshift/installer/release-4.17/upi/gcp/04_bootstrap.py[]
----
====

View File

@@ -14,6 +14,6 @@ plane machines that you need for your {product-title} cluster:
====
[source,python]
----
include::https://raw.githubusercontent.com/openshift/installer/release-4.16/upi/gcp/05_control_plane.py[]
include::https://raw.githubusercontent.com/openshift/installer/release-4.17/upi/gcp/05_control_plane.py[]
----
====

View File

@@ -12,6 +12,6 @@ You can use the following Deployment Manager template to deploy the external loa
====
[source,python]
----
include::https://raw.githubusercontent.com/openshift/installer/release-4.16/upi/gcp/02_lb_ext.py[]
include::https://raw.githubusercontent.com/openshift/installer/release-4.17/upi/gcp/02_lb_ext.py[]
----
====

View File

@@ -12,6 +12,6 @@ You can use the following Deployment Manager template to deploy the firewall rue
====
[source,python]
----
include::https://raw.githubusercontent.com/openshift/installer/release-4.16/upi/gcp/03_firewall.py[]
include::https://raw.githubusercontent.com/openshift/installer/release-4.17/upi/gcp/03_firewall.py[]
----
====

View File

@@ -12,6 +12,6 @@ You can use the following Deployment Manager template to deploy the IAM roles th
====
[source,python]
----
include::https://raw.githubusercontent.com/openshift/installer/release-4.16/upi/gcp/03_iam.py[]
include::https://raw.githubusercontent.com/openshift/installer/release-4.17/upi/gcp/03_iam.py[]
----
====

View File

@@ -12,7 +12,7 @@ You can use the following Deployment Manager template to deploy the internal loa
====
[source,python]
----
include::https://raw.githubusercontent.com/openshift/installer/release-4.16/upi/gcp/02_lb_int.py[]
include::https://raw.githubusercontent.com/openshift/installer/release-4.17/upi/gcp/02_lb_int.py[]
----
====

View File

@@ -12,6 +12,6 @@ You can use the following Deployment Manager template to deploy the private DNS
====
[source,python]
----
include::https://raw.githubusercontent.com/openshift/installer/release-4.16/upi/gcp/02_dns.py[]
include::https://raw.githubusercontent.com/openshift/installer/release-4.17/upi/gcp/02_dns.py[]
----
====

View File

@@ -14,6 +14,6 @@ you need for your {product-title} cluster:
====
[source,python]
----
include::https://raw.githubusercontent.com/openshift/installer/release-4.16/upi/gcp/01_vpc.py[]
include::https://raw.githubusercontent.com/openshift/installer/release-4.17/upi/gcp/01_vpc.py[]
----
====

View File

@@ -14,6 +14,6 @@ that you need for your {product-title} cluster:
====
[source,python]
----
include::https://raw.githubusercontent.com/openshift/installer/release-4.16/upi/gcp/06_worker.py[]
include::https://raw.githubusercontent.com/openshift/installer/release-4.17/upi/gcp/06_worker.py[]
----
====

View File

@@ -12,7 +12,7 @@ your {product-title} nodes.
.Procedure
ifndef::openshift-origin[]
. Obtain the {op-system} image from the link:https://mirror.openshift.com/pub/openshift-v4/dependencies/rhcos/4.16/[{op-system} image mirror] page.
. Obtain the {op-system} image from the link:https://mirror.openshift.com/pub/openshift-v4/dependencies/rhcos/4.17/[{op-system} image mirror] page.
+
[IMPORTANT]
====

View File

@@ -13,7 +13,7 @@
.Procedure
. Download the installation program from https://mirror.openshift.com/pub/openshift-v4/clients/ocp/latest-4.16/openshift-install-rhel9-amd64.tar.gz.
. Download the installation program from https://mirror.openshift.com/pub/openshift-v4/clients/ocp/latest-4.17/openshift-install-rhel9-amd64.tar.gz.
. Extract the installation program. For example, on a computer that uses a Linux operating system, run the following command:
+
[source,terminal]

View File

@@ -85,10 +85,10 @@ $ openshift-install coreos print-stream-json | grep '\.iso[^.]'
[source,terminal]
ifndef::openshift-origin[]
----
"location": "<url>/art/storage/releases/rhcos-4.16-aarch64/<release>/aarch64/rhcos-<release>-live.aarch64.iso",
"location": "<url>/art/storage/releases/rhcos-4.16-ppc64le/<release>/ppc64le/rhcos-<release>-live.ppc64le.iso",
"location": "<url>/art/storage/releases/rhcos-4.16-s390x/<release>/s390x/rhcos-<release>-live.s390x.iso",
"location": "<url>/art/storage/releases/rhcos-4.16/<release>/x86_64/rhcos-<release>-live.x86_64.iso",
"location": "<url>/art/storage/releases/rhcos-4.17-aarch64/<release>/aarch64/rhcos-<release>-live.aarch64.iso",
"location": "<url>/art/storage/releases/rhcos-4.17-ppc64le/<release>/ppc64le/rhcos-<release>-live.ppc64le.iso",
"location": "<url>/art/storage/releases/rhcos-4.17-s390x/<release>/s390x/rhcos-<release>-live.s390x.iso",
"location": "<url>/art/storage/releases/rhcos-4.17/<release>/x86_64/rhcos-<release>-live.x86_64.iso",
----
endif::openshift-origin[]
ifdef::openshift-origin[]

View File

@@ -101,18 +101,18 @@ $ openshift-install coreos print-stream-json | grep -Eo '"https.*(kernel-|initra
[source,terminal]
ifndef::openshift-origin[]
----
"<url>/art/storage/releases/rhcos-4.16-aarch64/<release>/aarch64/rhcos-<release>-live-kernel-aarch64"
"<url>/art/storage/releases/rhcos-4.16-aarch64/<release>/aarch64/rhcos-<release>-live-initramfs.aarch64.img"
"<url>/art/storage/releases/rhcos-4.16-aarch64/<release>/aarch64/rhcos-<release>-live-rootfs.aarch64.img"
"<url>/art/storage/releases/rhcos-4.16-ppc64le/49.84.202110081256-0/ppc64le/rhcos-<release>-live-kernel-ppc64le"
"<url>/art/storage/releases/rhcos-4.16-ppc64le/<release>/ppc64le/rhcos-<release>-live-initramfs.ppc64le.img"
"<url>/art/storage/releases/rhcos-4.16-ppc64le/<release>/ppc64le/rhcos-<release>-live-rootfs.ppc64le.img"
"<url>/art/storage/releases/rhcos-4.16-s390x/<release>/s390x/rhcos-<release>-live-kernel-s390x"
"<url>/art/storage/releases/rhcos-4.16-s390x/<release>/s390x/rhcos-<release>-live-initramfs.s390x.img"
"<url>/art/storage/releases/rhcos-4.16-s390x/<release>/s390x/rhcos-<release>-live-rootfs.s390x.img"
"<url>/art/storage/releases/rhcos-4.16/<release>/x86_64/rhcos-<release>-live-kernel-x86_64"
"<url>/art/storage/releases/rhcos-4.16/<release>/x86_64/rhcos-<release>-live-initramfs.x86_64.img"
"<url>/art/storage/releases/rhcos-4.16/<release>/x86_64/rhcos-<release>-live-rootfs.x86_64.img"
"<url>/art/storage/releases/rhcos-4.17-aarch64/<release>/aarch64/rhcos-<release>-live-kernel-aarch64"
"<url>/art/storage/releases/rhcos-4.17-aarch64/<release>/aarch64/rhcos-<release>-live-initramfs.aarch64.img"
"<url>/art/storage/releases/rhcos-4.17-aarch64/<release>/aarch64/rhcos-<release>-live-rootfs.aarch64.img"
"<url>/art/storage/releases/rhcos-4.17-ppc64le/<release>/ppc64le/rhcos-<release>-live-kernel-ppc64le"
"<url>/art/storage/releases/rhcos-4.17-ppc64le/<release>/ppc64le/rhcos-<release>-live-initramfs.ppc64le.img"
"<url>/art/storage/releases/rhcos-4.17-ppc64le/<release>/ppc64le/rhcos-<release>-live-rootfs.ppc64le.img"
"<url>/art/storage/releases/rhcos-4.17-s390x/<release>/s390x/rhcos-<release>-live-kernel-s390x"
"<url>/art/storage/releases/rhcos-4.17-s390x/<release>/s390x/rhcos-<release>-live-initramfs.s390x.img"
"<url>/art/storage/releases/rhcos-4.17-s390x/<release>/s390x/rhcos-<release>-live-rootfs.s390x.img"
"<url>/art/storage/releases/rhcos-4.17/<release>/x86_64/rhcos-<release>-live-kernel-x86_64"
"<url>/art/storage/releases/rhcos-4.17/<release>/x86_64/rhcos-<release>-live-initramfs.x86_64.img"
"<url>/art/storage/releases/rhcos-4.17/<release>/x86_64/rhcos-<release>-live-rootfs.x86_64.img"
----
endif::openshift-origin[]
ifdef::openshift-origin[]

View File

@@ -79,7 +79,7 @@ If you plan to add more compute machines to your cluster after you finish instal
====
ifndef::openshift-origin[]
. Obtain the {op-system} OVA image. Images are available from the link:https://mirror.openshift.com/pub/openshift-v4/dependencies/rhcos/4.16/[{op-system} image mirror] page.
. Obtain the {op-system} OVA image. Images are available from the link:https://mirror.openshift.com/pub/openshift-v4/dependencies/rhcos/4.17/[{op-system} image mirror] page.
+
[IMPORTANT]
====

View File

@@ -84,7 +84,7 @@ $ oc get clusterserviceversion -n openshift-nmstate \
[source, terminal,subs="attributes+"]
----
Name Phase
kubernetes-nmstate-operator.4.16.0-202210210157 Succeeded
kubernetes-nmstate-operator.4.17.0-202210210157 Succeeded
----
. Create an instance of the `nmstate` Operator:

View File

@@ -10,7 +10,7 @@
* You installed the {loki-op}.
* You installed the {oc-first}.
* You deployed link:https://access.redhat.com/documentation/en-us/red_hat_openshift_data_foundation/[{rh-storage}].
* You configured your {rh-storage} cluster link:https://access.redhat.com/documentation/en-us/red_hat_openshift_data_foundation/4.16/html/managing_and_allocating_storage_resources/adding-file-and-object-storage-to-an-existing-external-ocs-cluster[for object storage].
* You configured your {rh-storage} cluster link:https://access.redhat.com/documentation/en-us/red_hat_openshift_data_foundation/4.17/html/managing_and_allocating_storage_resources/adding-file-and-object-storage-to-an-existing-external-ocs-cluster[for object storage].
.Procedure

View File

@@ -32,7 +32,7 @@ The output of this command includes pull specs for the available updates similar
Recommended updates:
VERSION IMAGE
4.16.0 quay.io/openshift-release-dev/ocp-release@sha256:6a899c54dda6b844bb12a247e324a0f6cde367e880b73ba110c056df6d018032
4.17.0 quay.io/openshift-release-dev/ocp-release@sha256:6a899c54dda6b844bb12a247e324a0f6cde367e880b73ba110c056df6d018032
...
----

View File

@@ -48,7 +48,7 @@ EOF
+
[NOTE]
====
The wildcard `*` in the commands uses the latest {microshift-short} RPMs. If you need a specific version, substitute the wildcard for the version you want. For example, insert `4.16.0` to download the {microshift-short} 4.16.0 RPMs.
The wildcard `*` in the commands uses the latest {microshift-short} RPMs. If you need a specific version, substitute the wildcard for the version you want. For example, insert `4.17.1` to download the {microshift-short} 4.17.1 RPMs.
====
. Optional. Use the blueprint installed in the `/usr/share/microshift/blueprint` directory that is specific to your platform architecture. See the following example snippet for an explanation of the blueprint sections:
@@ -58,14 +58,14 @@ The wildcard `*` in the commands uses the latest {microshift-short} RPMs. If you
[source,text]
----
name = "microshift_blueprint"
description = "MicroShift 4.16.1 on x86_64 platform"
description = "MicroShift 4.17.1 on x86_64 platform"
version = "0.0.1"
modules = []
groups = []
[[packages]] <1>
name = "microshift"
version = "4.16.1"
version = "4.17.1"
...
...
@@ -115,11 +115,11 @@ $ sudo composer-cli blueprints depsolve __<microshift_blueprint>__ | grep micros
[source,terminal]
----
blueprint: microshift_blueprint v0.0.1
microshift-greenboot-4.16.1-202305250827.p0.g4105d3b.assembly.4.16.1.el9.noarch
microshift-networking-4.16.1-202305250827.p0.g4105d3b.assembly.4.16.1.el9.x86_64
microshift-release-info-4.16.1-202305250827.p0.g4105d3b.assembly.4.16.1.el9.noarch
microshift-4.16.1-202305250827.p0.g4105d3b.assembly.4.16.1.el9.x86_64
microshift-selinux-4.16.1-202305250827.p0.g4105d3b.assembly.4.16.1.el9.noarch
microshift-greenboot-4.17.1-202305250827.p0.g4105d3b.assembly.4.17.1.el9.noarch
microshift-networking-4.17.1-202305250827.p0.g4105d3b.assembly.4.17.1.el9.x86_64
microshift-release-info-4.17.1-202305250827.p0.g4105d3b.assembly.4.17.1.el9.noarch
microshift-4.17.1-202305250827.p0.g4105d3b.assembly.4.17.1.el9.x86_64
microshift-selinux-4.17.1-202305250827.p0.g4105d3b.assembly.4.17.1.el9.noarch
----
//need updated example output
. Optional: Verify the image builder configuration listing all components to be installed by running the following command:

View File

@@ -29,7 +29,7 @@ You can use image builder to create `rpm-ostree` system images with embedded {mi
----
$ sudo dnf install -y microshift-release-info-<release_version>
----
Replace `<release_version>` with the numerical value of the release you are deploying, using the entire version number, such as `4.16.0`.
Replace `<release_version>` with the numerical value of the release you are deploying, using the entire version number, such as `4.17.1`.
.. List the contents of the `/usr/share/microshift/release` directory to verify the presence of the release information files by running the following command:
+
@@ -55,14 +55,14 @@ If you installed the `microshift-release-info` RPM, you can proceed to step 4.
----
$ sudo dnf download microshift-release-info-<release_version>
----
Replace `<release_version>` with the numerical value of the release you are deploying, using the entire version number, such as `4.16.0`.
Replace `<release_version>` with the numerical value of the release you are deploying, using the entire version number, such as `4.17.1`.
+
.Example rpm
[source,terminal]
----
microshift-release-info-4.16.0.*.el9.noarch.rpm <1>
microshift-release-info-4.17.1.*.el9.noarch.rpm <1>
----
<1> The `*` represents the date and commit ID. Your output should contain both, for example `-202311101230.p0.g7dc6a00.assembly.4.16.0`.
<1> The `*` represents the date and commit ID. Your output should contain both, for example `-202311101230.p0.g7dc6a00.assembly.4.17.1`.
.. Unpack the RPM package without installing it by running the following command:
+

View File

@@ -21,7 +21,7 @@ $ microshift-etcd version
.Example output
[source,terminal,subs="attributes+"]
----
microshift-etcd Version: 4.16.0
microshift-etcd Version: 4.17.1
Base etcd Version: 3.5.13
----
@@ -38,7 +38,7 @@ $ microshift-etcd version -o json
{
"major": "4",
"minor": "17",
"gitVersion": "4.16.0~rc.1",
"gitVersion": "4.17.1~rc.1",
"gitCommit": "140777711962eb4e0b765c39dfd325fb0abb3622",
"gitTreeState": "clean",
"buildDate": "2024-05-10T16:37:53Z",

View File

@@ -19,25 +19,24 @@ To see the images included with a {microshift-short} release, you can list the c
+
[source,terminal]
----
$ sudo dnf repoquery microshift-release-info-0:4.16.* # <1>
$ sudo dnf repoquery microshift-release-info-0:4.17.* # <1>
----
<1> Replace the example value `4.16.*` with the major and minor release numbers you are interested in.
<1> Replace the example value `4.17.*` with the major and minor release numbers you are interested in.
+
.Example output
[source,terminal]
----
Updating Subscription Management repositories.
microshift-release-info-0:4.16.0-202406260523.p0.gc5a37df.assembly.4.16.0.el9.noarch
microshift-release-info-0:4.16.1-202406281132.p0.g8babeb9.assembly.4.16.1.el9.noarch
microshift-release-info-0:4.16.10-202408291007.p0.g6e4ee4d.assembly.4.16.10.el9.noarch
microshift-release-info-0:4.16.2-202407040825.p0.g2e0407e.assembly.4.16.2.el9.noarch
microshift-release-info-0:4.16.3-202407111123.p0.ge4206d3.assembly.4.16.3.el9.noarch
microshift-release-info-0:4.16.4-202407191908.p0.g057a9af.assembly.4.16.4.el9.noarch
microshift-release-info-0:4.16.5-202407250951.p0.g0afcb57.assembly.4.16.5.el9.noarch
microshift-release-info-0:4.16.6-202408010822.p0.gc4ded66.assembly.4.16.6.el9.noarch
microshift-release-info-0:4.16.7-202408081107.p0.g0597bb8.assembly.4.16.7.el9.noarch
microshift-release-info-0:4.16.8-202408150851.p0.gc8a3bb1.assembly.4.16.8.el9.noarch
microshift-release-info-0:4.16.9-202408220842.p0.gefa92a2.assembly.4.16.9.el9.noarch
microshift-release-info-0:4.17.1-202406281132.p0.g8babeb9.assembly.4.17.1.el9.noarch
microshift-release-info-0:4.17.10-202408291007.p0.g6e4ee4d.assembly.4.17.10.el9.noarch
microshift-release-info-0:4.17.2-202407040825.p0.g2e0407e.assembly.4.17.2.el9.noarch
microshift-release-info-0:4.17.3-202407111123.p0.ge4206d3.assembly.4.17.3.el9.noarch
microshift-release-info-0:4.17.4-202407191908.p0.g057a9af.assembly.4.17.4.el9.noarch
microshift-release-info-0:4.17.5-202407250951.p0.g0afcb57.assembly.4.17.5.el9.noarch
microshift-release-info-0:4.17.6-202408010822.p0.gc4ded66.assembly.4.17.6.el9.noarch
microshift-release-info-0:4.17.7-202408081107.p0.g0597bb8.assembly.4.17.7.el9.noarch
microshift-release-info-0:4.17.8-202408150851.p0.gc8a3bb1.assembly.4.17.8.el9.noarch
microshift-release-info-0:4.17.9-202408220842.p0.gefa92a2.assembly.4.17.9.el9.noarch
----
. Download the RPM package you want by running the following command:
@@ -48,7 +47,7 @@ microshift-release-info-0:4.16.9-202408220842.p0.gefa92a2.assembly.4.16.9.el9.no
----
$ sudo dnf download microshift-release-info-_<release_version>_ # <1>
----
<1> Replace `_<release_version>_` with the numerical value of the release you are deploying, using the entire version number, for example, `4.16.0`.
<1> Replace `_<release_version>_` with the numerical value of the release you are deploying, using the entire version number, for example, `4.17.1`.
--
+
--
@@ -56,7 +55,7 @@ $ sudo dnf download microshift-release-info-_<release_version>_ # <1>
[subs="+quotes"]
[source,terminal]
----
microshift-release-info-4.16.0.-202311101230.p0.g7dc6a00.assembly.4.16.0.el9.noarch.rpm # <1>
microshift-release-info-4.17.1.-202311101230.p0.g7dc6a00.assembly.4.17.1.el9.noarch.rpm # <1>
----
<1> Your output should contain the date and commit ID.
--
@@ -68,7 +67,7 @@ microshift-release-info-4.16.0.-202311101230.p0.g7dc6a00.assembly.4.16.0.el9.noa
----
$ rpm2cpio _<microshift_release_info>_ | cpio -idmv # <1>
----
<1> Replace `_<microshift_release_info>_` with the name of the RPM package from the previous step. For example, `microshift-release-info-4.16.10-202408291007.p0.g6e4ee4d.assembly.4.16.10.el9.noarch.rpm`.
<1> Replace `_<microshift_release_info>_` with the name of the RPM package from the previous step. For example, `microshift-release-info-4.17.10-202408291007.p0.g6e4ee4d.assembly.4.17.10.el9.noarch.rpm`.
+
.Example output
[source,terminal]
@@ -94,7 +93,7 @@ $ cat ./usr/share/microshift/release/release-x86_64.json
----
{
"release": {
"base": "4.16.10"
"base": "4.17.10"
},
"images": {
"cli": "....

View File

@@ -28,7 +28,7 @@ Image mode for {op-system-base} is Technology Preview. Using a bootc image in pr
----
FROM registry.redhat.io/rhel9/rhel-bootc:9.4
ARG USHIFT_VER=4.16
ARG USHIFT_VER=4.17
RUN dnf config-manager \
--set-enabled rhocp-${USHIFT_VER}-for-rhel-9-$(uname -m)-rpms \
--set-enabled fast-datapath-for-rhel-9-$(uname -m)-rpms
@@ -77,7 +77,7 @@ Podman uses the host subscription information and repositories inside the contai
----
PULL_SECRET=~/.pull-secret.json
USER_PASSWD=<your_redhat_user_password> # <1>
IMAGE_NAME=microshift-4.16-bootc
IMAGE_NAME=microshift-4.17-bootc
$ sudo podman build --authfile "${PULL_SECRET}" -t "${IMAGE_NAME}" \
--build-arg USER_PASSWD="${USER_PASSWD}" \
@@ -106,5 +106,5 @@ $ sudo podman images "${IMAGE_NAME}"
[source,text]
----
REPOSITORY TAG IMAGE ID CREATED SIZE
localhost/microshift-4.16-bootc latest 193425283c00 2 minutes ago 2.31 GB
localhost/microshift-4.17-bootc latest 193425283c00 2 minutes ago 2.31 GB
----

View File

@@ -38,7 +38,7 @@ $ find /lib/modules/$(uname -r) -name "openvswitch*"
+
[source,terminal]
----
$ IMAGE_NAME=microshift-4.16-bootc
$ IMAGE_NAME=microshift-4.17-bootc
----
. Check the version of the kernel-core `package` used in the bootc image by running the following command:

View File

@@ -31,12 +31,12 @@ $ sudo podman login "${REGISTRY_URL}" # <1>
[source,terminal]
----
REGISTRY_IMG=<myorg/mypath>/"${IMAGE_NAME}" # <1> <2>
IMAGE_NAME=<microshift-4.16-bootc> # <3>
IMAGE_NAME=<microshift-4.17-bootc> # <3>
$ sudo podman push localhost/"${IMAGE_NAME}" "${REGISTRY_URL}/${REGISTRY_IMG}"
----
<1> Replace _<myorg/mypath>_ with your remote registry organization name and path.
<2> Replace _<microshift-4.16-bootc>_ with the name of the image you want to publish.
<2> Replace _<microshift-4.17-bootc>_ with the name of the image you want to publish.
.Verification

View File

@@ -21,7 +21,7 @@ Run your {microshift-short} bootc container to explore its reduced complexity an
[source,terminal]
----
PULL_SECRET=~/.pull-secret.json
IMAGE_NAME=microshift-4.16-bootc
IMAGE_NAME=microshift-4.17-bootc
$ sudo podman run --rm -it --privileged \
-v "${PULL_SECRET}":/etc/crio/openshift-pull-secret:ro \

View File

@@ -43,10 +43,10 @@ storageConfig:
mirror:
platform: # <1>
channels:
- name: stable-4.16
- name: stable-4.17
type: ocp
operators:
- catalog: registry.redhat.io/redhat/redhat-operator-index:v4.16
- catalog: registry.redhat.io/redhat/redhat-operator-index:v4.17
packages:
- name: serverless-operator
channels:
@@ -72,7 +72,7 @@ storageConfig: <1>
skipTLS: false
mirror:
operators:
- catalog: registry.redhat.io/redhat/redhat-operator-index:v4.16 <3>
- catalog: registry.redhat.io/redhat/redhat-operator-index:v4.17 <3>
packages:
- name: amq-broker-rhel8 <4>
channels:

View File

@@ -27,14 +27,14 @@ For catalogs made for proprietary Operators, you can format image references for
+
[source,terminal]
----
jq -r --slurp '.[] | select(.relatedImages != null) | "[[containers]]\nsource = \"" + .relatedImages[].image + "\"\n"' ./oc-mirror-workspace/src/catalogs/registry.redhat.io/redhat/redhat-operator-index/v4.16/index/index.json
$ jq -r --slurp '.[] | select(.relatedImages != null) | "[[containers]]\nsource = \"" + .relatedImages[].image + "\"\n"' ./oc-mirror-workspace/src/catalogs/registry.redhat.io/redhat/redhat-operator-index/v4.17/index/index.json
----
.. If you want to filter out images that cannot be mirrored, filter and parse the catalog `index.json` file by running the following command:
+
[source,terminal]
----
$ jq -r --slurp '.[] | select(.relatedImages != null) | .relatedImages[] | select(.name | contains("ppc") or contains("s390x") | not) | "[[containers]]\\nsource = \\"" + .image + "\\"\\n"' ./oc-mirror-workspace/src/catalogs/registry.redhat.io/redhat/redhat-operator-index/v4.16/index/index.json
$ jq -r --slurp '.[] | select(.relatedImages != null) | .relatedImages[] | select(.name | contains("ppc") or contains("s390x") | not) | "[[containers]]\\nsource = \\"" + .image + "\\"\\n"' ./oc-mirror-workspace/src/catalogs/registry.redhat.io/redhat/redhat-operator-index/v4.17/index/index.json
----
+
[NOTE]
@@ -84,21 +84,21 @@ storageConfig:
imageURL: registry.example.com/microshift-mirror
mirror:
operators:
- catalog: registry.redhat.io/redhat/redhat-operator-index:v4.16 <1>
- catalog: registry.redhat.io/redhat/redhat-operator-index:v4.17 <1>
packages:
- name: amq-broker-rhel8
channels:
- name: 7.11.x
----
<1> Use the value in the `mirror.catalog` catalog image reference for the follwing `jq` command to get the image digest. In this example, _<registry.redhat.io/redhat/redhat-operator-index:v4.16>_.
<1> Use the value in the `mirror.catalog` catalog image reference for the following `jq` command to get the image digest. In this example, _<registry.redhat.io/redhat/redhat-operator-index:v4.17>_.
. Get the SHA of the catalog index image by running the following command:
+
[source,terminal]
----
$ skopeo inspect docker://<registry.redhat.io/redhat/redhat-operator-index:v4.16> | jq `.Digest` <1>
$ skopeo inspect docker://<registry.redhat.io/redhat/redhat-operator-index:v4.17> | jq `.Digest` <1>
----
<1> Use the value in the `mirror.catalog` catalog image reference for the `jq` command to get the image digest. In this example, _<registry.redhat.io/redhat/redhat-operator-index:v4.16>_.
<1> Use the value in the `mirror.catalog` catalog image reference for the `jq` command to get the image digest. In this example, _<registry.redhat.io/redhat/redhat-operator-index:v4.17>_.
+
.Example output
[source,terminal]
@@ -120,14 +120,14 @@ source = "registry.redhat.io/redhat/redhat-operator-index@sha256:7a76c0880a83903
[source,text]
----
name = "microshift_blueprint"
description = "MicroShift 4.16.1 on x86_64 platform"
description = "MicroShift 4.17.1 on x86_64 platform"
version = "0.0.1"
modules = []
groups = []
[[packages]] <1>
name = "microshift"
version = "4.16.1"
version = "4.17.1"
...
...

View File

@@ -32,7 +32,7 @@ metadata:
namespace: openshift-marketplace <1>
spec:
sourceType: grpc
image: registry.example.com/redhat/redhat-operator-index:v4.16
image: registry.example.com/redhat/redhat-operator-index:v4.17
updateStrategy:
registryPoll:
interval: 60m

View File

@@ -42,13 +42,13 @@ $ oc mirror list operators <--catalog=<catalog_source>> <1>
+
[source,terminal]
----
$ oc mirror list operators --catalog=registry.redhat.io/redhat/redhat-operator-index:v4.16 --package=amq-broker-rhel8
$ oc mirror list operators --catalog=registry.redhat.io/redhat/redhat-operator-index:v4.17 --package=amq-broker-rhel8
----
.. Get a list of versions within a channel by running the following command:
+
[source,terminal]
----
$ oc mirror list operators --catalog=registry.redhat.io/redhat/redhat-operator-index:v4.16 --package=amq-broker-rhel8 --channel=7.11.x
$ oc mirror list operators --catalog=registry.redhat.io/redhat/redhat-operator-index:v4.17 --package=amq-broker-rhel8 --channel=7.11.x
----
.Next steps

View File

@@ -26,7 +26,7 @@ metadata:
name: cs-redhat-operator-index
namespace: openshift-marketplace <1>
spec:
image: registry.example.com/redhat/redhat-operator-index:v4.16
image: registry.example.com/redhat/redhat-operator-index:v4.17
sourceType: grpc
displayName:
publisher:

View File

@@ -32,9 +32,9 @@ Check the following update paths:
*{product-title} update paths*
* Generally Available Version 4.16.0 to 4.16.z on {op-system-ostree} 9.4
* Generally Available Version 4.17.1 to 4.17.z on {op-system-ostree} 9.4
* Generally Available Version 4.15.0 from {op-system-base} 9.2 to 4.16.0 on {op-system-base} 9.4
* Generally Available Version 4.14.0 from {op-system-base} 9.2 to 4.16.0 on {op-system-base} 9.4
* Generally Available Version 4.14.0 from {op-system-base} 9.2 to 4.15.0 on {op-system-base} 9.4
[id="microshift-ostree-update-failed_{context}"]
== OSTree update failed

View File

@@ -16,7 +16,7 @@ remote cluster:: A source or destination cluster for a migration that runs Veler
You must use the compatible {mtc-short} version for migrating your {product-title} clusters. For the migration to succeed, both your source cluster and the destination cluster must use the same version of {mtc-short}.
{mtc-short} 1.7 supports migrations from {product-title} 3.11 to 4.16.
{mtc-short} 1.7 supports migrations from {product-title} 3.11 to 4.17.
{mtc-short} 1.8 only supports migrations from {product-title} 4.14 and later.

View File

@@ -37,7 +37,7 @@ New installations use SQLite by default. Before upgrading to version 2.0, see "U
[id="mirror-registry-release-notes-1-3_{context}"]
== Mirror registry for Red{nbsp}Hat OpenShift 1.3 release notes
To view the _mirror registry for Red{nbsp}Hat OpenShift_ 1.3 release notes, see link:https://docs.openshift.com/container-platform/4.16/installing/disconnected_install/installing-mirroring-creating-registry.html#mirror-registry-release-notes-1-3_installing-mirroring-creating-registry[Mirror registry for Red{nbsp}Hat OpenShift 1.3 release notes].
To view the _mirror registry for Red{nbsp}Hat OpenShift_ 1.3 release notes, see link:https://docs.openshift.com/container-platform/4.17/installing/disconnected_install/installing-mirroring-creating-registry.html#mirror-registry-release-notes-1-3_installing-mirroring-creating-registry[Mirror registry for Red{nbsp}Hat OpenShift 1.3 release notes].
[id="mirror-registry-release-notes-1-2_{context}"]
== Mirror registry for Red{nbsp}Hat OpenShift 1.2 release notes

View File

@@ -19,7 +19,7 @@ Before you set a `NodePort`-type `Service` for each project, read the following
* You installed the {oc-first}.
* Logged in as a user with `cluster-admin` privileges.
* You created a wildcard DNS record.
// https://docs.openshift.com/container-platform/4.16/networking/ingress-controller-dnsmgt.html (does not detail how to create the DNS)
// https://docs.openshift.com/container-platform/4.17/networking/ingress-controller-dnsmgt.html (does not detail how to create the DNS)
.Procedure

View File

@@ -6,7 +6,7 @@
[id="nw-ptp-wpc-hardware-pins-reference_{context}"]
= Intel Westport Channel E810 hardware configuration reference
Use this information to understand how to use the link:https://github.com/openshift/linuxptp-daemon/blob/release-4.16/addons/intel/e810.go[Intel E810-XXVDA4T hardware plugin] to configure the E810 network interface as PTP grandmaster clock.
Use this information to understand how to use the link:https://github.com/openshift/linuxptp-daemon/blob/release-4.17/addons/intel/e810.go[Intel E810-XXVDA4T hardware plugin] to configure the E810 network interface as PTP grandmaster clock.
Hardware pin configuration determines how the network interface interacts with other components and devices in the system.
The E810-XXVDA4T NIC has four connectors for external 1PPS signals: `SMA1`, `SMA2`, `U.FL1`, and `U.FL2`.

View File

@@ -25,7 +25,7 @@ If you do not use Google workload identity federation cloud authentication, cont
.Prerequisites
* You have installed a cluster in manual mode with link:https://docs.openshift.com/container-platform/4.16/installing/installing_gcp/installing-gcp-customizations.html#installing-gcp-with-short-term-creds_installing-gcp-customizations[GCP Workload Identity configured].
* You have installed a cluster in manual mode with link:https://docs.openshift.com/container-platform/4.17/installing/installing_gcp/installing-gcp-customizations.html#installing-gcp-with-short-term-creds_installing-gcp-customizations[GCP Workload Identity configured].
* You have access to the Cloud Credential Operator utility (`ccoctl`) and to the associated workload identity pool.
.Procedure

View File

@@ -129,7 +129,7 @@ operators:
|`mirror.operators.catalog`
|The Operator catalog to include in the image set.
|String. For example: `registry.redhat.io/redhat/redhat-operator-index:v4.16`.
|String. For example: `registry.redhat.io/redhat/redhat-operator-index:v4.17`.
|`mirror.operators.full`
|When `true`, downloads the full catalog, Operator package, or Operator channel.
@@ -158,7 +158,7 @@ operators:
|`mirror.operators.packages.channels.name`
|The Operator channel name, unique within a package, to include in the image set.
|String. For example: `fast` or `stable-v4.16`.
|String. For example: `fast` or `stable-v4.17`.
|`mirror.operators.packages.channels.maxVersion`
|The highest version of the Operator mirror across all channels in which it exists. See the following note for further information.
@@ -238,7 +238,7 @@ channels:
|`mirror.platform.channels.name`
|The name of the release channel.
|String. For example: `stable-4.16`
|String. For example: `stable-4.17`
|`mirror.platform.channels.minVersion`
|The minimum version of the referenced platform to be mirrored.
@@ -246,7 +246,7 @@ channels:
|`mirror.platform.channels.maxVersion`
|The highest version of the referenced platform to be mirrored.
|String. For example: `4.16.1`
|String. For example: `4.17.1`
|`mirror.platform.channels.shortestPath`
|Toggles shortest path mirroring or full range mirroring.

View File

@@ -13,7 +13,7 @@ You can access the *Administrator* and *Developer* perspective from the web cons
To access a perspective, ensure that you have logged in to the web console. Your default perspective is automatically determined by the permission of the users. The *Administrator* perspective is selected for users with access to all projects, while the *Developer* perspective is selected for users with limited access to their own projects.
.Additional resources
See link:https://docs.openshift.com/container-platform/4.16/web_console/adding-user-preferences.html[Adding User Preferences] for more information on changing perspectives.
See link:https://docs.openshift.com/container-platform/4.17/web_console/adding-user-preferences.html[Adding User Preferences] for more information on changing perspectives.
.Procedure

View File

@@ -14,18 +14,18 @@ endif::[]
Operator compatibility with the underlying cluster can be expressed by a catalog source in various ways. One way, which is used for the default Red Hat-provided catalog sources, is to identify image tags for index images that are specifically created for a particular platform release, for example {product-title} {product-version}.
During a cluster upgrade, the index image tag for the default Red Hat-provided catalog sources are updated automatically by the Cluster Version Operator (CVO) so that Operator Lifecycle Manager (OLM) pulls the updated version of the catalog. For example during an upgrade from {product-title} 4.15 to 4.16, the `spec.image` field in the `CatalogSource` object for the `redhat-operators` catalog is updated from:
During a cluster upgrade, the index image tag for the default Red Hat-provided catalog sources are updated automatically by the Cluster Version Operator (CVO) so that Operator Lifecycle Manager (OLM) pulls the updated version of the catalog. For example during an upgrade from {product-title} 4.16 to 4.17, the `spec.image` field in the `CatalogSource` object for the `redhat-operators` catalog is updated from:
[source,terminal]
----
registry.redhat.io/redhat/redhat-operator-index:v4.15
registry.redhat.io/redhat/redhat-operator-index:v4.16
----
to:
[source,terminal]
----
registry.redhat.io/redhat/redhat-operator-index:v4.16
registry.redhat.io/redhat/redhat-operator-index:v4.17
----
However, the CVO does not automatically update image tags for custom catalogs. To ensure users are left with a compatible and supported Operator installation after a cluster upgrade, custom catalogs should also be kept updated to reference an updated index image.

View File

@@ -99,7 +99,7 @@ Spec:
Source:
Image:
Pull Secret: redhat-cred
Ref: registry.redhat.io/redhat/redhat-operator-index:v4.16
Ref: registry.redhat.io/redhat/redhat-operator-index:v4.17
Type: image
Status: <1>
Conditions:
@@ -114,7 +114,7 @@ Status: <1>
Resolved Source:
Image:
Last Poll Attempt: 2024-06-10T17:35:10Z
Ref: registry.redhat.io/redhat/redhat-operator-index:v4.16
Ref: registry.redhat.io/redhat/redhat-operator-index:v4.17
Resolved Ref: registry.redhat.io/redhat/redhat-operator-index@sha256:f2ccc079b5e490a50db532d1dc38fd659322594dcf3e653d650ead0e862029d9 <4>
Type: image
Events: <none>

View File

@@ -305,7 +305,7 @@ Tags that are added by Red{nbsp}Hat are required for clusters to stay in complia
====
|--version string
|The version of ROSA that will be used to install the cluster or cluster resources. For `cluster` use an `X.Y.Z` format, for example, `4.16.0`. For `account-role` use an `X.Y` format, for example, `4.16`.
|The version of ROSA that will be used to install the cluster or cluster resources. For `cluster` use an `X.Y.Z` format, for example, `4.17.0`. For `account-role` use an `X.Y` format, for example, `4.17`.
|--worker-iam-role string
|The ARN of the IAM role that will be attached to compute instances.

View File

@@ -75,9 +75,9 @@ ifdef::hcp[]
----
I: Fetching account roles
ROLE NAME ROLE TYPE ROLE ARN OPENSHIFT VERSION AWS Managed
ManagedOpenShift-HCP-ROSA-Installer-Role Installer arn:aws:iam::<aws_account_id>:role/ManagedOpenShift-HCP-ROSA-Installer-Role 4.16 Yes
ManagedOpenShift-HCP-ROSA-Support-Role Support arn:aws:iam::<aws_account_id>:role/ManagedOpenShift-HCP-ROSA-Support-Role 4.16 Yes
ManagedOpenShift-HCP-ROSA-Worker-Role Worker arn:aws:iam::<aws_account_id>:role/ManagedOpenShift-HCP-ROSA-Worker-Role 4.16 Yes
ManagedOpenShift-HCP-ROSA-Installer-Role Installer arn:aws:iam::<aws_account_id>:role/ManagedOpenShift-HCP-ROSA-Installer-Role 4.17 Yes
ManagedOpenShift-HCP-ROSA-Support-Role Support arn:aws:iam::<aws_account_id>:role/ManagedOpenShift-HCP-ROSA-Support-Role 4.17 Yes
ManagedOpenShift-HCP-ROSA-Worker-Role Worker arn:aws:iam::<aws_account_id>:role/ManagedOpenShift-HCP-ROSA-Worker-Role 4.17 Yes
----
endif::hcp[]
.. Delete the account-wide roles:

View File

@@ -40,7 +40,7 @@ Display Name: test_cluster
ID: <cluster_id> <1>
External ID: <external_id>
Control Plane: ROSA Service Hosted
OpenShift Version: 4.16.0
OpenShift Version: 4.17.0
Channel Group: stable
DNS: test_cluster.l3cn.p3.openshiftapps.com
AWS Account: <AWS_id>

View File

@@ -56,7 +56,7 @@ Display Name: rosa-ext-test
ID: <cluster_id>
External ID: <cluster_ext_id>
Control Plane: ROSA Service Hosted
OpenShift Version: 4.16.3
OpenShift Version: 4.17.0
Channel Group: stable
DNS: <dns>
AWS Account: <AWS_id>

View File

@@ -11,7 +11,7 @@ This section lists the `aws` CLI commands that the `rosa` command generates in t
[id="rosa-sts-account-wide-role-and-policy-aws-cli-manual-mode_{context}"]
== Using manual mode for account role creation
The manual role creation mode generates the `aws` commands for you to review and run. The following command starts that process, where `<openshift_version>` refers to your version of {product-title} (ROSA), such as `4.16`.
The manual role creation mode generates the `aws` commands for you to review and run. The following command starts that process, where `<openshift_version>` refers to your version of {product-title} (ROSA), such as `4.17`.
[source,terminal]
----

View File

@@ -7,7 +7,7 @@
This section provides details about the account-wide IAM roles and policies that are required for ROSA deployments that use STS, including the Operator policies. It also includes the JSON files that define the policies.
The account-wide roles and policies are specific to an OpenShift minor release version, for example OpenShift 4.16, and are backward compatible. You can minimize the required STS resources by reusing the account-wide roles and policies for multiple clusters of the same minor version, regardless of their patch version.
The account-wide roles and policies are specific to an OpenShift minor release version, for example OpenShift 4.17, and are backward compatible. You can minimize the required STS resources by reusing the account-wide roles and policies for multiple clusters of the same minor version, regardless of their patch version.
[id="rosa-sts-account-wide-roles-and-policies-creation-methods_{context}"]
== Methods of account-wide role creation

View File

@@ -251,7 +251,7 @@ While the ROSA (`rosa`) CLI offers a permission boundary function, it applies to
+
[source,terminal]
----
$ curl -o ./rosa-installer-core.json https://raw.githubusercontent.com/openshift/managed-cluster-config/master/resources/sts/4.16/sts_installer_core_permission_boundary_policy.json
$ curl -o ./rosa-installer-core.json https://raw.githubusercontent.com/openshift/managed-cluster-config/master/resources/sts/4.17/sts_installer_core_permission_boundary_policy.json
----
. Create the policy in AWS and gather its Amazon Resource Name (ARN) by entering the following command:

View File

@@ -224,7 +224,7 @@ Any optional fields can be left empty and a default will be selected.
? Create cluster admin user: Yes <2>
? Username: user-admin <2>
? Password: [? for help] *************** <2>
? OpenShift version: 4.16.0 <3>
? OpenShift version: 4.17.0 <3>
? Configure the use of IMDSv2 for ec2 instances optional/required (optional): <4>
I: Using arn:aws:iam::<aws_account_id>:role/ManagedOpenShift-Installer-Role for the Installer role <5>
I: Using arn:aws:iam::<aws_account_id>:role/ManagedOpenShift-ControlPlane-Role for the ControlPlane role
@@ -254,7 +254,7 @@ I: Using arn:aws:iam::<aws_account_id>:role/ManagedOpenShift-Support-Role for th
? Disable Workload monitoring (optional): No
I: Creating cluster '<cluster_name>'
I: To create this cluster again in the future, you can run:
rosa create cluster --cluster-name <cluster_name> --role-arn arn:aws:iam::<aws_account_id>:role/ManagedOpenShift-Installer-Role --support-role-arn arn:aws:iam::<aws_account_id>:role/ManagedOpenShift-Support-Role --master-iam-role arn:aws:iam::<aws_account_id>:role/ManagedOpenShift-ControlPlane-Role --worker-iam-role arn:aws:iam::<aws_account_id>:role/ManagedOpenShift-Worker-Role --operator-roles-prefix <cluster_name>-<random_string> --region us-east-1 --version 4.16.0 --additional-compute-security-group-ids sg-0e375ff0ec4a6cfa2 --additional-infra-security-group-ids sg-0e375ff0ec4a6cfa2 --additional-control-plane-security-group-ids sg-0e375ff0ec4a6cfa2 --replicas 2 --machine-cidr 10.0.0.0/16 --service-cidr 172.30.0.0/16 --pod-cidr 10.128.0.0/14 --host-prefix 23 <14>
rosa create cluster --cluster-name <cluster_name> --role-arn arn:aws:iam::<aws_account_id>:role/ManagedOpenShift-Installer-Role --support-role-arn arn:aws:iam::<aws_account_id>:role/ManagedOpenShift-Support-Role --master-iam-role arn:aws:iam::<aws_account_id>:role/ManagedOpenShift-ControlPlane-Role --worker-iam-role arn:aws:iam::<aws_account_id>:role/ManagedOpenShift-Worker-Role --operator-roles-prefix <cluster_name>-<random_string> --region us-east-1 --version 4.17.0 --additional-compute-security-group-ids sg-0e375ff0ec4a6cfa2 --additional-infra-security-group-ids sg-0e375ff0ec4a6cfa2 --additional-control-plane-security-group-ids sg-0e375ff0ec4a6cfa2 --replicas 2 --machine-cidr 10.0.0.0/16 --service-cidr 172.30.0.0/16 --pod-cidr 10.128.0.0/14 --host-prefix 23 <14>
I: To view a list of clusters and their status, run 'rosa list clusters'
I: Cluster '<cluster_name>' has been created.
I: Once the cluster is installed you will need to add an Identity Provider before you can login into the cluster. See 'rosa create idp --help' for more information.
@@ -262,7 +262,7 @@ I: Once the cluster is installed you will need to add an Identity Provider befor
----
<1> Optional. When creating your cluster, you can customize the subdomain for your cluster on `*.openshiftapps.com` using the `--domain-prefix` flag. The value for this flag must be unique within your organization, cannot be longer than 15 characters, and cannot be changed after cluster creation. If the flag is not supplied, an autogenerated value is created that depends on the length of the cluster name. If the cluster name is fewer than or equal to 15 characters, that name is used for the domain prefix. If the cluster name is longer than 15 characters, the domain prefix is randomly generated to a 15 character string.
<2> When creating your cluster, you can create a local administrator user for your cluster. Selecting `Yes` then prompts you to create a user name and password for the cluster admin. The user name must not contain `/`, `:`, or `%`. The password must be at least 14 characters (ASCII-standard) without whitespaces. This process automatically configures an htpasswd identity provider.
<3> When creating the cluster, the listed `OpenShift version` options include the major, minor, and patch versions, for example `4.16.0`.
<3> When creating the cluster, the listed `OpenShift version` options include the major, minor, and patch versions, for example `4.17.0`.
<4> Optional: Specify 'optional' to configure all EC2 instances to use both v1 and v2 endpoints of EC2 Instance Metadata Service (IMDS). This is the default value. Specify 'required' to configure all EC2 instances to use IMDSv2 only.
+
ifdef::openshift-rosa[]

View File

@@ -14,7 +14,7 @@ The default value is 4,096 in {product-title} 4.11 and later. This value is cont
* Maximum number of PIDs per node.
+
The default value depends on link:https://access.redhat.com/documentation/en-us/openshift_container_platform/4.16/html-single/nodes/index#nodes-nodes-resources-configuring[node resources]. In {product-title}, this value is controlled by the link:https://kubernetes.io/docs/tasks/administer-cluster/reserve-compute-resources/#system-reserved[`--system-reserved`] parameter, which reserves PIDs on each node based on the total resources of the node.
The default value depends on link:https://access.redhat.com/documentation/en-us/openshift_container_platform/4.17/html-single/nodes/index#nodes-nodes-resources-configuring[node resources]. In {product-title}, this value is controlled by the link:https://kubernetes.io/docs/tasks/administer-cluster/reserve-compute-resources/#system-reserved[`--system-reserved`] parameter, which reserves PIDs on each node based on the total resources of the node.
When a pod exceeds the allowed maximum number of PIDs per pod, the pod might stop functioning correctly and might be evicted from the node. See link:https://kubernetes.io/docs/concepts/scheduling-eviction/node-pressure-eviction/#eviction-signals-and-thresholds[the Kubernetes documentation for eviction signals and thresholds] for more information.

View File

@@ -45,7 +45,7 @@ Both `http` and `event` trigger functions have the same template structure:
<dependency>
<groupId>junit</groupId>
<artifactId>junit</artifactId>
<version>4.16</version>
<version>4.17</version>
<scope>test</scope>
</dependency>
<dependency>

View File

@@ -11,12 +11,12 @@ New in this release::
Description::
The link:https://docs.openshift.com/container-platform/4.16/rest_api/node_apis/performanceprofile-performance-openshift-io-v2.html#spec-workloadhints[Performance Profile] can be used to configure a cluster in a high power, low power, or mixed mode.
The link:https://docs.openshift.com/container-platform/4.17/rest_api/node_apis/performanceprofile-performance-openshift-io-v2.html#spec-workloadhints[Performance Profile] can be used to configure a cluster in a high power, low power, or mixed mode.
The choice of power mode depends on the characteristics of the workloads running on the cluster, particularly how sensitive they are to latency.
Configure the maximum latency for a low-latency pod by using the per-pod power management C-states feature.
+
For more information, see link:https://docs.openshift.com/container-platform/4.16/scalability_and_performance/low_latency_tuning/cnf-tuning-low-latency-nodes-with-perf-profile.html#cnf-configuring-power-saving-for-nodes_cnf-low-latency-perf-profile[Configuring power saving for nodes].
For more information, see link:https://docs.openshift.com/container-platform/4.17/scalability_and_performance/low_latency_tuning/cnf-tuning-low-latency-nodes-with-perf-profile.html#cnf-configuring-power-saving-for-nodes_cnf-low-latency-perf-profile[Configuring power saving for nodes].
Limits and requirements::
* Power configuration relies on appropriate BIOS configuration, for example, enabling C-states and P-states. Configuration varies between hardware vendors.

View File

@@ -24,7 +24,7 @@ $ mkdir -p ./out
+
[source,terminal]
----
$ podman run -it registry.redhat.io/openshift4/telco-core-rds-rhel9:v4.16 | base64 -d | tar xv -C out
$ podman run -it registry.redhat.io/openshift4/telco-core-rds-rhel9:v4.17 | base64 -d | tar xv -C out
----
.Verification

View File

@@ -12,12 +12,12 @@ New in this release::
Description::
* The scheduler is a cluster-wide component responsible for selecting the right node for a given workload. It is a core part of the platform and does not require any specific configuration in the common deployment scenarios. However, there are few specific use cases described in the following section.
NUMA-aware scheduling can be enabled through the NUMA Resources Operator.
For more information, see link:https://docs.openshift.com/container-platform/4.16/scalability_and_performance/cnf-numa-aware-scheduling.html[Scheduling NUMA-aware workloads].
For more information, see link:https://docs.openshift.com/container-platform/4.17/scalability_and_performance/cnf-numa-aware-scheduling.html[Scheduling NUMA-aware workloads].
Limits and requirements::
* The default scheduler does not understand the NUMA locality of workloads. It only knows about the sum of all free resources on a worker node. This might cause workloads to be rejected when scheduled to a node with https://docs.openshift.com/container-platform/4.16/scalability_and_performance/using-cpu-manager.html#topology_manager_policies_using-cpu-manager-and-topology_manager[Topology manager policy] set to `single-numa-node` or `restricted`.
* The default scheduler does not understand the NUMA locality of workloads. It only knows about the sum of all free resources on a worker node. This might cause workloads to be rejected when scheduled to a node with https://docs.openshift.com/container-platform/4.17/scalability_and_performance/using-cpu-manager.html#topology_manager_policies_using-cpu-manager-and-topology_manager[Topology manager policy] set to `single-numa-node` or `restricted`.
** For example, consider a pod requesting 6 CPUs and being scheduled to an empty node that has 4 CPUs per NUMA node. The total allocatable capacity of the node is 8 CPUs and the scheduler will place the pod there. The node local admission will fail, however, as there are only 4 CPUs available in each of the NUMA nodes.
** All clusters with multi-NUMA nodes are required to use the https://docs.openshift.com/container-platform/4.16/scalability_and_performance/cnf-numa-aware-scheduling.html#installing-the-numa-resources-operator_numa-aware[NUMA Resources Operator]. The `machineConfigPoolSelector` of the NUMA Resources Operator must select all nodes where NUMA aligned scheduling is needed.
** All clusters with multi-NUMA nodes are required to use the https://docs.openshift.com/container-platform/4.17/scalability_and_performance/cnf-numa-aware-scheduling.html#installing-the-numa-resources-operator_numa-aware[NUMA Resources Operator]. The `machineConfigPoolSelector` of the NUMA Resources Operator must select all nodes where NUMA aligned scheduling is needed.
* All machine config pools must have consistent hardware configuration for example all nodes are expected to have the same NUMA zone count.
Engineering considerations::

View File

@@ -18,7 +18,7 @@ SR-IOV enables physical network interfaces (PFs) to be divided into multiple vir
Limits and requirements::
* The network interface controllers supported are listed in link:https://docs.openshift.com/container-platform/4.16/networking/hardware_networks/about-sriov.html#supported-devices_about-sriov[Supported devices]
* The network interface controllers supported are listed in link:https://docs.openshift.com/container-platform/4.17/networking/hardware_networks/about-sriov.html#supported-devices_about-sriov[Supported devices]
* SR-IOV and IOMMU enablement in BIOS: The SR-IOV Network Operator automatically enables IOMMU on the kernel command line.
* SR-IOV VFs do not receive link state updates from PF. If link down detection is needed, it must be done at the protocol level.
* `MultiNetworkPolicy` CRs can be applied to `netdevice` networks only.

View File

@@ -11,7 +11,7 @@ New in this release::
Description::
Configure system level performance.
See link:https://docs.openshift.com/container-platform/4.16/scalability_and_performance/ztp_far_edge/ztp-reference-cluster-configuration-for-vdu.html#ztp-du-configuring-host-firmware-requirements_sno-configure-for-vdu[Configuring host firmware for low latency and high performance] for recommended settings.
See link:https://docs.openshift.com/container-platform/4.17/scalability_and_performance/ztp_far_edge/ztp-reference-cluster-configuration-for-vdu.html#ztp-du-configuring-host-firmware-requirements_sno-configure-for-vdu[Configuring host firmware for low latency and high performance] for recommended settings.
+
If Ironic inspection is enabled, the firmware setting values are available from the per-cluster `BareMetalHost` CR on the hub cluster.
You enable Ironic inspection with a label in the `spec.clusters.nodes` field in the `SiteConfig` CR that you use to install the cluster.

View File

@@ -72,7 +72,7 @@ Variation must still meet the specified limits.
* Hardware without IRQ affinity support impacts isolated CPUs.
To ensure that pods with guaranteed whole CPU QoS have full use of the allocated CPU, all hardware in the server must support IRQ affinity.
For more information, see link:https://docs.openshift.com/container-platform/4.16/scalability_and_performance/cnf-low-latency-tuning.html#about_irq_affinity_setting_cnf-master[About support of IRQ affinity setting].
For more information, see link:https://docs.openshift.com/container-platform/4.17/scalability_and_performance/cnf-low-latency-tuning.html#about_irq_affinity_setting_cnf-master[About support of IRQ affinity setting].
:FeatureName: cgroup v1
include::snippets/deprecated-feature.adoc[]

View File

@@ -12,7 +12,7 @@ New in this release::
* You can configure the `linuxptp` services `ptp4l` and `phc2sys` as a highly available (HA) system clock for dual PTP boundary clocks (T-BC).
Description::
See link:https://docs.openshift.com/container-platform/4.16/scalability_and_performance/ztp_far_edge/ztp-reference-cluster-configuration-for-vdu.html#ztp-sno-du-configuring-ptp_sno-configure-for-vdu[PTP timing] for details of support and configuration of PTP in cluster nodes.
See link:https://docs.openshift.com/container-platform/4.17/scalability_and_performance/ztp_far_edge/ztp-reference-cluster-configuration-for-vdu.html#ztp-sno-du-configuring-ptp_sno-configure-for-vdu[PTP timing] for details of support and configuration of PTP in cluster nodes.
The DU node can run in the following modes:
+
* As an ordinary clock (OC) synced to a grandmaster clock or boundary clock (T-BC)
@@ -27,7 +27,7 @@ The DU node can run in the following modes:
+
--
Events and metrics for grandmaster clocks are a Tech Preview feature added in the 4.14 {rds} RDS. For more information see link:https://docs.openshift.com/container-platform/4.16/networking/ptp/using-ptp-events.html[Using the PTP hardware fast event notifications framework].
Events and metrics for grandmaster clocks are a Tech Preview feature added in the 4.14 {rds} RDS. For more information see link:https://docs.openshift.com/container-platform/4.17/networking/ptp/using-ptp-events.html[Using the PTP hardware fast event notifications framework].
You can subscribe applications to PTP events that happen on the node where the DU application is running.
--

View File

@@ -9,7 +9,7 @@
[NOTE]
====
Before {product-title} 4.16, unauthenticated groups were allowed access to some cluster roles. Clusters updated from versions before {product-title} 4.16 retain this access for unauthenticated groups.
Before {product-title} 4.16, unauthenticated groups were allowed access to some cluster roles. Clusters updated from versions before {product-title} 4.16 retain this access for unauthenticated groups.
====
For security reasons {product-title} {product-version} does not allow unauthenticated groups to have default access to cluster roles.

View File

@@ -6,7 +6,7 @@
[id="update-preparing-ack_{context}"]
= Providing the administrator acknowledgment
After you have evaluated your cluster for any removed APIs and have migrated any removed APIs, you can acknowledge that your cluster is ready to upgrade from {product-title} 4.15 to 4.16.
After you have evaluated your cluster for any removed APIs and have migrated any removed APIs, you can acknowledge that your cluster is ready to upgrade from {product-title} 4.16 to 4.17.
[WARNING]
====
@@ -19,9 +19,9 @@ Be aware that all responsibility falls on the administrator to ensure that all u
.Procedure
* Run the following command to acknowledge that you have completed the evaluation and your cluster is ready for the Kubernetes API removals in {product-title} 4.16:
* Run the following command to acknowledge that you have completed the evaluation and your cluster is ready for the Kubernetes API removals in {product-title} 4.17:
+
[source,terminal]
----
$ oc -n openshift-config patch cm admin-acks --patch '{"data":{"ack-4.15-kube-1.29-api-removals-in-4.16":"true"}}' --type=merge
$ oc -n openshift-config patch cm admin-acks --patch '{"data":{"ack-4.16-kube-1.30-api-removals-in-4.17":"true"}}' --type=merge
----

View File

@@ -37,7 +37,7 @@ From the {oci} web console, you must create the following resources:
|Specify the base domain of the cluster, such as `splat-oci.devcluster.openshift.com`. Provided you previously created a compartment on {oci}, you can get this information by going to *DNS management* -> *Zones* -> *List scope* and then selecting the parent compartment. Your base domain should show under the *Public zones* tab.
|*OpenShift version*
| Specify `OpenShift 4.16` or a later version.
| Specify `OpenShift 4.17` or a later version.
|*CPU architecture*
| Specify `x86_64` or `Arm64`.

View File

@@ -63,8 +63,8 @@ $ oc get co
[source,terminal]
----
NAME VERSION AVAILABLE PROGRESSING DEGRADED SINCE MESSAGE
authentication 4.16.0-0 True False False 6m18s
baremetal 4.16.0-0 True False False 2m42s
network 4.16.0-0 True True False 5m58s Progressing: …
authentication 4.17.0-0 True False False 6m18s
baremetal 4.17.0-0 True False False 2m42s
network 4.17.0-0 True True False 5m58s Progressing: …
----

View File

@@ -167,11 +167,11 @@ data:
status.failureReason: "" # <2>
status.startTimestamp: "2023-07-31T13:14:38Z" # <3>
status.completionTimestamp: "2023-07-31T13:19:41Z" # <4>
status.result.cnvVersion: 4.16.2 # <5>
status.result.cnvVersion: 4.17.2 # <5>
status.result.defaultStorageClass: trident-nfs <6>
status.result.goldenImagesNoDataSource: <data_import_cron_list> # <7>
status.result.goldenImagesNotUpToDate: <data_import_cron_list> # <8>
status.result.ocpVersion: 4.16.0 # <9>
status.result.ocpVersion: 4.17.0 # <9>
status.result.pvcBound: "true" # <10>
status.result.storageProfileMissingVolumeSnapshotClass: <storage_class_list> # <11>
status.result.storageProfilesWithEmptyClaimPropertySets: <storage_profile_list> # <12>

View File

@@ -16,4 +16,4 @@ For more information about using the Metro-DR solution for {rh-storage} with {Vi
[role="_additional-resources-dr"]
.Additional resources
* link:https://docs.redhat.com/en/documentation/red_hat_openshift_data_foundation/4.16/html/configuring_openshift_data_foundation_disaster_recovery_for_openshift_workloads/index[Configuring OpenShift Data Foundation Disaster Recovery for OpenShift Workloads]
* link:https://docs.redhat.com/en/documentation/red_hat_openshift_data_foundation/4.17/html/configuring_openshift_data_foundation_disaster_recovery_for_openshift_workloads/index[Configuring OpenShift Data Foundation Disaster Recovery for OpenShift Workloads]

View File

@@ -83,7 +83,7 @@ spec:
containers:
- name: wasp-agent
image: >-
registry.redhat.io/container-native-virtualization/wasp-agent-rhel9:v4.16
registry.redhat.io/container-native-virtualization/wasp-agent-rhel9:v4.17
imagePullPolicy: Always
env:
- name: "FSROOT"

View File

@@ -16,8 +16,8 @@ spec:
- spoke6
ibuSpec:
seedImageRef: <2>
image: quay.io/seed/image:4.16.0-rc.1
version: 4.16.0-rc.1
image: quay.io/seed/image:4.17.0-rc.1
version: 4.17.0-rc.1
pullSecretRef:
name: "<seed_pull_secret>"
extraManifests: <3>

View File

@@ -16,7 +16,7 @@ This procedure is specific to the link:https://github.com/openshift/aws-efs-csi-
{product-title} is capable of provisioning persistent volumes (PVs) using the link:https://github.com/openshift/aws-efs-csi-driver[AWS EFS CSI driver].
Familiarity with link:https://access.redhat.com/documentation/en-us/openshift_container_platform/4.16/html-single/storage/index#persistent-storage-overview_understanding-persistent-storage[persistent storage] and link:https://access.redhat.com/documentation/en-us/openshift_container_platform/4.16/html-single/storage/index#persistent-storage-csi[configuring CSI volumes] is recommended when working with a CSI Operator and driver.
Familiarity with link:https://access.redhat.com/documentation/en-us/openshift_container_platform/4.17/html-single/storage/index#persistent-storage-overview_understanding-persistent-storage[persistent storage] and link:https://access.redhat.com/documentation/en-us/openshift_container_platform/4.17/html-single/storage/index#persistent-storage-csi[configuring CSI volumes] is recommended when working with a CSI Operator and driver.
After installing the AWS EFS CSI Driver Operator, {product-title} installs the AWS EFS CSI Operator and the AWS EFS CSI driver by default in the `openshift-cluster-csi-drivers` namespace. This allows the AWS EFS CSI Driver Operator to create CSI-provisioned PVs that mount to AWS EFS assets.
@@ -87,5 +87,5 @@ include::modules/persistent-storage-csi-olm-operator-uninstall.adoc[leveloffset=
[role="_additional-resources"]
== Additional resources
* link:https://access.redhat.com/documentation/en-us/openshift_container_platform/4.16/html-single/storage/index#persistent-storage-csi[Configuring CSI volumes]
* link:https://access.redhat.com/documentation/en-us/openshift_container_platform/4.17/html-single/storage/index#persistent-storage-csi[Configuring CSI volumes]

View File

@@ -16,7 +16,7 @@ This procedure is specific to the Amazon Web Services Elastic File System (AWS E
{product-title} is capable of provisioning persistent volumes (PVs) using the Container Storage Interface (CSI) driver for AWS Elastic File Service (EFS).
Familiarity with link:https://access.redhat.com/documentation/en-us/openshift_container_platform/4.16/html-single/storage/index#persistent-storage-overview_understanding-persistent-storage[persistent storage] and link:https://access.redhat.com/documentation/en-us/openshift_container_platform/4.16/html-single/storage/index#persistent-storage-csi[configuring CSI volumes] is recommended when working with a CSI Operator and driver.
Familiarity with link:https://access.redhat.com/documentation/en-us/openshift_container_platform/4.17/html-single/storage/index#persistent-storage-overview_understanding-persistent-storage[persistent storage] and link:https://access.redhat.com/documentation/en-us/openshift_container_platform/4.17/html-single/storage/index#persistent-storage-csi[configuring CSI volumes] is recommended when working with a CSI Operator and driver.
After installing the AWS EFS CSI Driver Operator, {product-title} installs the AWS EFS CSI Operator and the AWS EFS CSI driver by default in the `openshift-cluster-csi-drivers` namespace. This allows the AWS EFS CSI Driver Operator to create CSI-provisioned PVs that mount to AWS EFS assets.
@@ -51,7 +51,7 @@ include::modules/persistent-storage-csi-efs-sts.adoc[leveloffset=+1]
* xref:../../storage/persistent_storage/rosa-persistent-storage-aws-efs-csi.adoc#persistent-storage-csi-olm-operator-install_rosa-persistent-storage-aws-efs-csi[Installing the AWS EFS CSI Driver Operator]
* link:https://access.redhat.com/documentation/en-us/openshift_container_platform/4.16/html-single/authentication_and_authorization/index#cco-ccoctl-configuring_cco-mode-sts[Configuring the Cloud Credential Operator utility]
* link:https://access.redhat.com/documentation/en-us/openshift_container_platform/4.17/html-single/authentication_and_authorization/index#cco-ccoctl-configuring_cco-mode-sts[Configuring the Cloud Credential Operator utility]
:StorageClass: AWS EFS
:Provisioner: efs.csi.aws.com
@@ -80,5 +80,5 @@ include::modules/persistent-storage-csi-olm-operator-uninstall.adoc[leveloffset=
[role="_additional-resources"]
== Additional resources
* link:https://access.redhat.com/documentation/en-us/openshift_container_platform/4.16/html-single/storage/index#persistent-storage-csi[Configuring CSI volumes]
* link:https://access.redhat.com/documentation/en-us/openshift_container_platform/4.17/html-single/storage/index#persistent-storage-csi[Configuring CSI volumes]

View File

@@ -1,6 +1,6 @@
:_mod-docs-content-type: ASSEMBLY
[id="updating-cluster-prepare"]
= Preparing to update to {product-title} 4.16
= Preparing to update to {product-title} 4.17
include::_attributes/common-attributes.adoc[]
:context: updating-cluster-prepare

View File

@@ -39,7 +39,7 @@ Collecting data about your environment minimizes the time required to analyze an
// must-gather not supported for ROSA/OSD, per Dustin Row
ifndef::openshift-rosa,openshift-dedicated[]
. xref:../../support/gathering-cluster-data.adoc#support_gathering_data_gathering-cluster-data[Collect must-gather data for the cluster].
. link:https://access.redhat.com/documentation/en-us/red_hat_openshift_data_foundation/4.16/html-single/troubleshooting_openshift_data_foundation/index#downloading-log-files-and-diagnostic-information_rhodf[Collect must-gather data for {rh-storage-first}], if necessary.
. link:https://access.redhat.com/documentation/en-us/red_hat_openshift_data_foundation/4.17/html-single/troubleshooting_openshift_data_foundation/index#downloading-log-files-and-diagnostic-information_rhodf[Collect must-gather data for {rh-storage-first}], if necessary.
. xref:../../virt/support/virt-collecting-virt-data.adoc#virt-using-virt-must-gather_virt-collecting-virt-data[Collect must-gather data for {VirtProductName}].
. xref:../../observability/monitoring/managing-metrics.adoc#querying-metrics-for-all-projects-as-an-administrator_managing-metrics[Collect Prometheus metrics for the cluster].
endif::openshift-rosa,openshift-dedicated[]