From bdfd58adfea6c4176396b801dfe98dc22f6ba3f6 Mon Sep 17 00:00:00 2001 From: dfitzmau Date: Fri, 28 Feb 2025 09:36:43 +0000 Subject: [PATCH] OSDOCS-13469-19: Release notes file template --- _topic_maps/_topic_map.yml | 10 +- .../planning-migration-3-4.adoc | 2 +- release_notes/addtl-release-notes.adoc | 4 +- release_notes/ocp-4-18-release-notes.adoc | 3019 ----------------- release_notes/ocp-4-19-release-notes.adoc | 1600 +++++++++ ...otes.adoc => virt-4-19-release-notes.adoc} | 4 +- 6 files changed, 1610 insertions(+), 3029 deletions(-) delete mode 100644 release_notes/ocp-4-18-release-notes.adoc create mode 100644 release_notes/ocp-4-19-release-notes.adoc rename virt/release_notes/{virt-4-18-release-notes.adoc => virt-4-19-release-notes.adoc} (99%) diff --git a/_topic_maps/_topic_map.yml b/_topic_maps/_topic_map.yml index 9f5d39f79f..6f1ad62413 100644 --- a/_topic_maps/_topic_map.yml +++ b/_topic_maps/_topic_map.yml @@ -54,8 +54,8 @@ Name: Release notes Dir: release_notes Distros: openshift-enterprise Topics: -- Name: OpenShift Container Platform 4.18 release notes - File: ocp-4-18-release-notes +- Name: OpenShift Container Platform 4.19 release notes + File: ocp-4-19-release-notes - Name: Additional release notes File: addtl-release-notes --- @@ -748,10 +748,10 @@ Topics: - Name: Preparing to update a cluster Dir: preparing_for_updates Topics: - - Name: Preparing to update to OpenShift Container Platform 4.17 + - Name: Preparing to update to OpenShift Container Platform 4.19 File: updating-cluster-prepare Distros: openshift-enterprise - - Name: Preparing to update to OKD 4.17 + - Name: Preparing to update to OKD 4.19 File: updating-cluster-prepare Distros: openshift-origin - Name: Preparing to update a cluster with manually maintained credentials @@ -4514,7 +4514,7 @@ Topics: Distros: openshift-enterprise Topics: - Name: OpenShift Virtualization release notes - File: virt-4-18-release-notes + File: virt-4-19-release-notes - Name: Getting started Dir: getting_started Topics: diff --git a/migrating_from_ocp_3_to_4/planning-migration-3-4.adoc b/migrating_from_ocp_3_to_4/planning-migration-3-4.adoc index 5125e81f18..3249d4260d 100644 --- a/migrating_from_ocp_3_to_4/planning-migration-3-4.adoc +++ b/migrating_from_ocp_3_to_4/planning-migration-3-4.adoc @@ -9,7 +9,7 @@ toc::[] {product-title} {product-version} introduces architectural changes and enhancements. The procedures that you used to manage your {product-title} 3 cluster might not apply to {product-title} 4. ifndef::openshift-origin[] -For information on configuring your {product-title} 4 cluster, review the appropriate sections of the {product-title} documentation. For information on new features and other notable technical changes, review the xref:../release_notes/ocp-4-18-release-notes.adoc#ocp-4-18-release-notes[OpenShift Container Platform {product-version} release notes]. +For information on configuring your {product-title} 4 cluster, review the appropriate sections of the {product-title} documentation. For information on new features and other notable technical changes, review the xref:../release_notes/ocp-4-19-release-notes.adoc#ocp-4-19-release-notes[OpenShift Container Platform {product-version} release notes]. endif::[] It is not possible to upgrade your existing {product-title} 3 cluster to {product-title} 4. You must start with a new {product-title} 4 installation. Tools are available to assist in migrating your control plane settings and application workloads.
diff --git a/release_notes/addtl-release-notes.adoc b/release_notes/addtl-release-notes.adoc index 5b52611fe0..2573b0a480 100644 --- a/release_notes/addtl-release-notes.adoc +++ b/release_notes/addtl-release-notes.adoc @@ -8,7 +8,7 @@ include::_attributes/servicebinding-document-attributes.adoc[] toc::[] -Release notes for additional related components and products not included in the core xref:../release_notes/ocp-4-18-release-notes.adoc#ocp-4-18-release-notes[{product-title} {product-version} release notes] are available in the following documentation. +Release notes for additional related components and products not included in the core xref:../release_notes/ocp-4-19-release-notes.adoc#ocp-4-19-release-notes[{product-title} {product-version} release notes] are available in the following documentation. [IMPORTANT] ==== @@ -74,7 +74,7 @@ xref:../service_mesh/v2x/servicemesh-release-notes.adoc#service-mesh-release-not {nbsp} + link:https://docs.openshift.com/service-mesh/3.0.0tp1/ossm-release-notes/ossm-release-notes-assembly.html[{SMProductName} 3.x] + {nbsp} + -xref:../virt/release_notes/virt-4-18-release-notes.adoc#virt-4-18-release-notes[Red Hat {VirtProductName}] + +xref:../virt/release_notes/virt-4-19-release-notes.adoc#virt-4-19-release-notes[Red Hat {VirtProductName}] + {nbsp} + xref:../observability/otel/otel-rn.adoc#otel-rn[{OTELName}] diff --git a/release_notes/ocp-4-18-release-notes.adoc b/release_notes/ocp-4-18-release-notes.adoc deleted file mode 100644 index e41c9e740a..0000000000 --- a/release_notes/ocp-4-18-release-notes.adoc +++ /dev/null @@ -1,3019 +0,0 @@ -:_mod-docs-content-type: ASSEMBLY -[id="ocp-4-18-release-notes"] -= {product-title} {product-version} release notes -include::_attributes/common-attributes.adoc[] -:context: release-notes - -toc::[] - -Red{nbsp}Hat {product-title} provides developers and IT organizations with a hybrid cloud application platform for deploying both new and existing applications on secure, scalable resources with minimal configuration and management. {product-title} supports a wide selection of programming languages and frameworks, such as Java, JavaScript, Python, Ruby, and PHP. - -Built on {op-system-base-full} and Kubernetes, {product-title} provides a more secure and scalable multitenant operating system for today's enterprise-class applications, while delivering integrated application runtimes and libraries. {product-title} enables organizations to meet security, privacy, compliance, and governance requirements. - -[id="ocp-4-18-about-this-release_{context}"] -== About this release - -// TODO: Update with the relevant information closer to release. -{product-title} (link:https://access.redhat.com/errata/RHSA-2024:6122[RHSA-2024:6122]) is now available. This release uses link:https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.31.md[Kubernetes 1.31] with CRI-O runtime. New features, changes, and known issues that pertain to {product-title} {product-version} are included in this topic. - -{product-title} {product-version} clusters are available at https://console.redhat.com/openshift. From the {hybrid-console}, you can deploy {product-title} clusters to either on-premises or cloud environments. - -// Double check OP system versions -{product-title} {product-version} is supported on {op-system-base-full} 8.8 and a later version of {op-system-base} 8 that is released before End of Life of {product-title} {product-version}. {product-title} {product-version} is also supported on {op-system-first}. 
To understand {op-system-base} versions used by {op-system}, see link:https://access.redhat.com/articles/6907891[{op-system-base} Versions Utilized by {op-system-first} and {product-title}] (Knowledgebase article). - -You must use {op-system} machines for the control plane, and you can use either {op-system} or {op-system-base} for compute machines. {op-system-base} machines are deprecated in {product-title} 4.16 and will be removed in a future release. -//Removed the note per https://issues.redhat.com/browse/GRPA-3517 - -//Even-numbered release lifecycle verbiage (Comment in for even-numbered releases) -Starting from {product-title} 4.14, the Extended Update Support (EUS) phase for even-numbered releases increases the total available lifecycle to 24 months on all supported architectures, including `x86_64`, 64-bit ARM (`aarch64`), {ibm-power-name} (`ppc64le`), and {ibm-z-name} (`s390x`) architectures. Beyond this, Red{nbsp}Hat also offers a 12-month additional EUS add-on, denoted as _Additional EUS Term 2_, that extends the total available lifecycle from 24 months to 36 months. The Additional EUS Term 2 is available on all architecture variants of {product-title}. For more information about support for all versions, see the link:https://access.redhat.com/support/policy/updates/openshift[Red Hat {product-title} Life Cycle Policy]. - -//Odd-numbered release lifecycle verbiage (Comment in for odd-numbered releases) -//// -The support lifecycle for odd-numbered releases, such as {product-title} {product-version}, on all supported architectures, including `x86_64`, 64-bit ARM (`aarch64`), {ibm-power-name} (`ppc64le`), and {ibm-z-name} (`s390x`) architectures is 18 months. For more information about support for all versions, see the link:https://access.redhat.com/support/policy/updates/openshift[Red Hat {product-title} Life Cycle Policy]. -//// - -Commencing with the {product-title} 4.14 release, Red{nbsp}Hat is simplifying the administration and management of Red{nbsp}Hat shipped cluster Operators with the introduction of three new life cycle classifications: Platform Aligned, Platform Agnostic, and Rolling Stream. These life cycle classifications provide additional ease and transparency for cluster administrators to understand the life cycle policies of each Operator and form cluster maintenance and upgrade plans with predictable support boundaries. For more information, see link:https://access.redhat.com/support/policy/updates/openshift_operators[OpenShift Operator Life Cycles]. - -// Added in 4.14. Language came directly from Kirsten Newcomer. -{product-title} is designed for FIPS. When running {op-system-base-full} or {op-system-first} booted in FIPS mode, {product-title} core components use the {op-system-base} cryptographic libraries that have been submitted to NIST for FIPS 140-2/140-3 Validation on only the `x86_64`, `ppc64le`, and `s390x` architectures. - -For more information about the NIST validation program, see link:https://csrc.nist.gov/Projects/cryptographic-module-validation-program/validated-modules[Cryptographic Module Validation Program]. For the latest NIST status for the individual versions of {op-system-base} cryptographic libraries that have been submitted for validation, see link:https://access.redhat.com/articles/2918071#fips-140-2-and-fips-140-3-2[Compliance Activities and Government Standards].
- -[id="ocp-4-18-add-on-support-status_{context}"] -== {product-title} layered and dependent component support and compatibility - -The scope of support for layered and dependent components of {product-title} changes independently of the {product-title} version. To determine the current support status and compatibility for an add-on, refer to its release notes. For more information, see the link:https://access.redhat.com/support/policy/updates/openshift[Red Hat {product-title} Life Cycle Policy]. - -[id="ocp-4-18-new-features-and-enhancements_{context}"] -== New features and enhancements - -This release adds improvements related to the following components and concepts: - -[id="ocp-release-notes-auth_{context}"] -=== Authentication and authorization - -[id="ocp-release-notes-auth-ccoctl-rotation_{context}"] -==== Rotating OIDC bound service account signer keys - -With this release, you can use the Cloud Credential Operator (CCO) utility (`ccoctl`) to rotate the OpenID Connect (OIDC) bound service account signer key for clusters installed on the following cloud providers: - -* xref:../post_installation_configuration/changing-cloud-credentials-configuration.adoc#rotating-bound-service-keys_key-rotation-aws[{aws-first} with {sts-first}] -* xref:../post_installation_configuration/changing-cloud-credentials-configuration.adoc#rotating-bound-service-keys_key-rotation-gcp[{gcp-first} with {gcp-wid-short}] -* xref:../post_installation_configuration/changing-cloud-credentials-configuration.adoc#rotating-bound-service-keys_key-rotation-azure[{azure-first} with {entra-short}] - -[id="ocp-release-notes-backup-restore_{context}"] -=== Backup and restore - -[id="ocp-4-18-hibernating_{context}"] -==== Hibernating a cluster for up to 90 days - -With this release, you can now hibernate your {product-title} cluster for up to 90 days and expect the cluster to recover successfully. Before this release, you could only hibernate for up to 30 days. - -For more information, see xref:../backup_and_restore/hibernating-cluster.adoc#hibernating-cluster[Hibernating an {product-title} cluster]. - -[id="ocp-4-18-etcd-backup-restore_{context}"] -==== Enhanced etcd backup and restore documentation - -The etcd disaster recovery documentation was updated and simplified for quicker recovery of the cluster, both in a normal disaster recovery situation and in cases where a full cluster restoration from a previous backup is not necessary. - -Two scripts, `quorum-restore.sh` and `cluster-restore.sh`, are introduced to complete many of the steps in the recovery procedure. - -In addition, a procedure was added to more quickly recover the cluster when at least one good node exists. If any of the surviving nodes meets specific criteria, you can use it to run the recovery. - -For more information, see xref:../backup_and_restore/control_plane_backup_and_restore/disaster_recovery/about-disaster-recovery.adoc#about-dr[About disaster recovery]. - -//// -[id="ocp-release-notes-builds_{context}"] -=== Builds - -[id="ocp-release-notes-cro_{context}"] -=== Cluster Resource Override Admission Operator -//// - -[id="ocp-release-notes-edge-computing_{context}"] -=== Edge computing - -[id="ocp-4-18-edge-computing-sno-shutdown_{context}"] -==== Shutting down and restarting {sno} clusters up to 1 year after cluster installation - -With this release, you can shut down and restart {sno} clusters up to 1 year after cluster installation. 
If certificates expired while the cluster was shut down, you must approve certificate signing requests (CSRs) upon restarting the cluster. - -Before this update, you could shut down and restart {sno} clusters for only 120 days after cluster installation. - -[IMPORTANT] -==== -Evacuate all workload pods from the {sno} cluster before you shut it down. -==== - -For more information, see xref:../backup_and_restore/graceful-cluster-shutdown.adoc#graceful-shutdown-cluster[Shutting down the cluster gracefully]. - -[id="ocp-release-notes-extensions_{context}"] -=== Extensions ({olmv1}) - -[id="ocp-release-notes-extensions-olmv1-ga_{context}"] -==== {olmv1-first} (General Availability) - -Operator Lifecycle Manager (OLM) has been included with {product-title} 4 since its initial release and has helped enable and grow a substantial ecosystem of solutions and advanced workloads running as Operators. - -{product-title} {product-version} introduces _{olmv1}_, the next-generation Operator Lifecycle Manager, as a General Availability (GA) feature, designed to improve how you manage Operators on {product-title}. - -With {olmv1} now generally available, starting in {product-title} {product-version}, the existing version of OLM that has been included since the launch of {product-title} 4 is now known as _{olmv0}_. - -Previously available as a Technology Preview feature only, the updated framework in {olmv1} evolves many of the concepts that have been part of {olmv0} by simplifying Operator management, enhancing security, and boosting reliability. - -[IMPORTANT] -==== -* Starting in {product-title} {product-version}, {olmv1} is now enabled by default, alongside {olmv0}. {olmv1} is a xref:../installing/overview/cluster-capabilities.adoc#cluster-operators-ref-olmv1_cluster-capabilities[cluster capability] that administrators can optionally disable before installation of {product-title}. -* {olmv0} remains fully supported throughout the {product-title} 4 lifecycle. -==== - -Simplified API:: -{olmv1} simplifies Operator management with a new, user-friendly API: xref:../extensions/arch/operator-controller.adoc#olmv1-clusterextension-api_operator-controller[the `ClusterExtension` object]. By managing Operators as integral extensions of the cluster, {olmv1} caters to the special lifecycle requirements of custom resource definitions (CRDs). This design aligns more closely with Kubernetes principles, treating Operators, which consist of custom controllers and CRDs, as cluster-wide singletons. -+ -{product-title} continues to give you access to the latest Operator packages, patches, and updates through default xref:../extensions/catalogs/rh-catalogs.adoc#rh-catalogs[Red Hat Operator catalogs], which are enabled by default for {olmv1} in {product-title} {product-version}. With {olmv1}, you can install an Operator package by creating and applying a `ClusterExtension` API object in your cluster. By interacting with `ClusterExtension` objects, you can manage the lifecycle of Operator packages, quickly understand their status, and troubleshoot issues. - -Streamlined declarative workflows:: -Leveraging the simplified API, you can define your desired Operator states in a declarative way and, when integrating with tools like Git and Zero Touch Provisioning, let {olmv1} automatically maintain those states. This minimizes human error and unlocks a wider range of use cases. - -Uninterrupted operations with continuous reconciliation and optional rollbacks:: -{olmv1} enhances reliability through continuous reconciliation.
Rather than relying on single attempts, {olmv1} proactively addresses Operator installation and update failures, automatically retrying until the issue is resolved. This eliminates the manual steps previously required, such as deleting `InstallPlan` API objects, and greatly simplifies the resolution of off-cluster issues, such as missing container images or catalog problems. -+ -In addition, {olmv1} provides optional rollbacks, allowing you to revert Operator version updates under specific conditions after carefully assessing any potential risks. - -Granular update control for deployments:: -With granular update control, you can select a specific Operator version or define a range of acceptable versions. For example, if you have tested and approved version `1.2.3` of an Operator in a stage environment, instead of hoping the latest version works just as well in production, you can use version pinning. By specifying `1.2.3` as the desired version, you can ensure that this exact version is deployed for a safe and predictable update. -+ -Alternatively, automatic z-stream updates provide a seamless and secure experience by automatically applying security fixes without manual intervention, minimizing operational disruptions. - -Enhanced security with user-provided service accounts:: -{olmv1} prioritizes security by minimizing its permission requirements and providing greater control over access. By using xref:../extensions/ce/managing-ce.adoc#olmv1-cluster-extension-permissions_managing-ce[user-provided `ServiceAccount` objects for Operator lifecycle operations], {olmv1} access is restricted to only the necessary permissions, significantly reducing the control plane attack surface and improving overall security. In this way, {olmv1} adopts a least-privilege model to minimize the impact of a compromise. - -[NOTE] -==== -The documentation for {olmv1} exists as a stand-alone guide called xref:../extensions/index.adoc#olmv1-about[Extensions]. Previously, {olmv1} documentation was a subsection of the xref:../operators/index.adoc#operators-overview[Operators] guide, which otherwise documents the {olmv0} feature set. - -The updated location and guide name reflect a more focused documentation experience and aim to differentiate between {olmv1} and {olmv0}. -==== - -[id="ocp-4-18-extensions-supported-extensions_{context}"] -==== {olmv1} supported extensions - -include::snippets/olmv1-tp-extension-support.adoc[] - -[id="ocp-4-18-extensions-disconnected_{context}"] -==== Disconnected environment support in {olmv1} - -To support cluster administrators who prioritize high security by running their clusters in internet-disconnected environments, especially for mission-critical production workloads, {olmv1} supports these disconnected environments, starting in {product-title} {product-version}. - -After using the oc-mirror plugin for the {oc-first} to mirror the images required for your cluster to a mirror registry in your fully or partially disconnected environments, {olmv1} can function properly in these environments by utilizing the sets of resources generated by either oc-mirror plugin v1 or v2. - -For more information, see xref:../extensions/catalogs/disconnected-catalogs.adoc#disconnected-catalogs[Disconnected environment support in {olmv1}].
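The concepts above come together in a single `ClusterExtension` manifest. The following sketch shows a declarative install of an Operator package with a pinned version and a user-provided service account; the package name, namespace, and version are illustrative, and you should check the Extensions guide for the exact schema in your release:

[source,yaml]
----
apiVersion: olm.operatorframework.io/v1
kind: ClusterExtension
metadata:
  name: example-operator
spec:
  namespace: example-operator-ns # namespace for the extension workloads (illustrative)
  serviceAccount:
    name: example-operator-installer # user-provided service account with install permissions
  source:
    sourceType: Catalog
    catalog:
      packageName: example-operator # illustrative package name
      version: 1.2.3 # pin an approved version instead of tracking the latest
----

After you apply this object, {olmv1} continuously reconciles the cluster toward the declared state, retrying failed installations and updates as described above.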
- -[id="ocp-4-18-extensions-catalog-selection_{context}"] -==== Improved catalog selection in {olmv1} - -With this release, you can perform the following actions to control the selection of catalog content when you install or update a cluster extension: - -* Specify labels to select the catalog -* Use match expressions to filter across catalogs -* Set catalog priority - -For more information, see xref:../extensions/catalogs/catalog-content-resolution.adoc#catalog-content-resolution[Catalog content resolution]. - -[id="ocp-4-18-extensions-proxy-and-trustedCA_{context}"] -==== Basic support for proxied environments and trusted CA certificates - -With this release, Operator Controller and catalogd can now run in proxied environments and include basic support for trusted CA certificates. - -[id="ocp-4-18-extensions-maxopenshiftversion_{context}"] -==== Compatibility with {product-title} versions - -Before cluster administrators can update their {product-title} cluster to its next minor version, they must ensure that all installed Operators are updated to a bundle version that is compatible with the next minor version (4.y+1) of a cluster. - -Starting in {product-title} {product-version}, {olmv1} supports the `olm.maxOpenShiftVersion` annotation in the cluster service version (CSV) of an Operator, similar to the behavior in {olmv0}, to prevent administrators from updating the cluster before updating the installed Operator to a compatible version. - -For more information, see xref:../extensions/ce/update-paths.adoc#olmv1-ocp-compat_update-paths[Compatibility with {product-title} versions]. - -[id="ocp-4-18-extensions-user-access-resources_{context}"] -==== User access to extension resources - -After a cluster extension has been installed and is being managed by {olmv1-first}, the extension can often provide `CustomResourceDefinition` objects (CRDs) that expose new API resources on the cluster. Cluster administrators typically have full management access to these resources by default, whereas non-cluster administrator users, or _regular users_, might lack sufficient permissions. - -{olmv1} does not automatically configure or manage role-based access control (RBAC) for regular users to interact with the APIs provided by installed extensions. Cluster administrators must define the required RBAC policy to create, view, or edit these custom resources (CRs) for such users. - -For more information, see xref:../extensions/ce/user-access-resources.adoc#user-access-resources[User access to extension resources]. - -[id="ocp-4-18-extensions-sigstore_{context}"] -==== Runtime validation of container images using sigstore signatures in {olmv1} (Technology Preview) - -Starting in {product-title} {product-version}, {olmv1} support for handling runtime validation of sigstore signatures for container images is available as a Technology Preview (TP) feature. - -[id="ocp-4-18-extensions-known-issues_{context}"] -==== {olmv1} known issues - -include::snippets/olmv1-operator-conditions-support.adoc[] - -[id="ocp-4-18-deprecation-of-siteconfig-v1_{context}"] -==== Deprecation of SiteConfig v1 - -SiteConfig v1 is deprecated starting with {product-title} {product-version}. Equivalent and improved functionality is now available through the SiteConfig Operator using the `ClusterInstance` custom resource. For more information, see the Red{nbsp}Hat Knowledge Base solution link:https://access.redhat.com/articles/7105238[Procedure to transition from SiteConfig CRs to the ClusterInstance API]. 
- -For more information about the SiteConfig Operator, see link:https://docs.redhat.com/en/documentation/red_hat_advanced_cluster_management_for_kubernetes/2.12/html-single/multicluster_engine_operator_with_red_hat_advanced_cluster_management/index#siteconfig-intro[SiteConfig]. - -[id="ocp-release-notes-hcp_{context}"] -=== Hosted control planes - -Because {hcp} releases asynchronously from {product-title}, it has its own release notes. For more information, see xref:../hosted_control_planes/hosted-control-planes-release-notes.adoc#hosted-control-planes-release-notes[{hcp-capital} release notes]. - -[id="ocp-release-notes-ibm-power_{context}"] -=== {ibm-power-title} - -The {ibm-power-name} release on {product-title} {product-version} adds improvements and new capabilities to {product-title} components. - -This release introduces support for the following features on {ibm-power-title}: - -* Added four new data centers to PowerVS Installer Provisioned Infrastructure deployments -* Adding compute nodes to on-premise clusters using {oc-first} - -[id="ocp-release-notes-ibm-z_{context}"] -=== {ibm-z-title} and {ibm-linuxone-title} - -With this release, {ibm-z-name} and {ibm-linuxone-name} are now compatible with {product-title} {product-version}. You can perform the installation with z/VM, LPAR, or {op-system-base-full} Kernel-based Virtual Machine (KVM). For installation instructions, see -xref:../installing/installing_ibm_z/preparing-to-install-on-ibm-z.adoc#preparing-to-install-on-ibm-z[Installation methods]. - -[IMPORTANT] -==== -Compute nodes must run {op-system-first}. -==== - -[discrete] -[id="ocp-4-18-ibm-z-enhancements_{context}"] -==== {ibm-z-title} and {ibm-linuxone-title} notable enhancements - -The {ibm-z-name} and {ibm-linuxone-name} release on {product-title} {product-version} adds improvements and new capabilities to {product-title} components and concepts. - -This release introduces support for the following features on {ibm-z-name} and {ibm-linuxone-name}: - -* Adding compute nodes to on-premise clusters using {oc-first} - -[discrete] -[id="ocp-release-notes-ibm-z-power-support-matrix_{context}"] -=== {ibm-power-title}, {ibm-z-title}, and {ibm-linuxone-title} support matrix -Starting in {product-title} 4.14, Extended Update Support (EUS) is extended to the {ibm-power-name} and the {ibm-z-name} platform. For more information, see the link:https://access.redhat.com/support/policy/updates/openshift-eus[OpenShift EUS Overview]. 
- -.{product-title} features -[cols="3,1,1",options="header"] -|==== -|Feature |{ibm-power-name} |{ibm-z-name} and {ibm-linuxone-name} - -|Adding compute nodes to on-premise clusters using {oc-first} -|Supported -|Supported - -|Alternate authentication providers -|Supported -|Supported - -|Agent-based Installer -|Supported -|Supported - -|Assisted Installer -|Supported -|Supported - -|Automatic Device Discovery with Local Storage Operator -|Unsupported -|Supported - -|Automatic repair of damaged machines with machine health checking -|Unsupported -|Unsupported - -|Cloud controller manager for {ibm-cloud-name} -|Supported -|Unsupported - -|Controlling overcommit and managing container density on nodes -|Unsupported -|Unsupported - -|CPU manager -|Supported -|Supported - -|Cron jobs -|Supported -|Supported - -|Descheduler -|Supported -|Supported - -|Egress IP -|Supported -|Supported - -|Encrypting data stored in etcd -|Supported -|Supported - -|FIPS cryptography -|Supported -|Supported - -|Helm -|Supported -|Supported - -|Horizontal pod autoscaling -|Supported -|Supported - -|Hosted control planes -|Supported -|Supported - -|IBM Secure Execution -|Unsupported -|Supported - -|Installer-provisioned Infrastructure Enablement for {ibm-power-server-name} -|Supported -|Unsupported - -|Installing on a single node -|Supported -|Supported - -|IPv6 -|Supported -|Supported - -|Monitoring for user-defined projects -|Supported -|Supported - -|Multi-architecture compute nodes -|Supported -|Supported - -|Multi-architecture control plane -|Supported -|Supported - -|Multipathing -|Supported -|Supported - -|Network-Bound Disk Encryption - External Tang Server -|Supported -|Supported - -|Non-volatile memory express drives (NVMe) -|Supported -|Unsupported - -|nx-gzip for Power10 (Hardware Acceleration) -|Supported -|Unsupported - -|oc-mirror plugin -|Supported -|Supported - -|OpenShift CLI (`oc`) plugins -|Supported -|Supported - -|Operator API -|Supported -|Supported - -|OpenShift Virtualization -|Unsupported -|Supported - -|OVN-Kubernetes, including IPsec encryption -|Supported -|Supported - -|PodDisruptionBudget -|Supported -|Supported - -|Precision Time Protocol (PTP) hardware -|Unsupported -|Unsupported - -|{openshift-local-productname} -|Unsupported -|Unsupported - -|Scheduler profiles -|Supported -|Supported - -|Secure Boot -|Unsupported -|Supported - -|Stream Control Transmission Protocol (SCTP) -|Supported -|Supported - -|Support for multiple network interfaces -|Supported -|Supported - -|The `openshift-install` utility to support various SMT levels on {ibm-power-name} (Hardware Acceleration) -|Supported -|Supported - -|Three-node cluster support -|Supported -|Supported - -|Topology Manager -|Supported -|Unsupported - -|z/VM Emulated FBA devices on SCSI disks -|Unsupported -|Supported - -|4K FCP block device -|Supported -|Supported -|==== - -.Persistent storage options -[cols="2,1,1",options="header"] -|==== -|Feature |{ibm-power-name} |{ibm-z-name} and {ibm-linuxone-name} -|Persistent storage using iSCSI -|Supported ^[1]^ -|Supported ^[1]^,^[2]^ - -|Persistent storage using local volumes (LSO) -|Supported ^[1]^ -|Supported ^[1]^,^[2]^ - -|Persistent storage using hostPath -|Supported ^[1]^ -|Supported ^[1]^,^[2]^ - -|Persistent storage using Fibre Channel -|Supported ^[1]^ -|Supported ^[1]^,^[2]^ - -|Persistent storage using Raw Block -|Supported ^[1]^ -|Supported ^[1]^,^[2]^ - -|Persistent storage using EDEV/FBA -|Supported ^[1]^ -|Supported ^[1]^,^[2]^ -|==== -[.small] --- -1. 
Persistent shared storage must be provisioned by using either {rh-storage-first} or other supported storage protocols. -2. Persistent non-shared storage must be provisioned by using local storage, such as iSCSI, FC, or by using LSO with DASD, FCP, or EDEV/FBA. --- - -.Operators -[cols="2,1,1",options="header"] -|==== -|Feature |{ibm-power-name} |{ibm-z-name} and {ibm-linuxone-name} - -|{cert-manager-operator} -|Supported -|Supported - -|Cluster Logging Operator -|Supported -|Supported - -|Cluster Resource Override Operator -|Supported -|Supported - -|Compliance Operator -|Supported -|Supported - -|Cost Management Metrics Operator -|Supported -|Supported - -|File Integrity Operator -|Supported -|Supported - -|HyperShift Operator -|Supported -|Supported - -|{ibm-power-server-name} Block CSI Driver Operator -|Supported -|Unsupported - -|Ingress Node Firewall Operator -|Supported -|Supported - -|Local Storage Operator -|Supported -|Supported - -|MetalLB Operator -|Supported -|Supported - -|Network Observability Operator -|Supported -|Supported - -|NFD Operator -|Supported -|Supported - -|NMState Operator -|Supported -|Supported - -|OpenShift Elasticsearch Operator -|Supported -|Supported - -|Vertical Pod Autoscaler Operator -|Supported -|Supported -|==== - -.Multus CNI plugins -[cols="2,1,1",options="header"] -|==== -|Feature |{ibm-power-name} |{ibm-z-name} and {ibm-linuxone-name} - -|Bridge -|Supported -|Supported - -|Host-device -|Supported -|Supported - -|IPAM -|Supported -|Supported - -|IPVLAN -|Supported -|Supported -|==== - -.CSI Volumes -[cols="2,1,1",options="header"] -|==== -|Feature |{ibm-power-name} |{ibm-z-name} and {ibm-linuxone-name} - -|Cloning -|Supported -|Supported - -|Expansion -|Supported -|Supported - -|Snapshot -|Supported -|Supported -|==== - -[id="ocp-release-notes-insights-operator-enhancements_{context}"] -=== Insights Operator - -[id="ocp-release-notes-insights-operator-runtime-extractor_{context}"] -==== Insights Runtime Extractor (Technology Preview) - -In this release, the Insights Operator introduces the workload data collection _Insights Runtime Extractor_ feature to help Red{nbsp}Hat better understand the workload of your containers. Available as a Technology Preview, the Insights Runtime Extractor feature gathers runtime workload data and sends it to Red{nbsp}Hat. Red{nbsp}Hat uses the collected runtime workload data to gain insights that can help you make investment decisions that will drive and optimize how you use your {product-title} containers. For more information, see xref:../nodes/clusters/nodes-cluster-enabling-features.adoc#nodes-cluster-enabling[_Enabling features using feature gates_]. - -[id="ocp-release-notes-insights-operator-rapid-recommendations_{context}"] -==== Rapid Recommendations - -In this release, enhancements have been made to the Rapid Recommendations mechanism for remotely configuring the rules that determine the data that the Insights Operator collects. - -The Rapid Recommendations feature is version-independent, and builds on the existing conditional data gathering mechanism. - - -The Insights Operator connects to a secure remote endpoint service running on _console.redhat.com_ to retrieve definitions that contain the rules for determining which container log messages are filtered and collected by Red{nbsp}Hat. 
- -The conditional data-gathering definitions are configured through an attribute named `conditionalGathererEndpoint` in the link:https://github.com/openshift/insights-operator/blob/master/config/pod.yaml[`pod.yml`] configuration file. - -[source,yaml] ----- -conditionalGathererEndpoint: https://console.redhat.com/api/gathering/v2/%s/gathering_rules ----- - -[NOTE] -==== -In earlier iterations, the rules for determining the data that the Insights Operator collects were hard-coded and tied to the corresponding {product-title} version. -==== - -The preconfigured endpoint URL now provides a placeholder (`%s`) for defining a target version of {product-title}. - -[id="ocp-release-notes-insights-operator-more_data_collected_and_recommendations_added_{context}"] -==== More data collected and recommendations added - -The Insights Operator now gathers more data to detect the following scenarios, which other applications can use to generate remedial recommendations to proactively manage your {product-title} deployments: - -// Engineering reference: CCXDEV-14148 -* Collects resources from the `nmstate.io/v1` API group. - -// Engineering reference: CCXDEV-14521 -* Collects data from `clusterrole.rbac.authorization.k8s.io/v1` instances. - - -[id="ocp-release-notes-installation-and-update_{context}"] -=== Installation and update - -[id="ocp-4-18-installation-cluster-api-provider-ibmcloud_{context}"] -==== New version of the {cap-ibm-short} - -The installation program now uses a newer version of the {cap-ibm-short} provider that includes Transit Gateway fixes. Because of the cost of Transit Gateways in {ibm-cloud-title}, you can now use {product-title} to create a Transit Gateway when creating an {product-title} cluster. For more information, see (link:https://issues.redhat.com/browse/OCPBUGS-37588[*OCPBUGS-37588*]) and (link:https://issues.redhat.com/browse/OCPBUGS-41938[*OCPBUGS-41938*]). - -[id="ocp-4-18-installation-and-update-ovn-kubernetes-join-subnet_{context}"] -==== Configuring the `ovn-kubernetes` join subnet during cluster installation -With this release, you can configure the IPv4 join subnet that is used internally by `ovn-kubernetes` when installing a cluster. You can set the `internalJoinSubnet` parameter in the `install-config.yaml` file and deploy the cluster into an existing Virtual Private Cloud (VPC). - -For more information, see xref:../installing/installing_aws/installation-config-parameters-aws.adoc#installation-configuration-parameters-network_installation-config-parameters-aws[Network configuration parameters]. - -[id="ocp-4-18-updating-oc-adm-upgrade-recommend_{context}"] -==== Introducing the oc adm upgrade recommend command (Technology Preview) -When updating your cluster, the `oc adm upgrade` command returns a list of the next available versions. As long as you are using the 4.18 `oc` client binary, you can use the `oc adm upgrade recommend` command to narrow down your suggestions and recommend a new target release before you launch your update. This feature is available for {product-title} version 4.16 and newer clusters that are connected to an update service.
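The following minimal invocation sketches the new subcommand. The output shape varies by cluster and release, and additional flags might be available to filter the suggestions:

[source,terminal]
----
$ oc adm upgrade recommend
----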
- -For more information, see xref:../updating/updating_a_cluster/updating-cluster-cli.adoc#update-upgrading-cli_updating-cluster-cli[Updating a cluster by using the CLI]. - -[id="ocp-4-18-installation-and-update-nc2-aws-support_{context}"] -==== Support for Nutanix Cloud Clusters (NC2) on {aws-first} and NC2 on {azure-first} -With this release, you can install {product-title} on Nutanix Cloud Clusters (NC2) on {aws-short} or NC2 on {azure-short}. - -For more information, see xref:../installing/installing_nutanix/preparing-to-install-on-nutanix.adoc#installation-nutanix-installer-infrastructure-reqs_preparing-to-install-on-nutanix[Infrastructure requirements]. - -[id="ocp-4-18-installation-and-update-gcp-c4-c4a_{context}"] -==== Installing a cluster on {gcp-full} using the C4 and C4A machine series - -With this release, you can deploy a cluster on {gcp-short} using the C4 and C4A machine series for compute or control plane machines. The supported disk type of these machines is `hyperdisk-balanced`. If you use an instance type that requires Hyperdisk storage, all of the nodes in your cluster must support Hyperdisk storage, and you must change the default storage class to use Hyperdisk storage. - -For more information about configuring machine types, see xref:../installing/installing_gcp/installation-config-parameters-gcp.adoc#installation-configuration-parameters-gcp[Installation configuration parameters for GCP], link:https://cloud.google.com/compute/docs/general-purpose-machines#c4_series[C4 machine series] (Compute Engine docs), and link:https://cloud.google.com/compute/docs/general-purpose-machines#c4a_series[C4A machine series] (Compute Engine docs). - -[id="ocp-4-18-installation-and-update-gcp-byo-vpc-phz_{context}"] -==== Provide your own private hosted zone when installing a cluster on {gcp-full} -With this release, you can provide your own private hosted zone when installing a cluster on {gcp-short} into a shared VPC. If you do, the requirements for the bring your own (BYO) zone are that the zone must use a DNS name such as `..` and that you bind the zone to the VPC network of the cluster. - -For more information, see xref:../installing/installing_gcp/installing-gcp-shared-vpc.adoc#installation-gcp-shared-vpc-prerequisites_installing-gcp-shared-vpc[Prerequisites for installing a cluster on GCP into a shared VPC] and xref:../installing/installing_gcp/installing-gcp-user-infra-vpc.adoc#prerequisites[Prerequisites for installing a cluster into a shared VPC on GCP using Deployment Manager templates]. - -[id="ocp-4-18-installation-and-update-nutanix-preloaded-image-support_{context}"] -==== Installing a cluster on Nutanix by using a preloaded {op-system} image object -With this release, you can install a cluster on Nutanix by using a named, preloaded {op-system} image object from the private cloud or the public cloud. Rather than creating and uploading a {op-system} image object for each {product-title} cluster, you can use the `preloadedOSImageName` parameter in the `install-config.yaml` file. - -For more information, see xref:../installing/installing_nutanix/installation-config-parameters-nutanix.adoc#installation-configuration-parameters-additional-nutanix_installation-config-parameters-nutanix[Additional Nutanix configuration parameters]. - -[id="ocp-4-18-installation-openstack-ipv6-single-stack-deployment_{context}"] -==== Single-stack IPv6 clusters on {rh-openstack} - -You can now deploy single-stack IPv6 clusters on {rh-openstack}.
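A single-stack IPv6 deployment assigns IPv6 ranges to all cluster networks. The following `install-config.yaml` networking stanza is an illustrative sketch with example CIDRs; the values you use must match your {rh-openstack} environment:

[source,yaml]
----
networking:
  networkType: OVNKubernetes
  clusterNetwork:
  - cidr: fd01::/48 # example pod network CIDR
    hostPrefix: 64
  machineNetwork:
  - cidr: fd2e:6f44:5dd8:c956::/64 # example machine network CIDR
  serviceNetwork:
  - fd02::/112 # example service network CIDR
----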
- -You must configure {rh-openstack} prior to deploying your {product-title} cluster. For more information, see xref:../installing/installing_openstack/installing-openstack-installer-custom.adoc#installation-configuring-shiftstack-single-ipv6_installing-openstack-installer-custom[Configuring a cluster with single-stack IPv6 networking]. - -[id="ocp-4-18-installation-and-update-nutanix-multiple-nics_{context}"] -==== Installing a cluster on Nutanix with multiple subnets -With this release, you can install a Nutanix cluster with more than one subnet for the Prism Element into which you are deploying an {product-title} cluster. - -For more information, see xref:../installing/installing_nutanix/installing-nutanix-installer-provisioned.adoc#installation-configuring-nutanix-failure-domains_installing-nutanix-installer-provisioned[Configuring failure domains] and xref:../installing/installing_nutanix/installation-config-parameters-nutanix.adoc#installation-configuration-parameters-additional-nutanix_installation-config-parameters-nutanix[Additional Nutanix configuration parameters]. - -For an existing Nutanix cluster, you can add multiple subnets by using xref:../machine_management/creating_machinesets/creating-machineset-nutanix.adoc#machineset-yaml-nutanix_creating-machineset-nutanix[compute] or xref:../machine_management/control_plane_machine_management/cpmso_provider_configurations/cpmso-config-options-nutanix.adoc#cpmso-yaml-provider-spec-nutanix_cpmso-config-options-nutanix[control plane] machine sets. - -[id="ocp-4-18-installation-and-update-vsphere-multiple-nics_{context}"] -==== Installing a cluster on {vmw-full} with multiple network interface controllers (Technology Preview) -With this release, you can install a {vmw-full} cluster with multiple network interface controllers (NICs) for a node. - -For more information, see xref:../installing/installing_vsphere/ipi/installing-vsphere-installer-provisioned-network-customizations.adoc#installation-vsphere-multiple-nics_installing-vsphere-installer-provisioned-network-customizations[Configuring multiple NICs]. - -For an existing {vmw-short} cluster, you can add multiple subnets by using xref:../machine_management/creating_machinesets/creating-machineset-vsphere.adoc#machineset-vsphere-multiple-nics_creating-machineset-vsphere[compute machine sets]. - -[id="ocp-release-notes-agent-5-node-control-plane_{context}"] -==== Configuring 4 and 5 node control planes with the Agent-based Installer -With this release, if you are using the Agent-based Installer, you can now configure your cluster to be installed with either 4 or 5 nodes in the control plane. This feature is enabled by setting the `controlPlane.replicas` parameter to either `4` or `5` in the `install-config.yaml` file. - -For more information, see xref:../installing/installing_with_agent_based_installer/installation-config-parameters-agent.adoc#installation-configuration-parameters-optional_installation-config-parameters-agent[Optional configuration parameters] for the Agent-based Installer. - -[id="ocp-release-notes-agent-minimal-iso_{context}"] -==== Minimal ISO image support for the Agent-based Installer -With this release, the Agent-based Installer supports creating a minimal ISO image on all supported platforms. Previously, minimal ISO images were supported only on the `external` platform. - -This feature is enabled using the `minimalISO` parameter in the `agent-config.yaml` file. 
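For example, the following `agent-config.yaml` snippet is a minimal sketch that enables the feature; the cluster name and other fields are illustrative:

[source,yaml]
----
apiVersion: v1beta1
kind: AgentConfig
metadata:
  name: example-cluster # illustrative cluster name
minimalISO: true # generate a minimal ISO that fetches the root file system over the network at boot time
----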
- -For more information, see xref:../installing/installing_with_agent_based_installer/installation-config-parameters-agent.adoc#agent-configuration-parameters-optional_installation-config-parameters-agent[Optional configuration parameters] for the Agent-based Installer. - -[id="ocp-release-notes-agent-iscsi_{context}"] -==== Internet Small Computer System Interface (iSCSI) boot support for the Agent-based Installer -With this release, the Agent-based Installer supports creating assets that can be used to boot an {product-title} cluster from an iSCSI target. - -For more information, see xref:../installing/installing_with_agent_based_installer/installing-using-iscsi.adoc#installing-using-iscsi[Preparing installation assets for iSCSI booting]. - -[id="ocp-release-notes-machine-config-operator_{context}"] -=== Machine Config Operator - -[id="ocp-release-notes-machine-config-operator-aws-boot-ga_{context}"] -==== Updated boot images for AWS clusters promoted to GA -The updated boot images feature has been promoted to GA for Amazon Web Services (AWS) clusters. For more information, see xref:../machine_configuration/mco-update-boot-images.adoc#mco-update-boot-images[Updated boot images]. - -[id="ocp-release-notes-machine-config-operator-imageconfignodes_{context}"] -==== Expanded image config nodes information (Technology Preview) -The image config nodes custom resource, which you can use to monitor the progress of machine configuration updates to nodes, now presents more information on the update. The output of the `oc get machineconfignodes` command now reports on the following conditions, among others. You can use these statuses to follow the update or troubleshoot the node if it experiences an error during the update: - -* If each node was cordoned and uncordoned -* If each node was drained -* If each node was rebooted -* If a node had a CRI-O reload -* If a node had the operating system and node files updated - -[id="ocp-release-notes-machine-config-operator-ocl_{context}"] -==== On-cluster layering changes (Technology Preview) - -There are several important changes to the on-cluster layering feature: - -* You can now install extensions onto an on-cluster custom layered image by using a `MachineConfig` object. -* Updating the Containerfile in a `MachineOSConfig` object now triggers a build to be performed. -* You can now revert an on-cluster custom layered image back to the base image by removing a label from the `MachineOSConfig` object. -* The `must-gather` for the Machine Config Operator now includes data on the `MachineOSConfig` and `MachineOSBuild` objects. - -For more information about on-cluster layering, see xref:../machine_configuration/mco-coreos-layering.adoc#coreos-layering-configuring-on_mco-coreos-layering[Using on-cluster layering to apply a custom layered image]. - -[id="ocp-release-notes-management-console_{context}"] -=== Management console - -[id="ocp-4-18-checkbox-cluster-monitoring_{context}"] -==== Checkbox for enabling cluster monitoring is marked by default - -With this update, the checkbox for enabling cluster monitoring is now checked by default when installing the {ols} Operator.
(link:https://issues.redhat.com/browse/OCPBUGS-42381[*OCPBUGS-42381*]) - -[id="ocp-release-notes-monitoring_{context}"] -=== Monitoring - -The in-cluster monitoring stack for this release includes the following new and modified features: - -[id="ocp-4-18-monitoring-updates-to-monitoring-stack-components-and-dependencies"] -==== Updates to monitoring stack components and dependencies - -This release includes the following version updates for in-cluster monitoring stack components and dependencies: - -* Metrics Server to 0.7.2 -* Prometheus to 2.55.1 -* Prometheus Operator to 0.78.1 -* Thanos to 0.36.1 - -// Note: no alerting rule changes for this release - -// [id="ocp-4-18-monitoring-changes-to-alerting-rules"] -// ==== Changes to alerting rules - -// [NOTE] -// ==== -// Red{nbsp}Hat does not guarantee backward compatibility for recording rules or alerting rules. -// ==== - -[id="ocp-4-18-monitoring-added-scrape-and-evaluation-intervals-for-uwm-prometheus"] -==== Added scrape and evaluation intervals for user workload monitoring Prometheus - -With this update, you can configure the intervals between consecutive scrapes and between rule evaluations for Prometheus for user workload monitoring. - -[id="ocp-4-18-monitoring-early-validation-for-configurations-in-monitoring-config-maps"] -==== Added early validation for the monitoring configurations in monitoring config maps - -This update introduces early validation for changes to monitoring configurations in `cluster-monitoring-config` and `user-workload-monitoring-config` config maps to provide shorter feedback loops and enhance user experience. - -[id="ocp-4-18-monitoring-proxy-environment-variables-alertmanager"] -==== Added the proxy environment variables to Alertmanager containers - -With this update, Alertmanager uses the proxy environment variables. Therefore, if you configured a cluster-wide HTTP proxy, you can enable proxying by setting the `proxy_from_environment` parameter to `true` in your alert receivers or at the global config level in Alertmanager. - -[id="ocp-4-18-monitoring-cross-project-alerting-rules-uwm"] -==== Added cross-project user workload alerting and recording rules - -With this update, you can create user workload alerting and recording rules that query multiple projects at the same time. - -[id="ocp-4-18-monitoring-rhoso-metrics-correlation_{context}"] -==== Correlating cluster metrics with {rhoso} metrics -You can now correlate observability metrics for clusters that run on {rhoso-first}. By collecting metrics from both environments, you can monitor and troubleshoot issues across the infrastructure and application layers. - -For more information, see xref:../observability/monitoring/shiftstack-prometheus-configuration.adoc#shiftstack-prometheus-configuration[Monitoring clusters that run on {rhoso}]. - -[id="ocp-release-notes-network-observability-operator_{context}"] -=== Network Observability Operator - -The Network Observability Operator releases updates independently from the OpenShift Container Platform minor version release stream. Updates are available through a single, rolling stream that is supported on all currently supported versions of OpenShift Container Platform 4. Information regarding new features, enhancements, and bug fixes for the Network Observability Operator is found in the xref:../observability/network_observability/network-observability-operator-release-notes.adoc#network-observability-rn[Network Observability release notes].
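As a sketch of the user workload monitoring interval settings described above, the following config map snippet sets both intervals; the values are illustrative:

[source,yaml]
----
apiVersion: v1
kind: ConfigMap
metadata:
  name: user-workload-monitoring-config
  namespace: openshift-user-workload-monitoring
data:
  config.yaml: |
    prometheus:
      scrapeInterval: 30s     # time between consecutive scrapes
      evaluationInterval: 30s # time between rule evaluations
----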
- -[id="ocp-release-notes-networking_{context}"] -=== Networking - -[id="ocp-4-18-holdover-in-grandmaster-clock_{context}"] -==== Holdover in a grandmaster clock with GNSS as the source - -With this release, you can configure the holdover behavior in a grandmaster (T-GM) clock with Global Navigation Satellite System (GNSS) as the source. Holdover allows the T-GM clock to maintain synchronization performance when the GNSS source is unavailable. During this period, the T-GM clock relies on its internal oscillator and holdover parameters to reduce timing disruptions. - -You can define the holdover behavior by configuring the following holdover parameters in the `PTPConfig` custom resource (CR): - -* `MaxInSpecOffset` -* `LocalHoldoverTimeout` -* `LocalMaxHoldoverOffSet` - -For more information, see xref:../networking/ptp/configuring-ptp.adoc#holdover-in-a-grandmaster-clock_configuring-ptp[Holdover in a grandmaster clock with GNSS as the source]. - -[id="ocp-4-18-support-for-IPVLAN_{context}"] -==== Support for configuring a multi-network policy for IPVLAN and Bond CNI - -With this release, you can configure a multi-network policy for the following network types: - -* IP Virtual Local Area Network (IPVLAN) -* Bond Container Network Interface (CNI) over SR-IOV - -For more information, see xref:../networking/multiple_networks/secondary_networks/configuring-multi-network-policy.adoc[Configuring multi-network policy] - -[id="ocp-release-notes-networking-whitelist-blacklist-annotation-updated-allowlist-denylist_{context}"] -==== Updated terminology for whitelist and blacklist annotations -The terminology for the `ip_whitelist` and `ip_blacklist` annotations have been updated to `ip_allowlist` and `ip_denylist`, respectively. Currently, {product-title} still supports the `ip_whitelist` and `ip_blacklist` annotations. However, these annotations are planned for removal in a future release. - -[id="ocp-release-notes-networking-ovn-kubernetes-observability_{context}"] -==== Checking OVN-Kubernetes network traffic with OVS sampling using the CLI - -OVN-Kubernetes network traffic can be viewed with OVS sampling via the CLI for the following network APIs: - -* `NetworkPolicy` -* `AdminNetworkPolicy` -* `BaselineNetworkPolicy` -* `UserDefinedNetwork` isolation -* `EgressFirewall` -* Multicast ACLs. - -Checking OVN-Kubernetes network traffic with OVS sampling using the CLI is intended to help with packet tracing. It can also be used while the Network Observability Operator is installed. - -For more information, see xref:../networking/ovn_kubernetes_network_provider/ovn-kubernetes-troubleshooting-sources.adoc#nw-ovn-kubernetes-observability_ovn-kubernetes-sources-of-troubleshooting-information[Checking OVN-Kubernetes network traffic with OVS sampling using the CLI]. - -[id="ocp-release-notes-networking-user-defined-network-segmentation_{context}"] -==== User-defined network segmentation (Generally Available) - -With {product-title} {product-version}, user-defined network segmentation is generally available. User-defined networks (UDN) introduce enhanced network segmentation capabilities by allowing administrators to define custom network topologies using namespace-scoped UserDefinedNetwork and cluster-scoped ClusterUserDefinedNetwork custom resources. - -With UDNs, administrators can create tailored network topologies with enhanced isolation, IP address management for workloads, and advanced networking features. 
Supporting both Layer 2 and Layer 3 topology types, user-defined network segmentation enables a wide range of network architectures and topologies, enhancing network flexibility, security, and performance. For more information on supported features, see xref:../networking/multiple_networks/understanding-multiple-networks.adoc#support-matrix-for-udn-nad_understanding-multiple-networks[UDN support matrix]. - -Use cases of UDN include providing virtual machines (VMs) with static IP address assignments that persist for the lifetime of the VM, as well as a Layer 2 primary pod network so that users can live migrate VMs between nodes. These features are fully supported in {VirtProductName}. You can use UDNs to create a stronger, native multi-tenant environment and secure your overlay Kubernetes network, which is otherwise open by default. For more information, see xref:../networking/multiple_networks/primary_networks/about-user-defined-networks.adoc#about-user-defined-networks[About user-defined networks]. - -[id="ocp-release-notes-networking-dynamic-config-manager_{context}"] -==== The dynamic configuration manager (Technology Preview) - -You can reduce your memory footprint by using the dynamic configuration manager on Ingress Controllers. The dynamic configuration manager propagates endpoint changes through a dynamic API. This process enables the underlying routers to adapt to changes (scale ups and scale downs) without reloads. - -To use the dynamic configuration manager, enable the `TechPreviewNoUpgrade` feature set by running the following command: - -[source,terminal] ----- -$ oc patch featuregates cluster -p '{"spec": {"featureSet": "TechPreviewNoUpgrade"}}' --type=merge ----- - -[id="ocp-release-notes-networking-network-matrix-environments_{context}"] -==== Additional environments for the network flow matrix - -With this release, you can view network information for ingress flows to {product-title} services in the following environments: - -* {product-title} on bare metal -* {sno-caps} on bare metal -* {product-title} on {aws-first} -* {sno-caps} on {aws-short} - -For more information, see xref:../installing/install_config/configuring-firewall.adoc#network-flow-matrix_configuring-firewall[{product-title} network flow matrix]. - -[id="ocp-release-notes-networking-metallb-dynamic-asn_{context}"] -==== MetalLB updates for Border Gateway Protocol - -With this release, MetalLB includes a new field for the Border Gateway Protocol (BGP) peer custom resource. -You can use the `dynamicASN` field to detect the Autonomous System Number (ASN) to use for the remote end of a BGP session. -This is an alternative to explicitly setting an ASN in the `spec.peerASN` field. - -[id="ocp-release-notes-networking-sr-iov-rdma-cni_{context}"] -==== Configuring an RDMA subsystem for SR-IOV - -With this release, you can configure a Remote Direct Memory Access (RDMA) Container Network Interface (CNI) on Single Root I/O Virtualization (SR-IOV) to enable high-performance, low-latency communication between containers. -When you combine RDMA with SR-IOV, you provide a mechanism to expose hardware counters of Mellanox Ethernet devices to be used inside Data Plane Development Kit (DPDK) applications.
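One way the RDMA capability described above is typically surfaced is through the `isRdma` field of a `SriovNetworkNodePolicy` object. The following sketch uses illustrative names and values:

[source,yaml]
----
apiVersion: sriovnetwork.openshift.io/v1
kind: SriovNetworkNodePolicy
metadata:
  name: policy-mlx-rdma # illustrative policy name
  namespace: openshift-sriov-network-operator
spec:
  resourceName: mlxrdma # illustrative resource name
  nodeSelector:
    feature.node.kubernetes.io/network-sriov.capable: "true"
  numVfs: 4
  nicSelector:
    vendor: "15b3" # Mellanox vendor ID
  deviceType: netdevice
  isRdma: true # enable the RDMA subsystem for the virtual functions
----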
-
-[id="ocp-release-notes-networking-sr-iov-rdma-cni_{context}"]
-==== Configuring an RDMA subsystem for SR-IOV
-
-With this release, you can configure a Remote Direct Memory Access (RDMA) Container Network Interface (CNI) on Single Root I/O Virtualization (SR-IOV) to enable high-performance, low-latency communication between containers.
-When you combine RDMA with SR-IOV, you provide a mechanism to expose hardware counters of Mellanox Ethernet devices for use inside Data Plane Development Kit (DPDK) applications.
-
-[id="ocp-release-notes-networking-sr-iov-mlx-secure-boot_{context}"]
-==== Support for configuring the SR-IOV Network Operator in a Secure Boot-enabled environment for Mellanox cards
-
-With this release, you can configure the Single Root I/O Virtualization (SR-IOV) Network Operator when the system has secure boot enabled. You configure the SR-IOV Operator after you first manually configure the firmware for Mellanox devices. Enabling secure boot enhances the resilience of your system and adds a crucial layer of defense to its overall security.
-
-For more information, see xref:../networking/hardware_networks/configuring-sriov-device.adoc#nw-sriov-nic-mlx-secure-boot_configuring-sriov-device[Configuring the SR-IOV Network Operator on Mellanox cards when Secure Boot is enabled].
-
-[id="ocp-release-notes-networking-openstack-floatingip-support-ingress_{context}"]
-==== Support for pre-created {rh-openstack} floating IP addresses in the Ingress Controller
-
-With this release, you can specify pre-created floating IP addresses in the Ingress Controller for your clusters running on {rh-openstack}.
-
-For more information, see xref:../networking/load-balancing-openstack.adoc#nw-osp-specify-floating-ip_load-balancing-openstack[Specifying a floating IP address in the Ingress Controller].
-
-[id="ocp-release-notes-networking-intel-netsec_{context}"]
-==== SR-IOV Network Operator support extension
-
-The SR-IOV Network Operator now supports Intel NetSec Accelerator Cards and Marvell Octeon 10 DPUs. (link:https://issues.redhat.com/browse/OCPBUGS-43451[*OCPBUGS-43451*])
-
-[id="ocp-release-notes-networking-ovnk-linux-bridge_{context}"]
-==== Using a Linux bridge interface as the OVS default port connection
-
-The OVN-Kubernetes plugin can now use a Linux bridge interface as the Open vSwitch (OVS) default port connection. This means that a network interface controller, such as a SmartNIC, can now bridge the underlying network with a host.
-(link:https://issues.redhat.com/browse/OCPBUGS-39226[*OCPBUGS-39226*])
-
-[id="ocp-release-notes-networking-live-migration-cno_{context}"]
-==== Cluster Network Operator exposes network overlap metrics
-
-When you start the limited live migration method and a network overlap issue exists, the Cluster Network Operator (CNO) can now expose network overlap metrics for the issue. This is possible because the `openshift_network_operator_live_migration_blocked` metric now includes the new `NetworkOverlap` label. (link:https://issues.redhat.com/browse/OCPBUGS-39096[*OCPBUGS-39096*])
-
-[id="ocp-release-notes-networking-network-attachment-definition_{context}"]
-==== Network attachments support dynamic reconfiguration
-
-Previously, the `NetworkAttachmentDefinition` CR was immutable. With this release, you can edit an existing `NetworkAttachmentDefinition` CR. Support for editing makes it easier to accommodate changes in the underlying network infrastructure, such as adjusting the MTU of a network interface.
-
-You must ensure that the configurations of each `NetworkAttachmentDefinition` CR that reference the same network `name` and `type: ovn-k8s-cni-overlay` are in sync. Only when these values are in sync is the network attachment update successful. If the configurations are not in sync, the behavior is undefined because there is no guarantee about which `NetworkAttachmentDefinition` CR {product-title} uses for the configuration.
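-
-For example, you might update a mutable field in place with the following command; the resource name and namespace are placeholders:
-
-[source,terminal]
-----
-$ oc edit net-attach-def <name> -n <namespace>
-----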
-
-You still must restart any workloads that use the network attachment definition for the network changes to take effect for those pods.
-
-[id="ocp-release-notes-nodes_{context}"]
-=== Nodes
-
-[id="ocp-release-notes-nodes-crun-default_{context}"]
-==== crun is now the default container runtime
-
-crun is now the default container runtime for new containers created in {product-title}. The runC runtime is still supported, and you can change the default runtime to runC if needed. For more information on crun, see xref:../nodes/containers/nodes-containers-using.adoc#nodes-containers-runtimes[About the container engine and container runtime]. For information on changing the default to runC, see xref:../machine_configuration/machine-configs-custom.adoc#create-a-containerruntimeconfig_machine-configs-custom[Creating a ContainerRuntimeConfig CR to edit CRI-O parameters].
-
-Updating from {product-title} 4.17.z to {product-title} {product-version} does not change your container runtime.
-
-[id="ocp-release-notes-nodes-crun-sigstore_{context}"]
-==== sigstore support (Technology Preview)
-
-Available as a Technology Preview, you can use the sigstore project with {product-title} to improve supply chain security. You can create signature policies at the cluster-wide level or for a specific namespace. For more information, see xref:../nodes/nodes-sigstore-using.adoc#nodes-sigstore-using[Manage secure signatures with sigstore].
-
-[id="ocp-release-notes-nodes-adding-enhancements_{context}"]
-==== Enhancements to the process for adding nodes
-
-Enhancements have been added to the process for xref:../nodes/nodes/nodes-nodes-adding-node-iso.adoc#adding-node-iso[adding worker nodes to an on-premise cluster] that was introduced in {product-title} 4.17.
-With this release, you can now generate Preboot Execution Environment (PXE) assets instead of an ISO image file, and you can configure reports to be generated regardless of whether the node creation process fails.
-
-[id="ocp-release-notes-nodes-select-kernel-arguments_{context}"]
-==== Node Tuning Operator properly selects kernel arguments
-
-The Node Tuning Operator can now properly select kernel arguments and management options for Intel and AMD CPUs. (link:https://issues.redhat.com/browse/OCPBUGS-43664[*OCPBUGS-43664*])
-
-[id="ocp-release-notes-nodes-default-container-runtime_{context}"]
-==== Default container runtime is now inherited from the cluster
-
-The default container runtime that the cluster Node Tuning Operator sets is now always inherited from the cluster, rather than hard-coded by the Operator. Starting with this release, the default value is `crun`. (link:https://issues.redhat.com/browse/OCPBUGS-45450[*OCPBUGS-45450*])
-
-////
-[id="ocp-release-notes-postinstallation-configuration_{context}"]
-=== Postinstallation configuration
-////
-
-[id="ocp-release-notes-openshift-cli_{context}"]
-=== OpenShift CLI (oc)
-
-[id="ocp-release-notes-openshift-cli-oc-mirror_{context}"]
-==== oc-mirror plugin v2 (Generally Available)
-
-oc-mirror plugin v2 is now generally available. To use it, add the `--v2` flag when running oc-mirror commands. The previous version (oc-mirror plugin v1), which runs when the `--v2` flag is not set, is now deprecated. Transition to oc-mirror plugin v2 for continued support and improvements.
-
-For more information, see xref:../disconnected/mirroring/about-installing-oc-mirror-v2.adoc#about-installing-oc-mirror-v2[Mirroring images for a disconnected installation by using the oc-mirror plugin v2].
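-
-As an illustrative sketch, a mirror-to-mirror run with the v2 plugin might look like the following; the configuration file name, workspace path, and registry URL are placeholders:
-
-[source,terminal]
-----
-$ oc mirror --v2 -c imageset-config.yaml --workspace file:///home/<user>/mirror docker://registry.example.com:5000
-----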
-
-oc-mirror plugin v2 now supports mirroring Helm charts. You can also use oc-mirror plugin v2 in environments where an HTTP or HTTPS proxy is enabled, ensuring broader compatibility with enterprise setups.
-
-oc-mirror plugin v2 introduces backward-compatible (v1) filtering of Operator catalogs and generates filtered catalogs. This feature allows cluster administrators to view only the Operators that have been mirrored, rather than the complete list from the origin catalog.
-
-[id="ocp-release-notes-olm_{context}"]
-=== Operator lifecycle
-
-[id="ocp-release-notes-olm-classic-rename_{context}"]
-==== Existing version of Operator Lifecycle Manager now known as {olmv0}
-
-With the release of {olmv1-first} as a General Availability (GA) feature, starting in {product-title} {product-version}, the existing version of OLM that has been included since the launch of {product-title} 4 is now known as _{olmv0}_.
-
-[NOTE]
-====
-{olmv0} remains enabled by default and fully supported throughout the {product-title} 4 lifecycle.
-====
-
-For more information on the GA release of {olmv1}, see the xref:../release_notes/ocp-4-18-release-notes.adoc#ocp-release-notes-extensions_release-notes[Extensions ({olmv1})] release note sections. For full documentation focused on {olmv1}, see the stand-alone xref:../extensions/index.adoc#olmv1-about[Extensions] guide.
-
-For full documentation focused on {olmv0}, continue referring to the xref:../operators/index.adoc#operators-overview[Operators] guide.
-
-////
-[id="ocp-release-notes-osdk_{context}"]
-=== Operator development
-////
-
-[id="ocp-release-notes-machine-management_{context}"]
-=== Machine management
-
-[id="ocp-4-18-capi-tp-azure_{context}"]
-==== Managing machines with the Cluster API for {azure-full} (Technology Preview)
-
-This release introduces the ability to manage machines by using the upstream Cluster API, integrated into {product-title}, as a Technology Preview for {azure-full} clusters.
-You can use this capability in addition to, or as an alternative to, managing machines with the Machine API.
-For more information, see xref:../machine_management/cluster_api_machine_management/cluster-api-about.adoc#cluster-api-about[About the Cluster API].
-
-[id="ocp-release-notes-oci_{context}"]
-=== {oci-first}
-
-[id="ocp-release-notes-oci-bare-metal_{context}"]
-==== Bare-metal support on {oci-first}
-{product-title} cluster installations on {oci-first} are now supported for bare-metal machines. You can install bare-metal clusters on {oci} by using either the Assisted Installer or the Agent-based Installer. To install a bare-metal cluster on {oci}, choose one of the following installation options:
-
-* xref:../installing/installing_oci/installing-oci-assisted-installer.adoc#installing-oci-assisted-installer[Installing a cluster on {oci-first-no-rt} by using the {ai-full}]
-* xref:../installing/installing_oci/installing-oci-agent-based-installer.adoc#installing-oci-agent-based-installer[Installing a cluster on {oci-first-no-rt} by using the Agent-based Installer]
-
-[id="ocp-4.18-postinstallation-configuration_{context}"]
-=== Postinstallation configuration
-
-[id="ocp-4.18-migrate-x86-cp-to-arm-arch_{context}"]
-==== Migrating the x86 control plane to arm64 architecture on {aws-full}
-
-With this release, you can migrate the control plane in your cluster from `x86` to `arm64` architecture on {aws-first}. For more information, see xref:../updating/updating_a_cluster/migrating-to-multi-payload.adoc#migrating-from-x86-to-arm64-cp_updating-clusters-overview[Migrating the x86 control plane to arm64 architecture on Amazon Web Services].
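-
-As a sketch of the first step, the migration typically begins by moving the cluster to the multi-architecture payload with the following command; confirm the prerequisites and the full procedure in the linked documentation before you begin:
-
-[source,terminal]
-----
-$ oc adm upgrade --to-multi-arch
-----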
-
-[id="ocp-4.18-support-for-config-image-stream-import-beh_{context}"]
-==== Configuring the image stream import mode behavior (Technology Preview)
-
-This feature introduces a new field, `imageStreamImportMode`, in the `image.config.openshift.io/cluster` resource. The `imageStreamImportMode` field controls the import mode behavior of image streams. You can set the `imageStreamImportMode` field to either of the following values:
-
-* `Legacy`
-* `PreserveOriginal`
-
-For more information, see xref:../openshift_images/image-configuration.adoc#images-configuration-parameters_image-configuration[Image controller configuration parameters].
-
-You must enable the `TechPreviewNoUpgrade` feature set in the `FeatureGate` custom resource (CR) to enable the `imageStreamImportMode` feature. For more information, see xref:../nodes/clusters/nodes-cluster-enabling-features.adoc#nodes-cluster-enabling-features-about_nodes-cluster-enabling[Understanding feature gates].
-
-[id="ocp-release-notes-rhcos_{context}"]
-=== {op-system-first}
-
-[id="ocp-release-notes-rhcos-rhel-9.4-packages_{context}"]
-==== {op-system} uses {op-system-base} 9.4
-
-{op-system} uses {op-system-base-full} 9.4 packages in {product-title} {product-version}. These packages ensure that your {product-title} instances receive the latest fixes, features, enhancements, hardware support, and driver updates.
-
-[id="ocp-release-notes-registry_{context}"]
-=== Registry
-
-[discrete]
-[id="ocp-4-release-notes-read-only-registry-enhancements_{context}"]
-==== Read-only registry enhancements
-
-In previous versions of {product-title}, storage mounted as read-only returned no specific metrics or information about storage errors. This could result in silent failures of a registry when the storage backend was read-only. With this release, the following alerts have been added to return storage information when the backend is set to read-only:
-
-[cols="2", options="header"]
-|===
-| Alert name | Message
-
-| `ImageRegistryStorageReadOnly`
-| The image registry storage is read-only and no images will be committed to storage.
-
-| `ImageRegistryStorageFull`
-| The image registry storage disk is full and no images will be committed to storage.
-
-|===
-
-[id="ocp-release-notes-scalability-and-performance_{context}"]
-=== Scalability and performance
-
-[id="ocp-release-notes-scalability-and-performance-cluster-compare_{context}"]
-==== Cluster validation with the cluster-compare plugin
-
-The `cluster-compare` plugin is an OpenShift CLI (`oc`) plugin that compares a cluster configuration with a target configuration. The plugin reports configuration differences while suppressing expected variations by using configurable validation rules and templates.
-
-For example, the plugin can highlight unexpected differences, such as mismatched field values, missing resources, or version discrepancies, while ignoring expected differences, such as optional components or hardware-specific fields. This focused comparison makes it easier to assess cluster compliance with the target configuration.
-
-You can use the `cluster-compare` plugin in development, production, and support scenarios.
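-
-As a sketch, you run the plugin against a reference configuration; the metadata path is a placeholder:
-
-[source,terminal]
-----
-$ oc cluster-compare -r <path_to_reference_config>/metadata.yaml
-----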
-
-For more information about the `cluster-compare` plugin, see xref:../scalability_and_performance/cluster-compare/understanding-the-cluster-compare-plugin.adoc#cluster-compare-overview_understanding-cluster-compare[Overview of the cluster-compare plugin].
-
-[id="ocp-release-notes-node-tuning-operator-deferred-updates_{context}"]
-==== Node Tuning Operator: deferred tuning updates
-
-In this release, the Node Tuning Operator introduces support for deferring tuning updates. With this feature, administrators can schedule tuning updates to be applied during a maintenance window.
-
-For more information, see xref:../scalability_and_performance/using-node-tuning-operator.adoc#defer-application-of-tuning-changes_node-tuning-operator[Deferring application of tuning changes].
-
-[id="ocp-release-notes-scalability-and-performance-nrop-policy_{context}"]
-==== NUMA Resources Operator now uses default SELinux policy
-
-With this release, the NUMA Resources Operator no longer creates a custom SELinux policy to enable the installation of Operator components on a target node. Instead, the Operator uses a built-in container SELinux policy. This change removes the additional node reboot that was previously required when applying a custom SELinux policy during an installation.
-
-[IMPORTANT]
-====
-In clusters with an existing NUMA-aware scheduler configuration, upgrading to {product-title} 4.18 might result in an additional reboot for each configured node. For further information about how to manage an upgrade in this scenario and limit disruption, see the Red Hat Knowledgebase article link:https://access.redhat.com/articles/7107603[Managing an upgrade to {product-title} 4.18 or later for a cluster with an existing NUMA-aware scheduler configuration].
-====
-
-[id="ocp-release-notes-scalability-and-performance-nto-platforms_{context}"]
-==== Node Tuning Operator platform detection
-
-With this release, when you apply a performance profile, the Node Tuning Operator detects the platform and configures kernel arguments and other platform-specific options accordingly. This release adds support for detecting the following platforms:
-
-* AMD64
-* AArch64
-* Intel 64
-
-[id="ocp-4-18-support-for-worker-nodes-on-amd-cpus_{context}"]
-==== Support for worker nodes with AMD EPYC Zen 4 CPUs
-
-With this release, you can use the `PerformanceProfile` custom resource (CR) to configure worker nodes on machines equipped with AMD EPYC Zen 4 CPUs (Genoa and Bergamo). These CPUs are fully supported.
-
-[IMPORTANT]
-====
-The per-pod power management feature is not functional on AMD EPYC Zen 4 CPUs.
-====
-
-////
-[id="ocp-release-notes-etcd-certificates_{context}"]
-=== Security
-////
-
-[id="ocp-release-notes-storage_{context}"]
-=== Storage
-
-[id="ocp-4-18-storage-editing-overprovisioning-ratio_{context}"]
-==== Over-provisioning ratio update after LVMCluster custom resource creation
-Previously, the `thinPoolConfig.overprovisionRatio` field in the `LVMCluster` custom resource (CR) could be configured only during the creation of the `LVMCluster` CR. With this release, you can update the `thinPoolConfig.overprovisionRatio` field even after creating the `LVMCluster` CR.
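-
-The following is a minimal sketch of the relevant `LVMCluster` fields; the device class name, thin pool name, and values are illustrative:
-
-[source,yaml]
-----
-apiVersion: lvm.topolvm.io/v1alpha1
-kind: LVMCluster
-metadata:
-  name: my-lvmcluster
-  namespace: openshift-storage
-spec:
-  storage:
-    deviceClasses:
-    - name: vg1
-      thinPoolConfig:
-        name: thin-pool-1
-        sizePercent: 90
-        overprovisionRatio: 10  # can now be updated after the LVMCluster CR is created
-----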
-
-[id="ocp-4-18-storage-configuring-metadata-size_{context}"]
-==== Support for configuring metadata size for the thin pool
-
-This feature provides the following new optional fields in the `LVMCluster` custom resource (CR):
-
-* `thinPoolConfig.metadataSizeCalculationPolicy`: Specifies the policy to calculate the metadata size for the underlying volume group. You can set this field to either `Static` or `Host`. By default, this field is set to `Host`.
-* `thinPoolConfig.metadataSize`: Specifies the metadata size for the thin pool. You can configure this field only when the `metadataSizeCalculationPolicy` field is set to `Static`.
-
-For more information, see xref:../storage/persistent_storage/persistent_storage_local/persistent-storage-using-lvms.adoc#about-lvmcluster_logical-volume-manager-storage[About the LVMCluster custom resource].
-
-[id="ocp-4-18-release-notes-storage-cifs-smb-csi-driver_{context}"]
-==== Persistent storage using CIFS/SMB CSI Driver Operator is generally available
-{product-title} can provision persistent volumes (PVs) with a Container Storage Interface (CSI) driver for the Common Internet File System (CIFS) dialect/Server Message Block (SMB) protocol. The CIFS/SMB CSI Driver Operator that manages this driver was introduced in {product-title} 4.16 with Technology Preview status. In {product-title} 4.18, it is now generally available.
-
-For more information, see xref:../storage/container_storage_interface/persistent-storage-csi-smb-cifs.adoc#persistent-storage-csi-smb-cifs[CIFS/SMB CSI Driver Operator].
-
-[id="ocp-4-18-release-notes-storage-secret-csi-driver_{context}"]
-==== Secrets Store CSI Driver Operator is generally available
-The Secrets Store Container Storage Interface (CSI) Driver Operator, `secrets-store.csi.k8s.io`, allows {product-title} to mount multiple secrets, keys, and certificates stored in enterprise-grade external secrets stores into pods as an inline ephemeral volume. The Secrets Store CSI Driver Operator communicates with the provider by using gRPC to fetch the mount contents from the specified external secrets store. After the volume is attached, the data in it is mounted into the container's file system. The Secrets Store CSI Driver Operator was available in {product-title} 4.14 as a Technology Preview feature. {product-title} 4.18 introduces this feature as generally available.
-
-For more information about the Secrets Store CSI driver, see xref:../storage/container_storage_interface/persistent-storage-csi-secrets-store.adoc#persistent-storage-csi-secrets-store[Secrets Store CSI Driver Operator].
-
-For information about using the Secrets Store CSI Driver Operator to mount secrets from an external secrets store to a CSI volume, see xref:../nodes/pods/nodes-pods-secrets-store.adoc[Providing sensitive data to pods by using an external secrets store].
-
-[id="ocp-4-18-release-notes-storage-pv-last-phase-transition-time_{context}"]
-==== Persistent volume last phase transition time parameter is generally available
-{product-title} 4.16 introduced a new parameter, `LastPhaseTransitionTime`, which records a timestamp that is updated every time a persistent volume (PV) transitions to a different phase (`pv.Status.Phase`). For {product-title} 4.18, this feature is generally available.
-
-For more information about using the persistent volume last phase transition time parameter, see xref:../storage/understanding-persistent-storage.adoc#last-phase-transition-time[Last phase transition time].
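-
-For example, you can read the timestamp from the status of a PV; the PV name is a placeholder:
-
-[source,terminal]
-----
-$ oc get pv <pv_name> -o jsonpath='{.status.lastPhaseTransitionTime}'
-----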
-
-[id="ocp-4-18-release-notes-storage-volume-multi-vcenter-support_{context}"]
-==== Multiple vCenter support for vSphere CSI is generally available
-{product-title} 4.17 introduced the ability to deploy {product-title} across multiple vSphere clusters (vCenters) as a Technology Preview feature. In {product-title} 4.18, multiple vCenter support is now generally available.
-
-For more information, see xref:../storage/container_storage_interface/persistent-storage-csi-vsphere.adoc#persistent-storage-csi-vsphere-multi-vcenter-support-overview_persistent-storage-csi-vsphere[Multiple vCenter support for vSphere CSI] and xref:../installing/installing_vsphere/installation-config-parameters-vsphere.adoc[Installation configuration parameters for vSphere].
-
-[id="ocp-4-18-release-notes-storage-always-honor-reclaim-policy_{context}"]
-==== Always honor persistent volume reclaim policy (Technology Preview)
-Prior to {product-title} 4.18, the persistent volume (PV) reclaim policy was not always applied.
-
-For a bound PV and persistent volume claim (PVC) pair, the ordering of PV-PVC deletion determined whether the PV delete reclaim policy was applied. The PV applied the reclaim policy if the PVC was deleted before the PV. However, if the PV was deleted before the PVC, the reclaim policy was not applied. As a result of that behavior, the associated storage asset in the external infrastructure was not removed.
-
-With {product-title} 4.18, the PV reclaim policy is consistently applied. This feature has Technology Preview status.
-
-For more information, see xref:../storage/understanding-persistent-storage.adoc#reclaiming_understanding-persistent-storage[Reclaim policy for persistent volumes].
-
-[id="ocp-4-18-release-notes-storage-lso-cleanup_{context}"]
-==== Improved ability to remove LVs or LVSs for LSO is generally available
-For the Local Storage Operator (LSO), {product-title} 4.18 improves the ability to remove local volumes (LVs) and local volume sets (LVSs) by automatically removing artifacts, thus reducing the number of steps required.
-
-For more information, see xref:../storage/persistent_storage/persistent_storage_local/persistent-storage-local.adoc#local-removing-device_persistent-storage-local[Removing a local volume or local volume set].
-
-[id="ocp-4-18-release-notes-storage-volume-group-snapshots_{context}"]
-==== CSI volume group snapshots (Technology Preview)
-{product-title} 4.18 introduces Container Storage Interface (CSI) volume group snapshots as a Technology Preview feature. Your CSI driver must support this feature. CSI volume group snapshots use a label selector to group multiple persistent volume claims (PVCs) for snapshotting. A volume group snapshot represents copies of multiple volumes that are taken at the same point in time. This can be useful for applications that contain multiple volumes.
-
-{rh-storage} supports volume group snapshots.
-
-For more information about CSI volume group snapshots, see xref:../storage/container_storage_interface/persistent-storage-csi-group-snapshots.adoc#persistent-storage-csi-group-snapshots[CSI volume group snapshots].
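-
-As a sketch, a group snapshot selects PVCs by label. The API version shown (`groupsnapshot.storage.k8s.io/v1beta1`), the class name, and the labels are assumptions and might differ in your cluster:
-
-[source,yaml]
-----
-apiVersion: groupsnapshot.storage.k8s.io/v1beta1
-kind: VolumeGroupSnapshot
-metadata:
-  name: my-group-snapshot
-  namespace: my-app
-spec:
-  volumeGroupSnapshotClassName: my-group-snapshot-class  # assumed class name
-  source:
-    selector:
-      matchLabels:
-        group: my-app-volumes  # PVCs labeled for group snapshotting
-----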
-
-[id="ocp-4-18-release-notes-storage-c3-n4-instance-types-hyperdisk_{context}"]
-==== GCP PD CSI driver support for the C3 instance type for bare metal and N4 machine series is generally available
-The Google Cloud Platform Persistent Disk (GCP PD) Container Storage Interface (CSI) driver supports the C3 instance type for bare metal and the N4 machine series. The C3 instance type and N4 machine series support hyperdisk-balanced disks.
-
-Additionally, hyperdisk storage pools are supported for large-scale storage. A hyperdisk storage pool is a purchased collection of capacity, throughput, and IOPS, which you can then provision for your applications as needed.
-
-For {product-title} 4.18, this feature is generally available.
-
-For more information, see xref:../storage/container_storage_interface/persistent-storage-csi-gcp-pd.adoc#c3-instance-type-for-bare-metal-and-n4-machine-series[C3 instance type for bare metal and N4 machine series].
-
-[id="ocp-4-18-release-notes-storage-openstack-manila-resize-support_{context}"]
-==== OpenStack Manila expanding persistent volumes is generally available
-In {product-title} 4.18, OpenStack Manila supports expanding Container Storage Interface (CSI) persistent volumes (PVs). This feature is generally available.
-
-For more information, see xref:../storage/expanding-persistent-volumes.adoc#expanding-persistent-volumes[Expanding persistent volumes] and xref:../storage/container_storage_interface/persistent-storage-csi.adoc#csi-drivers-supported_persistent-storage-csi[CSI drivers supported by {product-title}].
-
-[id="ocp-4-18-release-notes-storage-gcp-wif-support_{context}"]
-==== GCP Filestore supporting Workload Identity is generally available
-In {product-title} 4.18, Google Cloud Platform (GCP) Filestore Container Storage Interface (CSI) storage supports Workload Identity. This allows users to access Google Cloud resources by using federated identities instead of a service account key. For {product-title} 4.18, this feature is generally available.
-
-For more information, see xref:../storage/container_storage_interface/persistent-storage-csi-google-cloud-file.adoc[Google Cloud Platform Filestore CSI Driver Operator].
-
-[id="ocp-release-notes-web-console_{context}"]
-=== Web console
-
-[id="ocp-4-18-administrator-perspective_{context}"]
-==== Administrator perspective
-
-This release introduces the following updates to the *Administrator* perspective of the web console:
-
-* A new setting for hiding the *Getting started resources* card on the *Overview* page, allowing for maximum use of the dashboard.
-* A *Start Job* option was added to the CronJob *List* and *Details* pages, so that you can manually start individual CronJobs in the web console without having to use the `oc` CLI.
-* The *Import YAML* button in the masthead is now a *Quick Create* button that you can use for the rapid deployment of workloads by importing from YAML, importing from Git, or using container images.
-* You can build your own generative-AI chat bot with a chat bot sample. The generative-AI chat bot sample is deployed with Helm and includes a full CI/CD pipeline. You can also run this sample on your cluster with no GPUs.
-* You can import YAML into the console by using {ols}.
-
-[id="ocp-4-18-administrator-perspective_content-security-policy{context}"]
-===== Content Security Policy (CSP)
-
-With this release, the console Content Security Policy (CSP) is deployed in report-only mode. CSP violations are logged in the browser console, but the associated CSP directives are not enforced. Dynamic plugin creators can add their own policies.
-
-Additionally, you can report any plugins that break security policies, and administrators can disable any plugin that breaks those policies. This feature is behind a feature gate, so you must enable it manually.
-
-For more information, see xref:../web_console/dynamic-plugin/content-security-policy.adoc#content-security-policy[Content Security Policy (CSP)] and xref:../nodes/clusters/nodes-cluster-enabling-features.adoc#nodes-cluster-enabling-features-console_nodes-cluster-enabling[Enabling feature sets using the web console].
-
-[id="ocp-4-18-developer-perspective_{context}"]
-==== Developer perspective
-
-This release introduces the following updates to the *Developer* perspective of the web console:
-
-* Added a {product-title} toolkit, Quarkus tools and JBoss EAP, and a Language Server Protocol plugin for Visual Studio Code and IntelliJ.
-* Previously, the Monaco code editor did not follow the console theme when you switched between light mode and dark mode, so the editor remained in dark mode. With this update, the Monaco code editor matches the selected theme.
-
-[id="ocp-4-18-notable-technical-changes_{context}"]
-== Notable technical changes
-
-[discrete]
-[id="ocp-4-18-notable-technical-changes-uninstall-sr-iov-operator_{context}"]
-=== Uninstalling the SR-IOV Network Operator changed
-
-Starting with {product-title} {product-version}, to successfully uninstall the SR-IOV Network Operator, you must also delete the `sriovoperatorconfigs` custom resource and its custom resource definition.
-
-For more information, see xref:../networking/networking_operators/sr-iov-operator/uninstalling-sriov-operator.adoc#nw-sriov-operator-uninstall_uninstalling-sr-iov-operator[Uninstalling the SR-IOV Network Operator].
-
-[discrete]
-[id="ocp-4-18-rhcos-iscsi-initiator_{context}"]
-=== Changes to the iSCSI initiator name and service
-
-Previously, the `/etc/iscsi/initiatorname.iscsi` file was present by default on {op-system} images. With this release, the `initiatorname.iscsi` file is no longer present by default. Instead, it is created at run time when the `iscsi.service` and subsequent `iscsi-init.service` services start. This service is not enabled by default and might affect any CSI drivers that rely on reading the contents of the `initiatorname.iscsi` file before the service starts.
-
-[discrete]
-[id="ocp-4-18-operator-sdk-1-38-0_{context}"]
-=== Operator SDK 1.38.0
-
-{product-title} {product-version} supports Operator SDK 1.38.0. See xref:../cli_reference/osdk/cli-osdk-install.adoc#cli-osdk-install[Installing the Operator SDK CLI] to install or update to this latest version.
-
-Operator SDK 1.38.0 now supports Kubernetes 1.30 and uses Kubebuilder v4.
-
-Metrics endpoints are now secured by using the native Kubebuilder link:https://book.kubebuilder.io/reference/metrics[metrics configuration] instead of `kube-rbac-proxy`, which is now removed.
-
-The following support has also been removed from Operator SDK:
-
-* Scaffolding tools for Hybrid Helm-based Operator projects
-
-* Scaffolding tools for Java-based Operator projects
-
-If you have Operator projects that were previously created or maintained with Operator SDK 1.36.1, update your projects to maintain compatibility with Operator SDK 1.38.0:
-
-* xref:../operators/operator_sdk/golang/osdk-golang-updating-projects.adoc#osdk-upgrading-projects_osdk-golang-updating-projects[Updating Go-based Operator projects]
-
-* xref:../operators/operator_sdk/ansible/osdk-ansible-updating-projects.adoc#osdk-upgrading-projects_osdk-ansible-updating-projects[Updating Ansible-based Operator projects]
-
-* xref:../operators/operator_sdk/helm/osdk-helm-updating-projects.adoc#osdk-upgrading-projects_osdk-helm-updating-projects[Updating Helm-based Operator projects]
-
-[id="ocp-4-18-deprecated-removed-features_{context}"]
-== Deprecated and removed features
-
-Some features available in previous releases have been deprecated or removed.
-
-Deprecated functionality is still included in {product-title} and continues to be supported; however, it will be removed in a future release of this product and is not recommended for new deployments. For the most recent list of major functionality deprecated and removed within {product-title} {product-version}, refer to the following tables. Additional details for more functionality that has been deprecated and removed are listed after the tables.
-
-In the following tables, features are marked with the following statuses:
-
-* _Not Available_
-* _Technology Preview_
-* _General Availability_
-* _Deprecated_
-* _Removed_
-
-[discrete]
-[id="ocp-release-note-bare-metal-dep-rem_{context}"]
-=== Bare metal monitoring deprecated and removed features
-
-.Bare Metal Event Relay Operator tracker
-[cols="4,1,1,1",options="header"]
-|====
-|Feature |4.16 |4.17 |4.18
-
-|Bare Metal Event Relay Operator
-|Deprecated
-|Removed
-|Removed
-|====
-
-[discrete]
-[id="ocp-release-note-images-dep-rem_{context}"]
-=== Images deprecated and removed features
-
-.Images deprecated and removed tracker
-[cols="4,1,1,1",options="header"]
-|====
-|Feature |4.16 |4.17 |4.18
-
-|Cluster Samples Operator
-|Deprecated
-|Deprecated
-|Deprecated
-|====
-
-[discrete]
-[id="ocp-release-note-install-dep-rem_{context}"]
-=== Installation deprecated and removed features
-
-.Installation deprecated and removed tracker
-[cols="4,1,1,1",options="header"]
-|====
-|Feature |4.16 |4.17 |4.18
-
-|`--cloud` parameter for `oc adm release extract`
-|Deprecated
-|Deprecated
-|Deprecated
-
-|CoreDNS wildcard queries for the `cluster.local` domain
-|Deprecated
-|Deprecated
-|Deprecated
-
-|`compute.platform.openstack.rootVolume.type` for {rh-openstack}
-|Deprecated
-|Deprecated
-|Deprecated
-
-|`controlPlane.platform.openstack.rootVolume.type` for {rh-openstack}
-|Deprecated
-|Deprecated
-|Deprecated
-
-|`ingressVIP` and `apiVIP` settings in the `install-config.yaml` file for installer-provisioned infrastructure clusters
-|Deprecated
-|Deprecated
-|Deprecated
-
-|Package-based {op-system-base} compute machines
-|Deprecated
-|Deprecated
-|Deprecated
-
-|Managing machines with the Cluster API for {azure-full}
-|Not Available
-|Not Available
-|Technology Preview
-
-|`platform.aws.preserveBootstrapIgnition` parameter for {aws-first}
-|Deprecated
-|Deprecated
-|Deprecated
-
-|Installing a cluster on {aws-short} with compute nodes in {aws-short} Outposts
-|Deprecated
-|Deprecated
-|Deprecated
-|====
-
-
-[discrete]
-=== Machine management deprecated and removed features
-
-.Machine management deprecated and removed tracker
-[cols="4,1,1,1",options="header"]
-|====
-|Feature |4.16 |4.17 |4.18
-
-|Managing machines with the Machine API for {alibaba}
-|Removed
-|Removed
-|Removed
-
-|Cloud controller manager for {alibaba}
-|Removed
-|Removed
-|Removed
-
-|====
-
-////
-[discrete]
-[id="ocp-release-note-monitoring-dep-rem_{context}"]
-=== Monitoring deprecated and removed features
-
-.Monitoring deprecated and removed tracker
-[cols="4,1,1,1",options="header"]
-|====
-|Feature |4.16 |4.17 |4.18
-|====
-////
-
-
-[discrete]
-[id="ocp-release-note-networking-dep-rem_{context}"]
-=== Networking deprecated and removed features
-
-.Networking deprecated and removed tracker
-[cols="4,1,1,1",options="header"]
-|====
-|Feature |4.16 |4.17 |4.18
-
-|OpenShift SDN network plugin
-|Deprecated
-|Removed
-|Removed
-
-|iptables
-|Deprecated
-|Deprecated
-|Deprecated
-
-|====
-
-[discrete]
-[id="ocp-release-note-node-dep-rem_{context}"]
-=== Node deprecated and removed features
-
-.Node deprecated and removed tracker
-[cols="4,1,1,1",options="header"]
-|====
-|Feature |4.16 |4.17 |4.18
-
-|`ImageContentSourcePolicy` (ICSP) objects
-|Deprecated
-|Deprecated
-|Deprecated
-
-|Kubernetes topology label `failure-domain.beta.kubernetes.io/zone`
-|Deprecated
-|Deprecated
-|Deprecated
-
-|Kubernetes topology label `failure-domain.beta.kubernetes.io/region`
-|Deprecated
-|Deprecated
-|Deprecated
-
-|cgroup v1
-|Deprecated
-|Deprecated
-|Deprecated
-|====
-
-[discrete]
-[id="ocp-release-note-cli-dep-rem_{context}"]
-=== OpenShift CLI (oc) deprecated and removed features
-
-.OpenShift CLI (oc) deprecated and removed tracker
-[cols="4,1,1,1",options="header"]
-|====
-|Feature |4.16 |4.17 |4.18
-
-|oc-mirror plugin v1
-|General Availability
-|General Availability
-|Deprecated
-
-|====
-
-
-[discrete]
-[id="ocp-release-note-operators-dep-rem_{context}"]
-=== Operator lifecycle and development deprecated and removed features
-
-// "Operator lifecycle" refers to OLMv0 and "development" refers to Operator SDK
-
-.Operator lifecycle and development deprecated and removed tracker
-[cols="4,1,1,1",options="header"]
-|====
-|Feature |4.16 |4.17 |4.18
-
-|Operator SDK
-|Deprecated
-|Deprecated
-|Deprecated
-
-|Scaffolding tools for Ansible-based Operator projects
-|Deprecated
-|Deprecated
-|Deprecated
-
-|Scaffolding tools for Helm-based Operator projects
-|Deprecated
-|Deprecated
-|Deprecated
-
-|Scaffolding tools for Go-based Operator projects
-|Deprecated
-|Deprecated
-|Deprecated
-
-|Scaffolding tools for Hybrid Helm-based Operator projects
-|Deprecated
-|Deprecated
-|Removed
-
-|Scaffolding tools for Java-based Operator projects
-|Deprecated
-|Deprecated
-|Removed
-
-// Do not remove the SQLite database... entry until otherwise directed by the Operator Framework PM
-|SQLite database format for Operator catalogs
-|Deprecated
-|Deprecated
-|Deprecated
-|====
-
-////
-[discrete]
-[id="ocp-4-18-hardware-an-driver-dep-rem_{context}"]
-=== Specialized hardware and driver enablement deprecated and removed features
-
-.Specialized hardware and driver enablement deprecated and removed tracker
-[cols="4,1,1,1",options="header"]
-|====
-|Feature |4.16 |4.17 |4.18
-|====
-////
-
-[discrete]
-=== Storage deprecated and removed features
-
-.Storage deprecated and removed tracker
-[cols="4,1,1,1",options="header"]
-|====
-|Feature |4.16 |4.17 |4.18
-
-|AliCloud Disk CSI Driver Operator
-|General Availability
-|Removed
-|Removed
-
-|Shared Resource CSI Driver Operator
-|Technology Preview
-|Deprecated
-|Removed
-|====
-
-////
-[discrete]
-[id="ocp-4-18-clusters-dep-rem_{context}"]
-=== Updating clusters deprecated and removed features
-
-.Updating clusters deprecated and removed tracker
-[cols="4,1,1,1",options="header"]
-|====
-|Feature |4.16 |4.17 |4.18
-|====
-////
-
-[discrete]
-[id="ocp-release-note-web-console-dep-rem_{context}"]
-=== Web console deprecated and removed features
-
-.Web console deprecated and removed tracker
-[cols="4,1,1,1",options="header"]
-|====
-|Feature |4.16 |4.17 |4.18
-
-|PatternFly 4
-|Deprecated
-|Deprecated
-|Deprecated
-
-|React Router 5
-|Deprecated
-|Deprecated
-|Deprecated
-|====
-
-[discrete]
-[id="ocp-release-note-workloads-dep-rem_{context}"]
-=== Workloads deprecated and removed features
-
-.Workloads deprecated and removed tracker
-[cols="4,1,1,1",options="header"]
-|====
-|Feature |4.16 |4.17 |4.18
-
-|`DeploymentConfig` objects
-|Deprecated
-|Deprecated
-|Deprecated
-|====
-
-////
-[id="ocp-4-18-deprecated-features_{context}"]
-=== Deprecated features
-////
-
-[id="ocp-4-18-removed-features_{context}"]
-=== Removed features
-
-[id="ocp-4-18-removals-storage_{context}"]
-==== The Shared Resource CSI Driver is removed
-The Shared Resource CSI Driver feature was deprecated in {product-title} 4.17 and is now removed from {product-title} 4.18. This feature is now generally available in Builds for Red Hat OpenShift 1.1. To use this feature, ensure that you are using Builds for Red Hat OpenShift 1.1 or later.
-
-[id="ocp-4-18-selected-bundled-removed_{context}"]
-==== The selected bundles feature is removed in oc-mirror v2
-
-The selected bundles feature is removed from the oc-mirror v2 Generally Available release. This change prevents issues where specifying the wrong Operator bundle version could break the Operators in a cluster. (link:https://issues.redhat.com/browse/OCPBUGS-49419[*OCPBUGS-49419*])
-
-[id="ocp-4-18-future-deprecation_{context}"]
-=== Notice of future deprecation
-
-[id="ocp-4-18-future-removals"]
-==== Future Kubernetes API removals
-
-// Kubernetes 1.32 isn't released yet, but it will be by the time OCP 4.18 comes out
-The next minor release of {product-title} is expected to use Kubernetes 1.32, which removes APIs that were previously deprecated.
-
-See the link:https://kubernetes.io/docs/reference/using-api/deprecation-guide/#v1-32[Deprecated API Migration Guide] in the upstream Kubernetes documentation for the list of planned Kubernetes API removals.
-
-See link:https://access.redhat.com/articles/6955985[Navigating Kubernetes API deprecations and removals] for information about how to check your cluster for Kubernetes APIs that are planned for removal.
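-
-To check your cluster for APIs that are planned for removal, you can query `APIRequestCount` resources. As a sketch, the following query lists APIs that are flagged with a removal release:
-
-[source,terminal]
-----
-$ oc get apirequestcounts -o jsonpath='{range .items[?(@.status.removedInRelease!="")]}{.status.removedInRelease}{"\t"}{.metadata.name}{"\n"}{end}'
-----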
-
-[id="ocp-4-18-bug-fixes_{context}"]
-== Bug fixes
-//Bug fix work for TELCODOCS-750
-//Bare Metal Hardware Provisioning / OS Image Provider
-//Bare Metal Hardware Provisioning / baremetal-operator
-//Bare Metal Hardware Provisioning / cluster-baremetal-operator
-//Bare Metal Hardware Provisioning / ironic
-//CNF Platform Validation
-//Cloud Native Events / Cloud Event Proxy
-//Cloud Native Events / Cloud Native Events
-//Cloud Native Events / Hardware Event Proxy
-//Cloud Native Events
-//Driver Toolkit
-//Installer / Assisted installer
-//Installer / OpenShift on Bare Metal IPI
-//Networking / ptp
-//Node Feature Discovery Operator
-//Performance Addon Operator
-//Telco Edge / HW Event Operator
-//Telco Edge / RAN
-//Telco Edge / TALO
-//Telco Edge / ZTP
-
-[discrete]
-[id="ocp-release-note-api-auth-bug-fixes_{context}"]
-==== API Server and Authentication
-
-* Previously, API validation did not prevent an authorized client from decreasing the current revision of a static pod operand, such as kube-apiserver, or prevent the operand from progressing concurrently on two nodes. With this release, requests that attempt to do either are now rejected. (link:https://issues.redhat.com/browse/OCPBUGS-48502[*OCPBUGS-48502*])
-
-* Previously, the oauth-server would crash when configuring an OAuth identity provider (IDP) with a callback path that contained spaces. With this release, the issue is resolved. (link:https://issues.redhat.com/browse/OCPBUGS-44099[*OCPBUGS-44099*])
-
-[discrete]
-[id="ocp-release-note-bare-metal-hardware-bug-fixes_{context}"]
-==== Bare Metal Hardware Provisioning
-
-* Previously, the Bare Metal Operator (BMO) created the `HostFirmwareComponents` custom resource for all bare-metal hosts (BMHs), including ones based on the Intelligent Platform Management Interface (IPMI), which did not support it. With this release, `HostFirmwareComponents` custom resources are only created for BMHs that support it. (link:https://issues.redhat.com/browse/OCPBUGS-49699[*OCPBUGS-49699*])
-
-* Previously, in bare-metal configurations where the provisioning network is disabled but the `bootstrapProvisioningIP` field is set, the bare-metal provisioning components might fail to start. These failures occur when the provisioning process reconfigures the external network interface on the bootstrap VM during the process of pulling container images. With this release, dependencies were added to ensure that interface reconfiguration only occurs when the network is idle, preventing conflicts with other processes. As a result, the bare-metal provisioning components now start reliably, even when the `bootstrapProvisioningIP` field is set and the provisioning network is disabled. (link:https://issues.redhat.com/browse/OCPBUGS-36869[*OCPBUGS-36869*])
-
-* Previously, Ironic inspection failed if special or invalid characters existed in the serial number of a block device. This occurred because the `lsblk` command failed to escape the characters. With this release, the command now escapes the characters so this issue no longer occurs. (link:https://issues.redhat.com/browse/OCPBUGS-36492[*OCPBUGS-36492*])
-
-* Previously, a check for unexpected IP addresses on the provisioning interface was triggered during metal3 pod startup. This issue occurred because of the presence of an IP address supplied by DHCP to a previous version of the pod that existed on another node. With this release, a pod startup check now looks only for IP addresses that exist outside the provisioning network subnet, so that a metal3 pod starts immediately, even when the pod has moved to a different node. (link:https://issues.redhat.com/browse/OCPBUGS-38507[*OCPBUGS-38507*])
-
-* Previously, enabling a provisioning network by editing the cluster-wide `Provisioning` resource was only possible on installer-provisioned infrastructure clusters with platform type `baremetal`. On bare metal, {sno}, and user-provisioned infrastructure clusters, editing this resource resulted in a validation error. With this release, the excessive validation check has been removed, and enabling a provisioning network is now possible on bare-metal clusters with platform type `none`. As with installer-provisioned infrastructure clusters, users are responsible for making sure that all networking requirements are met for this operation. (link:https://issues.redhat.com/browse/OCPBUGS-43371[*OCPBUGS-43371*])
-
-////
-[discrete]
-[id="ocp-release-note-builds-bug-fixes_{context}"]
-==== Builds
-////
-
-[discrete]
-[id="ocp-release-note-cloud-compute-bug-fixes_{context}"]
-==== Cloud Compute
-
-* Previously, the availability set fault domain count was hardcoded to `2`. This value works in most regions in {azure-full} because the fault domain counts are typically at least `2`, but it failed in the `centraluseuap` and `eastusstg` regions. With this release, the availability set fault domain count in a region is set dynamically. (link:https://issues.redhat.com/browse/OCPBUGS-48659[*OCPBUGS-48659*])
-
-* Previously, an updated zone API error message from {gcp-first} with increased granularity caused the machine controller to mistakenly treat an invalid machine configuration error as a temporary cloud error. This prevented the invalid machine from transitioning to a `failed` state. With this update, the machine controller handles the new error messages correctly, and machines with an invalid zone or project ID now transition properly to a `failed` state. (link:https://issues.redhat.com/browse/OCPBUGS-47790[*OCPBUGS-47790*])
-
-* Previously, the certificate signing request (CSR) approver included certificates from other systems within its calculations for whether it was overwhelmed and should stop approving certificates. In larger clusters with other subsystems using CSRs, the CSR approver counted unrelated unapproved CSRs towards its total and prevented further approvals. With this release, the CSR approver only includes CSRs that it can approve, by using the `signerName` property as a filter. As a result, the CSR approver only prevents new approvals when there are a large number of unapproved CSRs for the relevant `signerName` values. (link:https://issues.redhat.com/browse/OCPBUGS-46425[*OCPBUGS-46425*])
-
-* Previously, some cluster autoscaler metrics were not initialized, and therefore were not available. With this release, these metrics are initialized and available. (link:https://issues.redhat.com/browse/OCPBUGS-46416[*OCPBUGS-46416*])
-
-* Previously, if an informer watch stream missed an event because of a temporary disconnection, the informer might return a special signal type after it reconnected to the network, especially when the informer recognized that an EndpointSlice object was deleted during the temporary disconnection. The returned signal type indicated that the state of the event had stalled and that the object was deleted. The returned signal type was not accurate and might have caused confusion for a {product-title} user. With this release, the Cloud Controller Manager (CCM) handles unexpected signal types so that {product-title} users do not receive confusing information from returned types. (link:https://issues.redhat.com/browse/OCPBUGS-45972[*OCPBUGS-45972*])
-
-* Previously, when the {aws-short} DHCP option set was configured to use a custom domain name that contains a trailing period (`.`), {product-title} installation failed. With this release, the logic that extracts the hostname of EC2 instances and turns them into Kubelet node names is updated to trim trailing periods so that the resulting Kubernetes object name is valid. Trailing periods in the DHCP option set no longer cause installation to fail. (link:https://issues.redhat.com/browse/OCPBUGS-45889[*OCPBUGS-45889*])
-
-* Previously, installation of an {aws-short} cluster failed in certain environments on existing subnets when the `publicIp` parameter for the `MachineSet` object was explicitly set to `false`. With this release, a configuration value set for `publicIp` no longer causes issues when the installation program provisions machines for your {aws-short} cluster in certain environments. (link:https://issues.redhat.com/browse/OCPBUGS-45130[*OCPBUGS-45130*])
-
-* Previously, the installation program populated the `network.devices`, `template`, and `workspace` fields in the `spec.template.spec.providerSpec.value` section of the {vmw-full} control plane machine set custom resource (CR). These fields should be set in the {vmw-short} failure domain, and the installation program populating them caused unintended behaviors. Updating these fields did not trigger an update to the control plane machines, and these fields were cleared when the control plane machine set was deleted. With this release, the installation program is updated to no longer populate values that are included in the failure domain configuration. If these values are not defined in a failure domain configuration, for instance on a cluster that is updated to {product-title} {product-version} from an earlier version, the values defined by the installation program are used. (link:https://issues.redhat.com/browse/OCPBUGS-42660[*OCPBUGS-42660*])
-
-* Previously, the cluster autoscaler would occasionally leave a node with a `PreferNoSchedule` taint during deletion. With this release, the maximum bulk deletion limit is disabled so that nodes with this taint no longer remain after deletion. (link:https://issues.redhat.com/browse/OCPBUGS-42132[*OCPBUGS-42132*])
-
-* Previously, the Cloud Controller Manager (CCM) liveness probe used on {ibm-cloud-title} cluster installations could not use loopback, and this caused the probe to continuously restart. With this release, the probe can use loopback so that this issue no longer occurs. (link:https://issues.redhat.com/browse/OCPBUGS-41936[*OCPBUGS-41936*])
-
-* Previously, the approval mechanism for certificate signing requests (CSRs) failed because the node name and internal DNS entry for a CSR did not match in terms of character case differences. With this release, an update to the approval mechanism for CSRs skips case-sensitive checks so that a CSR with a matching node name and internal DNS entry does not fail the check because of character case differences. (link:https://issues.redhat.com/browse/OCPBUGS-36871[*OCPBUGS-36871*])
-
-* Previously, the cloud node manager had permission to update any node object when it needed to update only the node on which it was running. With this release, restrictions have been put in place to prevent the cloud node manager on one node from updating the node object of another node. (link:https://issues.redhat.com/browse/OCPBUGS-22190[*OCPBUGS-22190*])
-
-[discrete]
-[id="ocp-release-note-cloud-cred-operator-bug-fixes_{context}"]
-==== Cloud Credential Operator
-
-* Previously, the `aws-sdk-go-v2` software development kit (SDK) failed to authenticate an `AssumeRoleWithWebIdentity` API operation on an {aws-first} {sts-first} cluster. With this release, `pod-identity-webhook` now includes a default region so that this issue no longer occurs. (link:https://issues.redhat.com/browse/OCPBUGS-45937[*OCPBUGS-45937*])
-
-* Previously, secrets in the cluster were fetched in a single call. When there were a large number of secrets, this caused the API to time out. With this release, the Cloud Credential Operator fetches secrets in batches limited to 100 secrets. This change prevents timeouts when there are a large number of secrets in the cluster. (link:https://issues.redhat.com/browse/OCPBUGS-39531[*OCPBUGS-39531*])
-
-[discrete]
-[id="ocp-release-note-cluster-override-admin-operator-bug-fixes_{context}"]
-==== Cluster Resource Override Admission Operator
-
-* Previously, if you specified the `forceSelinuxRelabel` field in a `ClusterResourceOverride` custom resource (CR) and then modified it afterwards, the change was not reflected in the `clusterresourceoverride-configuration` config map, which is used to apply the SELinux re-labeling workaround feature. With this update, the Cluster Resource Override Operator tracks changes to the `forceSelinuxRelabel` field and reconciles the config map object. As a result, the config map object is correctly updated when you change the `ClusterResourceOverride` CR field. (link:https://issues.redhat.com/browse/OCPBUGS-48692[*OCPBUGS-48692*])
-
-[discrete]
-[id="ocp-release-note-cluster-version-operator-bug-fixes_{context}"]
-==== Cluster Version Operator
-
-* Previously, a custom security context constraint (SCC) could prevent any pod that was generated by the Cluster Version Operator from receiving a cluster version upgrade. With this release, {product-title} now sets a default SCC on each pod so that a custom SCC does not impact the pods. (link:https://issues.redhat.com/browse/OCPBUGS-46410[*OCPBUGS-46410*])
-
-* Previously, the Cluster Version Operator (CVO) did not filter internal errors that were propagated to the `ClusterVersion Failing` condition message. As a result, errors that did not negatively impact the update were shown in the `ClusterVersion Failing` condition message. With this release, the errors that are propagated to the `ClusterVersion Failing` condition message are filtered. (link:https://issues.redhat.com/browse/OCPBUGS-15200[*OCPBUGS-15200*])
-
-[discrete]
-[id="ocp-release-note-dev-console-bug-fixes_{context}"]
-==== Developer Console
-
-* Previously, if a `PipelineRun` was using a resolver, rerunning that `PipelineRun` resulted in an error. With this fix, you can rerun a `PipelineRun` that uses a resolver. (link:https://issues.redhat.com/browse/OCPBUGS-45228[*OCPBUGS-45228*])
-
-* Previously, if you edited a deployment config in *Form view*, the `ImagePullSecrets` values were duplicated. With this update, editing the form does not add duplicate entries. (link:https://issues.redhat.com/browse/OCPBUGS-45227[*OCPBUGS-45227*])
-
-* Previously, when you searched on the `OperatorHub` or another catalog, you would experience periods of latency between each key press. With this update, the input on the catalog search bars is debounced. (link:https://issues.redhat.com/browse/OCPBUGS-43799[*OCPBUGS-43799*])
-
-* Previously, no option existed to close the *Getting started resources* section in the *Administrator* perspective. With this change, users can close the *Getting started resources* section. (link:https://issues.redhat.com/browse/OCPBUGS-38860[*OCPBUGS-38860*])
-
-* Previously, when cron jobs were created, pods were created too quickly, which caused the component that fetches new pods of the cron job to fail. With this update, a 3-second delay was added before the pods of the cron job are fetched. (link:https://issues.redhat.com/browse/OCPBUGS-37584[*OCPBUGS-37584*])
-
-* Previously, resources created when a new user was created were not removed automatically when the user was deleted. This cluttered the cluster with config maps, roles, and role bindings. With this update, `ownerRefs` were added to the resources so that they are removed when the user is deleted and the cluster is no longer cluttered. (link:https://issues.redhat.com/browse/OCPBUGS-37560[*OCPBUGS-37560*])
-
-* Previously, when importing a Git repository using the serverless import strategy, the environment variables from the `func.yaml` file were not automatically loaded into the form. With this update, the environment variables are loaded upon import. (link:https://issues.redhat.com/browse/OCPBUGS-34764[*OCPBUGS-34764*])
-
-* Previously, users would erroneously see an option to import a repository using the pipeline build strategy when the devfile import strategy was selected; however, this was not possible. With this update, the pipeline build strategy option is removed when the devfile import strategy is selected. (link:https://issues.redhat.com/browse/OCPBUGS-32526[*OCPBUGS-32526*])
-
-* Previously, when using a custom template, you could not enter multi-line parameters, such as private keys. With this release, you can switch between single-line and multi-line modes so that you can fill out template fields with multi-line inputs. (link:https://issues.redhat.com/browse/OCPBUGS-23080[*OCPBUGS-23080*])
-
-////
-[discrete]
-[id="ocp-release-note-driver-toolkit-bug-fixes_{context}"]
-==== Driver ToolKit (DTK)
-
-[discrete]
-[id="ocp-release-note-cloud-etcd-operator-bug-fixes_{context}"]
-==== etcd Cluster Operator
-////
-
-[discrete]
-[id="ocp-release-note-image-registry-bug-fixes_{context}"]
-==== Image Registry
-
-* Previously, you could not install a cluster on {aws-short} in the `ap-southeast-5` region or other regions because the {product-title} internal registry did not support these regions. With this release, the internal registry is updated to include the following regions so that this issue no longer occurs:
With this release, the internal registry is updated to include the following regions so that this issue no longer occurs:
+
** `ap-southeast-5`
** `ap-southeast-7`
** `ca-west-1`
** `il-central-1`
** `mx-central-1`
+
(link:https://issues.redhat.com/browse/OCPBUGS-49693[*OCPBUGS-49693*])

* Previously, when the Image Registry Operator was configured with `networkAccess: Internal` in {azure-first}, it was not possible to successfully set `managementState` to `Removed` in the Operator configuration. This occurred because of an authorization error when the Operator tried to delete the storage container. With this update, the Image Registry Operator continues with the deletion of the storage account, which automatically deletes the storage container, resulting in a successful change into the `Removed` state. (link:https://issues.redhat.com/browse/OCPBUGS-42732[*OCPBUGS-42732*])

* Previously, when configuring the image registry to use an {azure-first} storage account located in a resource group other than the cluster's resource group, the Image Registry Operator became degraded because of a validation error. This update changes the Image Registry Operator to allow authentication by storage account key only, without validating other authentication requirements. (link:https://issues.redhat.com/browse/OCPBUGS-42514[*OCPBUGS-42514*])

* Previously, installation with the OpenShift installer used the Cluster API, and virtual networks created by the Cluster API use a different tag template. Consequently, setting `.spec.storage.azure.networkAccess.type: Internal` in the Image Registry Operator's `config.yaml` file left the Image Registry Operator unable to discover the virtual network. With this update, the Image Registry Operator searches for both new and old tag templates, resolving the issue. (link:https://issues.redhat.com/browse/OCPBUGS-42196[*OCPBUGS-42196*])
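+
The following is a minimal sketch of the registry configuration that these {azure-short} fixes relate to. The virtual network and resource group names are illustrative assumptions:
+
[source,yaml]
----
apiVersion: imageregistry.operator.openshift.io/v1
kind: Config
metadata:
  name: cluster
spec:
  managementState: Managed
  storage:
    azure:
      networkAccess:
        type: Internal                      # discovery of this vnet is what the fix above repairs
        internal:
          vnetName: example-vnet            # illustrative values
          networkResourceGroupName: example-rg
----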
* Previously, the image registry would, in some cases, panic when attempting to purge failed uploads from S3-compatible storage providers. This was caused by the image registry's S3 driver mishandling empty directory paths. With this update, the image registry properly handles empty directory paths, fixing the panic. (link:https://issues.redhat.com/browse/OCPBUGS-39108[*OCPBUGS-39108*])

[discrete]
[id="ocp-release-note-installer-bug-fixes_{context}"]
==== Installer

* Previously, installing a cluster with a Dynamic Host Configuration Protocol (DHCP) network on Nutanix caused a failure. With this release, this issue is resolved. (link:https://issues.redhat.com/browse/OCPBUGS-38118[*OCPBUGS-38118*])

* Previously, installing an {aws-short} cluster in either the Commercial Cloud Services (C2S) region or the Secret Commercial Cloud Services (SC2S) region failed because the installation program added unsupported security groups to the load balancer. With this release, the installation program no longer adds unsupported security groups to the load balancer for a cluster that needs to be installed in either the C2S region or SC2S region. (link:https://issues.redhat.com/browse/OCPBUGS-33311[*OCPBUGS-33311*])

* Previously, when installing a {gcp-first} cluster where instances required that IP forwarding was not set, the installation failed. With this release, IP forwarding is disabled for all {gcp-short} machines and the issue is resolved. (link:https://issues.redhat.com/browse/OCPBUGS-49842[*OCPBUGS-49842*])

* Previously, when installing a cluster on {aws-short} in existing subnets, for bring your own virtual private cloud (BYO VPC) in edge zones, the installation program did not tag the subnet edge resource with `kubernetes.io/cluster/:shared`. With this release, all subnets that are used in the `install-config.yaml` file contain the required tags. (link:https://issues.redhat.com/browse/OCPBUGS-49792[*OCPBUGS-49792*])

* Previously, a cluster created on {aws-first} could fail to deprovision if it lacked the `ec2:ReleaseAddress` permission to release the Elastic IP (EIP) address. This issue occurred when the cluster was created with the minimum permissions in an existing virtual private cloud (VPC), including an unmanaged VPC or bring your own (BYO) VPC, and a BYO Public IPv4 Pool address. With this release, the `ec2:ReleaseAddress` permission is included in the Identity and Access Management (IAM) policy generated during installation. (link:https://issues.redhat.com/browse/OCPBUGS-49735[*OCPBUGS-49735*])

* Previously, when installing a cluster on Nutanix, the installation program could fail with a timeout while uploading images to Prism Central. This occurred in some slower Prism Central environments when the Prism API attempted to load the {op-system-first} image, because the Prism API call timeout value was fixed at 5 minutes. With this release, the Prism API call timeout value is a configurable parameter, `platform.nutanix.prismAPICallTimeout`, in the `install-config.yaml` file, and the default timeout value is 10 minutes. (link:https://issues.redhat.com/browse/OCPBUGS-49148[*OCPBUGS-49148*])

* Previously, the `oc adm node-image monitor` command failed because of a temporary API server disconnection and then displayed an error or End of File message. With this release, the installation program ignores a temporary API server disconnection and the monitor command tries to connect to the API server again. (link:https://issues.redhat.com/browse/OCPBUGS-48714[*OCPBUGS-48714*])

* Previously, when you deleted backend service resources on {gcp-first}, some resources to be deleted were not found. For example, the associated forwarding rules, health checks, and firewall rules were not deleted. With this release, the installation program tries to find the backend service by name first, then searches for forwarding rules, health checks, and firewall rules before it determines whether those results match a backend service. The algorithm for associating resources is reversed and the appropriate resources are deleted. There are no leaked backend service resources and the issue is resolved. When you delete a private cluster, the forwarding rules, backend services, health checks, and firewall rules created by the Ingress Operator are not deleted. (link:https://issues.redhat.com/browse/OCPBUGS-48611[*OCPBUGS-48611*])

* Previously, {product-title} was not compliant with PCI-DSS/BaFin regulations. With this release, cross-tenant object replication in {azure-first} is unavailable. Consequently, the chance of unauthorized data access is reduced and strict adherence to data governance policies is ensured. (link:https://issues.redhat.com/browse/OCPBUGS-48118[*OCPBUGS-48118*])

* Previously, when you installed {product-title} on {aws-first} and specified an edge machine pool without an instance type, in some instances the edge node failed. With this release, if you specify an edge machine pool without an instance type, you must use the `ec2:DescribeInstanceTypeOfferings` permission. The permission derives the correct instance type available, based on the {aws-short} Local Zones or Wavelength Zones locations used. (link:https://issues.redhat.com/browse/OCPBUGS-47502[*OCPBUGS-47502*])
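+
The following is a minimal sketch of an edge machine pool in `install-config.yaml` that omits `instanceType` and therefore relies on this permission. The zone name and replica count are illustrative:
+
[source,yaml]
----
compute:
- name: edge
  replicas: 1
  platform:
    aws:
      zones:
      - us-east-1-nyc-1a   # example Local Zone; no instanceType is set, so the installer
                           # derives one by using ec2:DescribeInstanceTypeOfferings
----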
* Previously, when the API server disconnected temporarily, the `oc adm node-image monitor` command reported an end of file (EOF) error. With this release, when the API server disconnects temporarily, the monitor command does not fail. (link:https://issues.redhat.com/browse/OCPBUGS-46391[*OCPBUGS-46391*])

* Previously, when you specified the `HostedZoneRole` permission in the `install-config.yaml` file while creating a shared Virtual Private Cloud (VPC), you also had to specify the `sts:AssumeRole` permission. Otherwise, an error occurred. With this release, if you specify the `HostedZoneRole` permission, the installation program validates that the `sts:AssumeRole` permission is present. (link:https://issues.redhat.com/browse/OCPBUGS-46046[*OCPBUGS-46046*])

* Previously, when the `publicIpv4Pool` configuration parameter was used during installation, the `ec2:AllocateAddress` and `ec2:AssociateAddress` permissions were not validated. As a consequence, permission failures could occur during installation. With this release, the required permissions are validated before the cluster is installed and the issue is resolved. (link:https://issues.redhat.com/browse/OCPBUGS-45711[*OCPBUGS-45711*])

* Previously, during a disconnected installation, when the `imageContentSources` parameter was configured with more than one mirror for a source, the command to create the agent ISO image could fail, depending on the sequence of the mirror configuration. With this release, multiple mirrors are handled correctly when the agent ISO is created and the issue is resolved. (link:https://issues.redhat.com/browse/OCPBUGS-45630[*OCPBUGS-45630*])

* Previously, when installing a cluster by using the Cluster API on installer-provisioned infrastructure, the user had to provide a `machineNetwork` parameter. With this release, the installation program selects a `machineNetwork` value automatically. (link:https://issues.redhat.com/browse/OCPBUGS-45485[*OCPBUGS-45485*])

* Previously, during an installation on {aws-first}, the installation program used the wrong load balancer when searching for the `hostedZone` ID, which caused an error. With this release, the correct load balancer is used and the issue is resolved. (link:https://issues.redhat.com/browse/OCPBUGS-45301[*OCPBUGS-45301*])

* Previously, endpoint overrides in {ibm-power-server-title} were not conditional. As a consequence, endpoint overrides were created incorrectly and caused failures in Virtual Private Environments (VPE). With this release, endpoint overrides are conditional only for disconnected installations. (link:https://issues.redhat.com/browse/OCPBUGS-44922[*OCPBUGS-44922*])

* Previously, during a shared Virtual Private Cloud (VPC) installation, the installation program added the records to a private DNS zone created by the installation program instead of adding the records to the cluster's private DNS zone. As a consequence, the installation failed. With this release, the installation program searches for an existing private DNS zone and, if found, pairs that zone with the network that is supplied by the `install-config.yaml` file and the issue is resolved.
(link:https://issues.redhat.com/browse/OCPBUGS-44641[*OCPBUGS-44641*])

* Previously, the `oc adm drain --delete-local-data` command was not supported in the 4.18 `oc` CLI tool. With this release, the command has been updated to `oc adm drain --delete-emptydir-data`. (link:https://issues.redhat.com/browse/OCPBUGS-44318[*OCPBUGS-44318*])

* Previously, the US East (`wdc04`), US South (`dal13`), Sydney (`syd05`), and Toronto (`tor01`) regions were not supported for {ibm-power-server-title}. With this release, these regions, which include `PowerEdgeRouter` (PER) capabilities, are supported for {ibm-power-server-title}. (link:https://issues.redhat.com/browse/OCPBUGS-44312[*OCPBUGS-44312*])

* Previously, during a {gcp-first} installation, when the installation program created filters with large amounts of returned data, for example for subnets, it exceeded the quota for the maximum number of times that a resource can be filtered in a specific period. With this release, all relevant filtering is moved to the client so that the filter quotas are not exceeded and the issue is resolved. (link:https://issues.redhat.com/browse/OCPBUGS-44193[*OCPBUGS-44193*])

* Previously, during an {aws-first} installation, the installation program validated all the tags in the `install-config.yaml` file only when you set `propagateTags` to `true`. With this release, the installation program always validates all the tags in the `install-config.yaml` file. (link:https://issues.redhat.com/browse/OCPBUGS-44171[*OCPBUGS-44171*])

* Previously, if the `RendezvousIP` value matched a substring in the `next-hop-address` field of a compute node configuration, a validation error was reported. The `RendezvousIP` value must match a control plane host address only. With this release, the substring comparison for the `RendezvousIP` value is made against control plane host addresses only, so the error no longer occurs. (link:https://issues.redhat.com/browse/OCPBUGS-44167[*OCPBUGS-44167*])

* Previously, when you deleted a cluster in {ibm-power-server-title}, the Transit Gateway connections were always cleaned up. With this release, if the `tgName` parameter is set, the Transit Gateway connection is not cleaned up when you delete a cluster. (link:https://issues.redhat.com/browse/OCPBUGS-44162[*OCPBUGS-44162*])

* Previously, when installing a cluster on an {ibm-title} platform and adding an existing VPC to the cluster, the {cap-ibm-first} did not add ports 443, 5000, and 6443 to the security group of the VPC. This situation prevented the VPC from being added to the cluster. With this release, a fix ensures that the {cap-ibm-first} adds the ports to the security group of the VPC so that the VPC gets added to your cluster. (link:https://issues.redhat.com/browse/OCPBUGS-44068[*OCPBUGS-44068*])

* Previously, the {cap-ibm-first} module was very verbose. With this release, the verbosity of the module is reduced, which affects the output of the `.openshift_install.log` file. (link:https://issues.redhat.com/browse/OCPBUGS-44022[*OCPBUGS-44022*])

* Previously, when you deployed a cluster on a {ibm-power-server-title} zone, the load balancers were slow to create. As a consequence, the cluster failed. With this release, the {cap-ibm-first} no longer has to wait until all load balancers are ready and the issue is resolved.
(link:https://issues.redhat.com/browse/OCPBUGS-43923[*OCPBUGS-43923*])

* Previously, for the Agent-based Installer, all host validation status logs referred to the name of the first registered host. As a consequence, when a host validation failed, it was not possible to determine the problem host. With this release, each log message identifies the host to which it corresponds and the issue is resolved. (link:https://issues.redhat.com/browse/OCPBUGS-43768[*OCPBUGS-43768*])

* Previously, when you used the `oc adm node-image create` command to generate the image while running the Agent-based Installer and the step failed, the accompanying error message did not show the container log. The `oc adm node-image create` command uses a container to generate the image, and the basic error message did not show the underlying issue that caused the image generation failure. With this release, to help troubleshooting, the `oc adm node-image create` command shows the container log, so the underlying issue is displayed. (link:https://issues.redhat.com/browse/OCPBUGS-43757[*OCPBUGS-43757*])

* Previously, the Agent-based Installer failed to parse the `cloud_controller_manager` parameter in the `install-config.yaml` configuration file. This resulted in the Assisted Service API failing because it received an empty string, and this in turn caused the installation of the cluster to fail on {oci-first}. With this release, an update to the parsing logic ensures that the Agent-based Installer correctly interprets the `cloud_controller_manager` parameter so that the Assisted Service API receives the correct string value. As a result, the Agent-based Installer can now install a cluster on {oci}. (link:https://issues.redhat.com/browse/OCPBUGS-43674[*OCPBUGS-43674*])

* Previously, an update to the {azure-short} SDK for Go removed the `SendCertificateChain` option, and this changed the behavior of sending certificates. As a consequence, the full certificate chain was not sent. With this release, the option to send a full certificate chain is available and the issue is resolved. (link:https://issues.redhat.com/browse/OCPBUGS-43567[*OCPBUGS-43567*])

* Previously, when installing a cluster on {gcp-first} using the Cluster API implementation, the installation program did not distinguish between internal and external load balancers while creating firewall rules. As a consequence, the firewall rule for internal load balancers was open to all IP address sources, that is, `0.0.0.0/0`. With this release, the {cap-gcp-short} is updated to restrict firewall rules to the machine CIDR when using an internal load balancer. The firewall rule for internal load balancers is correctly limited to machine networks, that is, nodes in the cluster, and the issue is resolved. (link:https://issues.redhat.com/browse/OCPBUGS-43520[*OCPBUGS-43520*])

* Previously, when installing a cluster on {ibm-power-server-title}, the required security group rules were not created. With this release, the missing security group rules for installation are identified and created and the issue is resolved. (link:https://issues.redhat.com/browse/OCPBUGS-43518[*OCPBUGS-43518*])

* Previously, when you tried to add a compute node with the `oc adm node-image` command by using an instance that was previously created with {rh-openstack-first}, the operation failed.
With this release, the issue is resolved by correctly setting the user-managed networking configuration. (link:https://issues.redhat.com/browse/OCPBUGS-43513[*OCPBUGS-43513*])

* Previously, when destroying a cluster on {gcp-first}, a forwarding rule incorrectly blocked the installation program. As a consequence, the destroy process failed to complete. With this release, the issue is resolved by the installation program setting its state correctly and marking all destroyed resources as deleted. (link:https://issues.redhat.com/browse/OCPBUGS-42789[*OCPBUGS-42789*])

* Previously, when configuring an Agent-based Installer installation in a disconnected environment with more than one mirror for the same source, the installation might fail because one of the mirrors was not checked. With this release, all mirrors are used when multiple mirrors are defined for the same source and the issue is resolved. (link:https://issues.redhat.com/browse/OCPBUGS-42705[*OCPBUGS-42705*])

* Previously, you could not change the `AdditionalTrustBundlePolicy` parameter in the `install-config.yaml` file for the Agent-based Installer. The parameter was always set to `ProxyOnly`. With this release, you can set `AdditionalTrustBundlePolicy` to other values, for example, `Always`. By default, the parameter is set to `ProxyOnly`. (link:https://issues.redhat.com/browse/OCPBUGS-42670[*OCPBUGS-42670*])

* Previously, when you installed a cluster and tried to add a compute node with the `oc adm node-image` command, the operation could fail because the date, the time, or both might have been inaccurate. With this release, the issue is resolved by applying the same Network Time Protocol (NTP) configuration in the target cluster `MachineConfig` chrony resource to the node ephemeral live environment. (link:https://issues.redhat.com/browse/OCPBUGS-42544[*OCPBUGS-42544*])

* Previously, during installation, the name of the artifact that the `oc adm node-image create` command generated did not include the architecture in its file name. As a consequence, the file name was inconsistent with other generated ISOs. With this release, a patch fixes the name of the artifact that is generated by the `oc adm node-image create` command by including the referenced architecture as part of the file name and the issue is resolved. (link:https://issues.redhat.com/browse/OCPBUGS-42528[*OCPBUGS-42528*])

* Previously, the Agent-based Installer set the `assisted-service` object to a debug logging mode. Unintentionally, the `pprof` module in the `assisted-service` object, which uses port `6060`, was then turned on. As a consequence, there was a port conflict and the Cloud Credential Operator (CCO) did not run. When requested by the {vmw-first} Cloud Controller Manager (CCM), {vmw-short} secrets were not generated, the {rh-openstack} CCM failed to initialize the nodes, and the cluster installation was blocked. With this release, the `pprof` module in the `assisted-service` object does not run when invoked by the Agent-based Installer. As a result, the CCO runs correctly and cluster installations on {vmw-short} that use the Agent-based Installer succeed. (link:https://issues.redhat.com/browse/OCPBUGS-42525[*OCPBUGS-42525*])

* Previously, when a compute node was trying to join a cluster, the rendezvous node rebooted before the process completed. Because the compute node could not communicate as expected with the rendezvous node, the installation was not successful.
With this release, a patch is applied that fixes the race condition that caused the rendezvous node to reboot prematurely and the issue is resolved. (link:https://issues.redhat.com/browse/OCPBUGS-41811[*OCPBUGS-41811*])

* Previously, when using the {ai-full}, selecting a multi-architecture image for the `s390x` CPU architecture on {hybrid-console} could cause the installation to fail. The installation program reported an error that the new cluster was not created because the skip MCO reboot feature was not compatible with the `s390x` CPU architecture. With this release, the issue is resolved. (link:https://issues.redhat.com/browse/OCPBUGS-41716[*OCPBUGS-41716*])

* Previously, a coding issue caused the Ansible script on {rh-openstack} user-provisioned infrastructure installations to fail during the provisioning of compact clusters. This occurred when IPv6 was enabled for a three-node cluster. With this release, the issue is resolved and you can provision compact three-node clusters. (link:https://issues.redhat.com/browse/OCPBUGS-41538[*OCPBUGS-41538*])

* Previously, a coding issue caused the Ansible script on {rh-openstack} user-provisioned installation infrastructure to fail during the provisioning of compact clusters. This occurred when IPv6 was enabled for a three-node cluster. With this release, the issue is resolved and you can provision compact three-node clusters on {rh-openstack} for user-provisioned installation infrastructure. (link:https://issues.redhat.com/browse/OCPBUGS-39402[*OCPBUGS-39402*])

* Previously, the order of an Ansible Playbook was modified to run before the `metadata.json` file was created, which caused issues with older versions of Ansible. With this release, the playbook is more tolerant of missing files to accommodate older versions of Ansible and the issue is resolved. (link:https://issues.redhat.com/browse/OCPBUGS-39285[*OCPBUGS-39285*])

* Previously, when you installed a cluster, there were issues using a compute node because the date, the time, or both might have been inaccurate. With this release, a patch is applied to the live ISO time synchronization. The patch configures the `/etc/chrony.conf` file with the list of additional Network Time Protocol (NTP) servers that the user provides in the `agent-config.yaml` file, so that you can use a compute node without experiencing a cluster installation issue. (link:https://issues.redhat.com/browse/OCPBUGS-39231[*OCPBUGS-39231*])
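+
The following is a minimal sketch of how additional NTP servers are supplied in `agent-config.yaml`. The cluster name and server names are illustrative:
+
[source,yaml]
----
apiVersion: v1beta1
kind: AgentConfig
metadata:
  name: example-cluster
additionalNTPSources:       # written into /etc/chrony.conf on the live ISO
- 0.example.pool.ntp.org
- 1.example.pool.ntp.org
----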
* Previously, when installing a cluster on bare metal using installer-provisioned infrastructure, the installation could time out if the network to the bootstrap virtual machine was slow. With this update, the timeout duration has been increased to cover a wider range of network performance scenarios. (link:https://issues.redhat.com/browse/OCPBUGS-39081[*OCPBUGS-39081*])

* Previously, the `oc adm node-image create` command failed when run against a cluster in a restricted environment with a proxy because the command ignored the cluster-wide proxy setting. With this release, when the command is run it includes the cluster proxy resource settings, if available, to ensure the command runs successfully and the issue is resolved. (link:https://issues.redhat.com/browse/OCPBUGS-38990[*OCPBUGS-38990*])

* Previously, when installing a cluster on {gcp-first} into a shared Virtual Private Cloud (VPC) with a bring your own (BYO) hosted zone, the installation could fail due to an error creating the private managed zone. With this release, a fix ensures that where there is a preexisting private managed zone, the installation program skips creating a new one and the issue is resolved. (link:https://issues.redhat.com/browse/OCPBUGS-38966[*OCPBUGS-38966*])
// --page 5
* Previously, an installer-provisioned installation on {vmw-first} to run {product-title} 4.16 in a disconnected environment failed when the template could not be downloaded. With this release, the template is downloaded correctly and the issue is resolved. (link:https://issues.redhat.com/browse/OCPBUGS-38918[*OCPBUGS-38918*])

* Previously, during installation, the `oc adm node-image create` command used the `kube-system/cluster-config-v1` resource to determine the platform type. With this release, the installation program uses the infrastructure resource, which provides more accurate information about the platform type. (link:https://issues.redhat.com/browse/OCPBUGS-38802[*OCPBUGS-38802*])

* Previously, a rare condition on {vmw-first} Cluster API machines caused the vCenter session management to time out unexpectedly. With this release, Keep Alive support is disabled in the current and later versions of {cap-vsphere-short}, and the issue is resolved. (link:https://issues.redhat.com/browse/OCPBUGS-38657[*OCPBUGS-38657*])

* Previously, when a folder was undefined and the data center was located in a data center folder, a wrong folder structure was created starting from the root of the vCenter server, because the `Govmomi DatacenterFolders.VmFolder` used the wrong path. With this release, the folder structure uses the data center inventory path and joins it with the virtual machine (VM) and cluster ID value, and the issue is resolved. (link:https://issues.redhat.com/browse/OCPBUGS-38599[*OCPBUGS-38599*])

* Previously, the installation program on {gcp-first} filtered addresses to find and delete internal addresses only. The addition of {cap-gcp-first} provisioned resources included changes to address resources. With this release, {cap-gcp-short} creates external addresses and these are now included in a cluster cleanup operation. (link:https://issues.redhat.com/browse/OCPBUGS-38571[*OCPBUGS-38571*])

* Previously, if you specified an unsupported architecture in the `install-config.yaml` file, the installation program failed with a `connection refused` message. With this update, the installation program correctly validates that the specified cluster architecture is compatible with {product-title}, leading to successful installations. (link:https://issues.redhat.com/browse/OCPBUGS-38479[*OCPBUGS-38479*])

* Previously, when you used the Agent-based Installer to install a cluster, `assisted-installer-controller` timed out or exited the installation process when `assisted-service` was unavailable on the rendezvous host. This situation caused the cluster installation to fail during certificate signing request (CSR) approval checks. With this release, an update to `assisted-installer-controller` ensures that the controller does not time out or exit if `assisted-service` is unavailable. The CSR approval check now works as expected. (link:https://issues.redhat.com/browse/OCPBUGS-38466[*OCPBUGS-38466*])

* Previously, installing a cluster with a Dynamic Host Configuration Protocol (DHCP) network on Nutanix caused a failure. With this release, this issue is resolved.
(link:https://issues.redhat.com/browse/OCPBUGS-38118[*OCPBUGS-38118*])

* Previously, when the {vmw-first} vCenter cluster contained an ESXi host that did not have a standard port group defined and the installation program tried to select that host to import the OVA, the import failed and the error `Invalid Configuration for device 0` was reported. With this release, the installation program verifies whether a standard port group for an ESXi host is defined and, if not, continues until it locates an ESXi host with a defined standard port group, or reports an error message if it fails to locate one, resolving the issue. (link:https://issues.redhat.com/browse/OCPBUGS-37945[*OCPBUGS-37945*])

* Previously, due to an EFI Secure Boot failure in the SCOS, when the FCOS pivoted to the SCOS the virtual machine (VM) failed to boot. With this release, Secure Boot is disabled only when Secure Boot is enabled in the `coreos.ovf` configuration file, and the issue is resolved. (link:https://issues.redhat.com/browse/OCPBUGS-37736[*OCPBUGS-37736*])

* Previously, when deprecated fields were used alongside supported fields with the installation program on {vmw-first}, a validation error message was reported. With this release, warning messages are added specifying that using deprecated fields alongside supported fields is not recommended with the installation program on {vmw-first}. (link:https://issues.redhat.com/browse/OCPBUGS-37628[*OCPBUGS-37628*])

* Previously, if you tried to install a second cluster using existing Azure Virtual Networks (VNet) on {azure-first}, the installation failed. When the front-end IP address of the API server load balancer was not specified, the Cluster API fixed the address to `10.0.0.100`. Because this IP address was already taken by the first cluster, the second load balancer failed to install. With this release, a dynamic IP address check determines whether the default IP address is available. If it is unavailable, the next available address is selected, and you can install the second cluster successfully with a different load balancer IP address. (link:https://issues.redhat.com/browse/OCPBUGS-37442[*OCPBUGS-37442*])

* Previously, the installation program attempted to download the OVA on {vmw-first} whether the template field was defined or not. With this update, the issue is resolved. The installation program verifies whether the template field is defined. If the template field is not defined, the OVA is downloaded. If the template field is defined, the OVA is not downloaded. (link:https://issues.redhat.com/browse/OCPBUGS-36494[*OCPBUGS-36494*])

* Previously, when installing a cluster on {ibm-cloud-title}, the installation program checked only the first group of subnets, that is, 50 subnets, when searching for subnet details by name. With this release, pagination support is provided to search all subnets. (link:https://issues.redhat.com/browse/OCPBUGS-36236[*OCPBUGS-36236*])

* Previously, when installing {cap-gcp-first} into a shared Virtual Private Cloud (VPC) without the required `compute.firewalls.create` permission, the installation failed because no firewall rules were created. With this release, a fix ensures that the rule to create the firewall is skipped during installation and the issue is resolved.
(link:https://issues.redhat.com/browse/OCPBUGS-35262[*OCPBUGS-35262*])

* Previously, for the Agent-based Installer, a networking layout defined through nmstate could result in a configuration error if a host did not have an entry in the interfaces section that matched an entry in the `networkConfig` section. However, if the entry in the `networkConfig` section uses a physical interface name, then the entry in the interfaces section is not required.
+
This fix ensures that the configuration does not result in an error if an entry in the `networkConfig` section has a physical interface name and does not have a corresponding entry in the interfaces section. (link:https://issues.redhat.com/browse/OCPBUGS-34849[*OCPBUGS-34849*])

* Previously, the `container-tools` module was enabled by default on the {op-system-base} node. With this release, the `container-tools` module is disabled to install the correct package between conflicting repositories. (link:https://issues.redhat.com/browse/OCPBUGS-34844[*OCPBUGS-34844*])

[discrete]
[id="ocp-release-note-insights-operator-bug-fixes_{context}"]
==== Insights Operator

* Previously, during entitled builds on a Red{nbsp}Hat {product-title} cluster running on {ibm-z-title} hardware, repositories were not enabled. This issue has been resolved. You can now enable repositories during entitled builds on a Red{nbsp}Hat {product-title} cluster running on {ibm-z-title} hardware. (link:https://issues.redhat.com/browse/OCPBUGS-32233[*OCPBUGS-32233*])

////
[discrete]
[id="ocp-release-note-kube-controller-bug-fixes_{context}"]
==== Kubernetes Controller Manager

[discrete]
[id="ocp-release-note-kube-scheduler-bug-fixes_{context}"]
==== Kubernetes Scheduler
////

[discrete]
[id="ocp-release-note-machine-config-operator-bug-fixes_{context}"]
==== Machine Config Operator

* Previously, {op-system-base-full} CoreOS templates that were shipped by the Machine Config Operator (MCO) caused node scaling to fail on {rh-openstack-first}. This happened because of an issue with `systemd` and the presence of a legacy boot image from older versions of {product-title}. With this release, a patch fixes the issue with `systemd` and removes the legacy boot image, so that node scaling can continue as expected. (link:https://issues.redhat.com/browse/OCPBUGS-42324[*OCPBUGS-42324*])

* Previously, if you enabled on-cluster layering (OCL) for your cluster and you attempted to configure kernel arguments in the machine configuration, machine config pools (MCPs) and nodes entered a degraded state because of a configuration mismatch. With this release, a check for kernel arguments on an OCL-enabled cluster ensures that the arguments are configured and applied to nodes in the cluster. This update prevents the mismatch that previously occurred between the machine configuration and the node configuration. (link:https://issues.redhat.com/browse/OCPBUGS-34647[*OCPBUGS-34647*])
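+
The following is a minimal sketch of a `MachineConfig` object that sets kernel arguments of the kind this fix now reconciles on OCL-enabled clusters. The object name and the argument itself are illustrative:
+
[source,yaml]
----
apiVersion: machineconfiguration.openshift.io/v1
kind: MachineConfig
metadata:
  name: 99-worker-example-kargs
  labels:
    machineconfiguration.openshift.io/role: worker  # targets the worker pool
spec:
  kernelArguments:
  - example_karg=1   # illustrative kernel argument
----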
[discrete]
[id="ocp-release-note-management-console-bug-fixes_{context}"]
==== Management Console

* Previously, clicking the "Don't show again" link in the Lightspeed modal dialog did not correctly navigate to the general *User Preference* tab when one of the other *User Preference* tabs was displayed. After this update, clicking the "Don't show again" link correctly navigates to the general *User Preference* tab. (link:https://issues.redhat.com/browse/OCPBUGS-48106[*OCPBUGS-48106*])

* Previously, multiple external link icons could show in the primary action button of the OperatorHub modal. With this update, only a single external link icon appears. (link:https://issues.redhat.com/browse/OCPBUGS-47742[*OCPBUGS-47742*])

* Previously, the web console was disabled when the authorization type was set to `None` in the cluster authentication configuration. With this update, the web console is no longer disabled when the authorization type is set to `None`. (link:https://issues.redhat.com/browse/OCPBUGS-46068[*OCPBUGS-46068*])

* Previously, the *MachineConfig Details* tab displayed an error when one or more `spec.config.storage.files` entries did not include optional data. With this update, the error no longer occurs and the *Details* tab renders as expected. (link:https://issues.redhat.com/browse/OCPBUGS-44049[*OCPBUGS-44049*])

* Previously, an extra name property was passed into the resource list page extensions used to list related Operands on the *CSV details* page. As a result, the Operand list was filtered by the cluster service version (CSV) name and often returned an empty list. With this update, Operands are listed as expected. (link:https://issues.redhat.com/browse/OCPBUGS-42796[*OCPBUGS-42796*])

* Previously, the *Sample* tab did not show when creating a new `ConfigMap`, even when one or more `ConfigMap` `ConsoleYAMLSamples` were present on the cluster. After this update, the *Sample* tab shows when one or more `ConfigMap` `ConsoleYAMLSamples` are present. (link:https://issues.redhat.com/browse/OCPBUGS-41492[*OCPBUGS-41492*])

* Previously, the *Events* page resource type filter incorrectly reported the number of resources when three or more resources were selected. With this update, the filter always reports the correct number of resources. (link:https://issues.redhat.com/browse/OCPBUGS-38701[*OCPBUGS-38701*])

* Previously, the version number text in the updates graph on the *Cluster Settings* page appeared as black text on a dark background while viewing the page using Firefox in dark mode. With this update, the text appears as white text. (link:https://issues.redhat.com/browse/OCPBUGS-37988[*OCPBUGS-37988*])

* Previously, *Alerting* pages did not show resource information in their empty state. With this update, resource information is available on the *Alerting* pages. (link:https://issues.redhat.com/browse/OCPBUGS-36921[*OCPBUGS-36921*])

* Previously, the Operator Lifecycle Manager (OLM) CSV annotation contained unexpected JSON, which was successfully parsed but then threw a runtime error when the resulting value was used. With this update, JSON values from OLM annotations are validated before use, errors are logged, and the console does not fail when unexpected JSON is received in an annotation. (link:https://issues.redhat.com/browse/OCPBUGS-35744[*OCPBUGS-35744*])

* Previously, silenced alerts were visible on the *Overview* page of the {product-title} web console. This occurred because the alerts did not include any external labels. With this release, silenced alerts include the external labels so they are filtered out and are not viewable. (link:https://issues.redhat.com/browse/OCPBUGS-31367[*OCPBUGS-31367*])

[discrete]
[id="ocp-release-note-monitoring-bug-fixes_{context}"]
==== Monitoring

* Previously, if the SMTP `smarthost` or `from` fields under the `emailConfigs` object were not specified at the global or receiver level in the `AlertmanagerConfig` custom resource (CR), Alertmanager crashed because these fields are required. With this release, the Prometheus Operator fails reconciliation if these fields are not specified. Therefore, the Prometheus Operator no longer pushes invalid configurations to Alertmanager, preventing it from crashing. (link:https://issues.redhat.com/browse/OCPBUGS-48050[*OCPBUGS-48050*])
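+
The following is a minimal sketch of an `AlertmanagerConfig` CR with the required fields set. The API version, namespace, addresses, and SMTP server are illustrative assumptions:
+
[source,yaml]
----
apiVersion: monitoring.coreos.com/v1beta1
kind: AlertmanagerConfig
metadata:
  name: example-email-config
  namespace: example-project
spec:
  route:
    receiver: team-email
  receivers:
  - name: team-email
    emailConfigs:
    - to: team@example.com
      from: alerts@example.com         # required; omitting it previously crashed Alertmanager
      smarthost: smtp.example.com:587  # required
----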
* Previously, the {cmo-first} did not mark configurations in the `cluster-monitoring-config` and `user-workload-monitoring-config` config maps as invalid for unknown (for example, no longer supported) or duplicated fields. With this release, stricter validation is added that helps identify such errors. (link:https://issues.redhat.com/browse/OCPBUGS-42671[*OCPBUGS-42671*])

* Previously, it was not possible for a user to query the user workload monitoring Thanos API endpoint with `POST` requests. With this update, a cluster administrator can bind the new `pod-metrics-reader` cluster role with a role binding or cluster role binding to allow `POST` queries for a user or service account. (link:https://issues.redhat.com/browse/OCPBUGS-41158[*OCPBUGS-41158*])

* Previously, an invalid config map configuration for core platform monitoring, user workload monitoring, or both caused the {cmo-first} to report an `InvalidConfiguration` error. With this release, if only the user workload monitoring configuration is invalid, {cmo-short} reports `UserWorkloadInvalidConfiguration`, making it clear where the issue is located. (link:https://issues.redhat.com/browse/OCPBUGS-33863[*OCPBUGS-33863*])

* Previously, `telemeter-client` containers showed `TelemeterClientFailures` warning messages in multiple clusters. With this release, a runbook is added for the `TelemeterClientFailures` alert that explains the cause of the alert triggering, and the alert provides resolution steps. (link:https://issues.redhat.com/browse/OCPBUGS-33285[*OCPBUGS-33285*])

* Previously, `AlertmanagerConfig` objects with invalid child routes generated an invalid Alertmanager configuration, leading to Alertmanager disruption. With this release, the Prometheus Operator rejects such `AlertmanagerConfig` objects, and users receive a warning about the invalid child routes in logs. (link:https://issues.redhat.com/browse/OCPBUGS-30122[*OCPBUGS-30122*])

* Previously, the `config-reloader` for Prometheus for user-defined projects failed if unset environment variables were used in the `ServiceMonitor` configuration, which resulted in Prometheus pods failing. With this release, the reloader no longer fails when an unset environment variable is encountered. Instead, unset environment variables are left as they are, while set environment variables are expanded as usual. Any expansion errors, suppressed or otherwise, can be tracked through the `reloader_config_environment_variable_expansion_errors` metric. (link:https://issues.redhat.com/browse/OCPBUGS-23252[*OCPBUGS-23252*])

[discrete]
[id="ocp-release-note-networking-bug-fixes_{context}"]
==== Networking

* Previously, enabling Encapsulating Security Payload (ESP) hardware offload when using IPsec on Open vSwitch attached interfaces broke connectivity in your cluster.
To resolve this issue, {product-title} now disables ESP hardware offload on Open vSwitch attached interfaces by default. (link:https://issues.redhat.com/browse/OCPBUGS-42987[*OCPBUGS-42987*])

* Previously, if you deleted the default `sriovOperatorConfig` custom resource (CR), you could not recreate it, because the `ValidatingWebhookConfiguration` was not deleted. With this release, the Single Root I/O Virtualization (SR-IOV) Network Operator removes validating webhooks when you delete the `sriovOperatorConfig` CR, so that you can create a new `sriovOperatorConfig` CR. (link:https://issues.redhat.com/browse/OCPBUGS-41897[*OCPBUGS-41897*])

* Previously, if you set custom annotations in a custom resource (CR), the SR-IOV Operator overrode all the default annotations in the `SriovNetwork` CR. With this release, when you define custom annotations in a CR, the SR-IOV Operator does not override the default annotations. (link:https://issues.redhat.com/browse/OCPBUGS-41352[*OCPBUGS-41352*])

* Previously, bonds that were configured in `active-backup` mode had IPsec Encapsulating Security Payload (ESP) offload active even if the underlying links did not support ESP offload. This caused IPsec associations to fail. With this release, ESP offload is disabled for bonds so that IPsec associations succeed. (link:https://issues.redhat.com/browse/OCPBUGS-39438[*OCPBUGS-39438*])

* Previously, the Machine Config Operator (MCO) vSphere `resolve-prepender` script used `systemd` directives that were incompatible with old boot image versions used in {product-title} 4. With this release, nodes can scale by using boot image versions from {product-title} 4.13 and later, through manual intervention, or by upgrading to a release that includes this fix. (link:https://issues.redhat.com/browse/OCPBUGS-38012[*OCPBUGS-38012*])

* Previously, the Ingress Controller status incorrectly displayed as `Degraded=False` because of a migration time issue with the `CanaryRepetitiveFailures` condition. With this release, the Ingress Controller status is correctly marked as `Degraded=True` for the appropriate length of time that the `CanaryRepetitiveFailures` condition exists. (link:https://issues.redhat.com/browse/OCPBUGS-37491[*OCPBUGS-37491*])

* Previously, when a pod ran on a node that was assigned an egress IPv6 address, the pod was not able to communicate with the Kubernetes service in a dual-stack cluster, because traffic of the IP family that the egress IP did not apply to was dropped. With this release, only the source network address translation (SNAT) for the IP family that the egress IP applies to is deleted, eliminating the risk of traffic being dropped. (link:https://issues.redhat.com/browse/OCPBUGS-37193[*OCPBUGS-37193*])

* Previously, the Single Root I/O Virtualization (SR-IOV) Operator did not expire the acquired lease during the Operator's shutdown operation. This impacted a new instance of the Operator, because the new instance had to wait for the lease to expire before it was operational. With this release, an update to the Operator shutdown logic ensures that the Operator expires the lease when the Operator is shutting down. (link:https://issues.redhat.com/browse/OCPBUGS-23795[*OCPBUGS-23795*])

* Previously, for an Ingress resource with an `IngressWithoutClassName` alert, the Ingress Controller did not delete the alert along with the resource. The alert continued to show on the {product-title} web console. With this release, the Ingress Controller resets the `openshift_ingress_to_route_controller_ingress_without_class_name` metric to `0` before the controller deletes the Ingress resource, so that the alert is deleted and no longer shows on the web console. (link:https://issues.redhat.com/browse/OCPBUGS-13181[*OCPBUGS-13181*])

* Previously, when either the `clusterNetwork` or `serviceNetwork` IP address pools overlapped with the default `transit_switch_subnet` `100.88.0.0/16` IP address and the custom value of `transit_switch_subnet` did not take effect, `ovnkube-node` pods crashed after the live migration operation. With this release, the custom value of `transit_switch_subnet` can be passed to `ovnkube-node` pods, so that this issue no longer occurs. (link:https://issues.redhat.com/browse/OCPBUGS-43740[*OCPBUGS-43740*])

* Previously, a change in OVN-Kubernetes that standardized the `appProtocol` value `h2c` to `kubernetes.io/h2c` was not recognized by the OpenShift router. Consequently, specifying `appProtocol: kubernetes.io/h2c` on a service did not cause the OpenShift router to use clear-text HTTP/2 to connect to the service endpoints. With this release, the OpenShift router handles `appProtocol: kubernetes.io/h2c` the same way as it handles `appProtocol: h2c`, resolving the issue. (link:https://issues.redhat.com/browse/OCPBUGS-42972[*OCPBUGS-42972*])
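+
The following is a minimal sketch of a service that requests clear-text HTTP/2 by using the standardized value. The names and ports are illustrative:
+
[source,yaml]
----
apiVersion: v1
kind: Service
metadata:
  name: example-backend
  namespace: example-project
spec:
  selector:
    app: example-backend
  ports:
  - name: http2
    port: 80
    targetPort: 8080
    appProtocol: kubernetes.io/h2c  # now treated the same as h2c by the router
----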
* Previously, instructions that guided the user after changing the `LoadBalancer` parameter from `External` to `Internal` were missing for {ibm-power-server-title}, {alibaba}, and {rh-openstack-first}. This caused the Ingress Controller to be put in a permanent `Progressing` state. With this release, the message `The IngressController scope was changed from Internal to External` is followed by `To effectuate this change, you must delete the service`, resolving the permanent `Progressing` state. (link:https://issues.redhat.com/browse/OCPBUGS-39151[*OCPBUGS-39151*])

* Previously, no event was logged when an ingress-to-route conversion failed. With this update, the error appears in the event logs. (link:https://issues.redhat.com/browse/OCPBUGS-29354[*OCPBUGS-29354*])

* Previously, an `ovnkube-node` pod on a node that uses cgroup v1 failed because it could not find the kubelet cgroup path. With this release, an `ovnkube-node` pod no longer fails if the node uses cgroup v1. However, the OVN-Kubernetes network plugin outputs an `UDNKubeletProbesNotSupported` event notification. If you enable cgroup v2 for each node, OVN-Kubernetes no longer outputs the event notification. (link:https://issues.redhat.com/browse/OCPBUGS-50513[*OCPBUGS-50513*])

* Previously, when you finished the live migration for a kubevirt virtual machine (VM) that uses the Layer 2 topology, an old node still transmitted IPv4 egress traffic to the virtual machine. With this release, the OVN-Kubernetes plugin updates the gateway MAC address for a kubevirt VM during the live migration process so that this issue no longer occurs. (link:https://issues.redhat.com/browse/OCPBUGS-49857[*OCPBUGS-49857*])

* Previously, the DNS-based egress firewall incorrectly prevented creation of a firewall rule that contained a DNS name in uppercase characters. With this release, a fix ensures that the egress firewall no longer prevents creation of a firewall rule that contains a DNS name in uppercase characters. (link:https://issues.redhat.com/browse/OCPBUGS-49589[*OCPBUGS-49589*])
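+
The following is a minimal sketch of an `EgressFirewall` rule with an uppercase DNS name, which is now accepted. The namespace and DNS name are illustrative:
+
[source,yaml]
----
apiVersion: k8s.ovn.org/v1
kind: EgressFirewall
metadata:
  name: default
  namespace: example-project
spec:
  egress:
  - type: Allow
    to:
      dnsName: UPDATES.EXAMPLE.COM  # uppercase names previously failed validation
  - type: Deny
    to:
      cidrSelector: 0.0.0.0/0
----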
* Previously, when you attempted to use the Cluster Network Operator (CNO) to upgrade a cluster with existing `localnet` networks, `ovnkube-control-plane` pods failed to run. This happened because the `ovnkube-cluster-manager` container could not process an OVN-Kubernetes `localnet` topology network that did not have subnets defined. With this release, a fix ensures that the `ovnkube-cluster-manager` container can process an OVN-Kubernetes `localnet` topology network that does not have subnets defined. (link:https://issues.redhat.com/browse/OCPBUGS-44195[*OCPBUGS-44195*])

* Previously, the SR-IOV Network Operator could not retrieve metadata when cloud-native network functions (CNF) workers were deployed with a configuration drive on {rh-openstack-first}. A configuration drive is often unmounted after a boot operation on immutable systems, so the Operator now dynamically mounts a configuration drive when required. The Operator can now retrieve the metadata and then unmount the configuration drive. This means that you no longer need to manually mount and unmount the configuration drive. (link:https://issues.redhat.com/browse/OCPBUGS-41829[*OCPBUGS-41829*])

* Previously, when you switched your cluster to use a different load balancer, the Ingress Operator did not remove the values from the `classicLoadBalancer` and `networkLoadBalancer` parameters in the `IngressController` custom resource (CR) status. This situation caused the status of the CR to report wrong information from the `classicLoadBalancer` and `networkLoadBalancer` parameters. With this release, after you switch your cluster to use a different load balancer, the Ingress Operator removes the values from these parameters so that the CR reports a more accurate and less confusing status. (link:https://issues.redhat.com/browse/OCPBUGS-38217[*OCPBUGS-38217*])

* Previously, a duplicate feature gate, `ExternalRouteCertificate`, was added to the `FeatureGate` CR. With this release, `ExternalRouteCertificate` is removed because a {product-title} cluster does not use this feature gate. (link:https://issues.redhat.com/browse/OCPBUGS-36479[*OCPBUGS-36479*])

* Previously, after a user created a route, the user needed both `create` and `update` permissions on the `routes/custom-host` sub-resource to edit the `.spec.tls.externalCertificate` field of a route. With this release, this permission requirement has been fixed, so that a user only needs the `create` permission to edit the `.spec.tls.externalCertificate` field of a route. The `update` permission is now marked as an optional permission. (link:https://issues.redhat.com/browse/OCPBUGS-34373[*OCPBUGS-34373*])

[discrete]
[id="ocp-release-note-node-bug-fixes_{context}"]
==== Node

* Previously, the `cadvisor` code that collected and reported container network metrics contained a bug that caused inaccurate results. With this release, the container network metrics are correctly reported. (link:https://issues.redhat.com/browse/OCPBUGS-38515[*OCPBUGS-38515*])

[discrete]
[id="ocp-release-note-node-tuning-operator-bug-fixes_{context}"]
==== Node Tuning Operator (NTO)

* Previously, CPU masks for interrupt and network handling CPU affinity were computed incorrectly on machines with more than 256 CPUs. This issue prevented proper CPU isolation and caused `systemd` unit failures during internal node configuration. This fix ensures accurate CPU affinity calculations, enabling correct CPU isolation on machines with more than 256 CPUs. (link:https://issues.redhat.com/browse/OCPBUGS-36431[*OCPBUGS-36431*])
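+
The following is a minimal sketch of the `spec.cpu` fields in a `PerformanceProfile` resource that drive this CPU isolation, and whose values the webhook fixes described next now validate. The CPU ranges are illustrative:
+
[source,yaml]
----
apiVersion: performance.openshift.io/v2
kind: PerformanceProfile
metadata:
  name: example-performanceprofile
spec:
  cpu:
    reserved: "0-3"    # CPUs kept for housekeeping; must be a valid cpuset string
    isolated: "4-255"  # CPUs isolated for workloads
----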
* Previously, entering an invalid value in any `cpuset` field under `spec.cpu` in the `PerformanceProfile` resource caused the webhook validation to crash. With this release, improved error handling for the `PerformanceProfile` validation webhook ensures that invalid values for these fields return an informative error. (link:https://issues.redhat.com/browse/OCPBUGS-45616[*OCPBUGS-45616*])

* Previously, users could enter an invalid string for any CPU set in the performance profile, resulting in a broken cluster. With this release, the fix ensures that only valid strings can be entered, eliminating the risk of cluster breakage. (link:https://issues.redhat.com/browse/OCPBUGS-47678[*OCPBUGS-47678*])

* Previously, configuring the Node Tuning Operator (NTO) using `PerformanceProfiles` created the `ocp-tuned-one-shot` `systemd` service, which ran before the kubelet and blocked its execution. The `systemd` service invoked Podman, which used the NTO image, and when the NTO image was not present, Podman tried to fetch the image. With this release, support for cluster-wide proxy environment variables defined in `/etc/mco/proxy.env` is added. This support allows Podman to pull the NTO image in environments that need to use an HTTP or HTTPS proxy for out-of-cluster connections. (link:https://issues.redhat.com/browse/OCPBUGS-39005[*OCPBUGS-39005*])

[discrete]
[id="ocp-release-note-observability-bug-fixes_{context}"]
==== Observability

* Previously, a namespace was passed to a full cluster query on the alerts graph, and this caused the tenancy API path to be used. The API lacked permissions to retrieve data, so no data was shown on the alerts graph. With this release, the namespace is no longer passed to a full cluster query for an alert graph. A non-tenancy API path is now used because this API has the correct permissions to retrieve data. Data is now available on an alert graph. (link:https://issues.redhat.com/browse/OCPBUGS-46371[*OCPBUGS-46371*])

* Previously, bounds were based on the first bar in a bar chart. If a bar was larger than the first bar, the bar extended beyond the bar chart boundary. With this release, the bound for a bar chart is based on the largest bar, so no bars extend outside the boundary of a bar chart. (link:https://issues.redhat.com/browse/OCPBUGS-46059[*OCPBUGS-46059*])

* Previously, a {rh-rhacm-first} Alerting UI refactor caused an `isEmpty` check to go missing on the *Observe* -> *Metrics* menu. The missing check inverted the behavior of the *Show all Series* and *Hide all Series* states. This release re-adds the `isEmpty` check so that *Show all Series* is now visible when series are hidden and *Hide all Series* is now visible when the series are shown. (link:https://issues.redhat.com/browse/OCPBUGS-46047[*OCPBUGS-46047*])

* Previously, on the *Observe* -> *Alerting* -> *Silences* tab, the `DateTime` component changed the ordering of an event and its value. Because of this issue, you could not edit the `until` parameter for a silence in either the *Developer* or the *Administrator* perspective. With this release, a fix to the `DateTime` component means that you can now edit the `until` parameter for a silence.
(link:https://issues.redhat.com/browse/OCPBUGS-46021[*OCPBUGS-46021*])

* Previously, when using the *Developer* perspective with custom editors, pressing the `n` key caused the *Namespace* menu to open unexpectedly. The issue happened because the keyboard shortcut did not account for custom editors. With this release, the *Namespace* menu accounts for custom editors and does not open when you press the `n` key in a custom editor. (link:https://issues.redhat.com/browse/OCPBUGS-38775[*OCPBUGS-38775*])

* Previously, on the *Observe* -> *Alerting* -> *Silences* tab, the `creator` field was not autopopulated and was not designated as mandatory. This issue happened because the API left the field empty from {product-title} 4.15 onwards. With this update, the field is marked as mandatory and populated with the current user for correct validation. (link:https://issues.redhat.com/browse/OCPBUGS-35048[*OCPBUGS-35048*])

[discrete]
[id="ocp-release-note-oc-mirror-bug-fixes_{context}"]
==== oc-mirror

* Previously, when using the `oc-mirror --v2 delete --generate` command, the contents of the `working-dir/cluster-resources` directory were cleared. With this fix, the `working-dir/cluster-resources` directory is not cleared when the delete feature is used. (link:https://issues.redhat.com/browse/OCPBUGS-48430[*OCPBUGS-48430*])

* Previously, release images were signed using a `SHA-1` key. On {op-system-base} 9 FIPS STIG-compliant machines, verification of release signatures using the old `SHA-1` key failed due to security restrictions on weak keys. With this release, release images are signed using a new `SHA-256` trusted key so that the release signature verification no longer fails. (link:https://issues.redhat.com/browse/OCPBUGS-48314[*OCPBUGS-48314*])

// Subhashini to confirm the TP status on these bugs
////

* Previously, oc-mirror v2 Technology Preview did not support images referenced by both `tag` and `digest`. These images were excluded from the archive during the mirror-to-disk workflow, causing failures when mirroring from the archive to a disconnected mirror registry. With this fix, oc-mirror v2 includes these images in the archive by pulling the source image by `digest` while retaining the `tag` for reference. A warning message is displayed to inform users of this behavior. (link:https://issues.redhat.com/browse/OCPBUGS-37867[*OCPBUGS-37867*])

* Previously, when running oc-mirror v2 Technology Preview in mirror-to-disk mode within a disconnected environment, the tool attempted to download `graph.tar.gz` from `api.openshift.com`, causing the mirroring process to fail. With this fix, when the `UPDATE_URL_OVERRIDE` environment variable is set, oc-mirror first checks for the graph image in the oc-mirror cache. If the image is not found, oc-mirror skips it without failing, ensuring successful mirroring in air-gapped environments. (link:https://issues.redhat.com/browse/OCPBUGS-38037[*OCPBUGS-38037*])

* Previously, mirroring a release that used a digest instead of a tag did not work as expected. This issue has been fixed, ensuring successful mirroring for releases that use digests. (link:https://issues.redhat.com/browse/OCPBUGS-45249[*OCPBUGS-45249*])

* Previously, API calls to Cincinnati that encountered errors did not fail immediately, potentially leading to inconsistencies. With this update, errors related to Cincinnati are now considered severe and cause an immediate failure.
-
-* When you used oc-mirror v2 Technology Preview in a mirror-to-mirror workflow, logs displayed catalog images twice. This could mislead users into thinking the catalog image was mirrored twice. In reality, the image was copied once to the destination registry and once to the oc-mirror local cache. This issue has been fixed to ensure logs accurately reflect the mirroring process. (link:https://issues.redhat.com/browse/OCPBUGS-41331[*OCPBUGS-41331*])
-////
-
-* Previously, when using the `--force-cache-delete` flag to delete images from a remote registry, the deletion process did not work as expected. With this update, the issue has been resolved, ensuring that images are deleted properly when the flag is used. (link:https://issues.redhat.com/browse/OCPBUGS-47690[*OCPBUGS-47690*])
-
-* Previously, oc-mirror plugin v2 could not delete the graph image when the mirroring used a partially disconnected (mirror-to-mirror) workflow. With this update, graph images can now be deleted regardless of the mirroring workflow used. (link:https://issues.redhat.com/browse/OCPBUGS-46145[*OCPBUGS-46145*])
-
-* Previously, if the same image was used by multiple {product-title} release components, oc-mirror plugin v2 attempted to delete the image multiple times, but failed after the first attempt. This issue has been resolved by ensuring that oc-mirror plugin v2 generates a list of unique images during the delete `--generate` phase. (link:https://issues.redhat.com/browse/OCPBUGS-45299[*OCPBUGS-45299*])
-
-* Previously, `oci` catalogs on disk were not mirrored correctly in the oc-mirror plugin v2. With this update, `oci` catalogs are now successfully mirrored. (link:https://issues.redhat.com/browse/OCPBUGS-44225[*OCPBUGS-44225*])
-
-* Previously, if you reran the `oc-mirror` command, the rebuild of the `oci` catalog failed and an error was generated. With this release, if you rerun the `oc-mirror` command, the workspace file is deleted so that the failed catalog issue does not happen. (link:https://issues.redhat.com/browse/OCPBUGS-45171[*OCPBUGS-45171*])
-
-* Previously, running the `oc adm node-image create` command sometimes generated an `image can't be pulled` error message on the first attempt. With this release, a retry mechanism addresses temporary failures when pulling the image from the release payload. (link:https://issues.redhat.com/browse/OCPBUGS-44388[*OCPBUGS-44388*])
-
-* Previously, duplicate entries could appear in the signature `ConfigMap` YAML and JSON files created in the `clusterresource` object, leading to issues when applying them to the cluster. This update ensures that the generated files do not contain duplicates. (link:https://issues.redhat.com/browse/OCPBUGS-42428[*OCPBUGS-42428*])
-
-* Previously, the release signature `ConfigMap` for oc-mirror plugin v2 was incorrectly stored in an archived TAR file instead of in the `cluster-resources` folder. This caused `mirror2disk` to fail. With this release, the release signature `ConfigMap` for oc-mirror plugin v2, in JSON or YAML format compatible with oc-mirror plugin v1, is now stored in the `cluster-resources` folder. (link:https://issues.redhat.com/browse/OCPBUGS-38343[*OCPBUGS-38343*]) and (link:https://issues.redhat.com/browse/OCPBUGS-38233[*OCPBUGS-38233*])
-
-* Previously, using an invalid log-level flag caused oc-mirror plugin v2 to panic. This update ensures that the oc-mirror plugin v2 handles invalid log levels gracefully. Additionally, the `loglevel` flag has been renamed to `log-level` to align with tools like Podman for the convenience of the user. (link:https://issues.redhat.com/browse/OCPBUGS-37740[*OCPBUGS-37740*])
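-+
-For example, a minimal sketch of a mirror-to-mirror invocation that uses the renamed flag; the configuration file name and registry host are placeholders, not confirmed defaults:
-+
-[source,terminal]
-----
-$ oc-mirror --v2 --config imageset-config.yaml --log-level debug docker://registry.example.com
-----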
-
-[discrete]
-[id="ocp-release-note-openshift-cli-bug-fixes_{context}"]
-==== OpenShift CLI (oc)
-
-* Previously, the `oc adm node-image create --pxe` command did not generate only the Preboot Execution Environment (PXE) artifacts. Instead, the command created the PXE artifacts with other artifacts from a `node-joiner` pod and stored them all in the wrong subdirectory. Additionally, the PXE artifacts were incorrectly prefixed with `agent` instead of `node`. With this release, generated PXE artifacts are stored in the correct directory and receive the correct prefix. (link:https://issues.redhat.com/browse/OCPBUGS-46449[*OCPBUGS-46449*])
-
-* Previously, requests to the `deploymentconfig/scale` subresource failed when an admission webhook matched the request. With this release, the issue is resolved and requests to the `deploymentconfig/scale` subresource succeed. (link:https://issues.redhat.com/browse/OCPBUGS-41136[*OCPBUGS-41136*])
-
-
-[discrete]
-[id="ocp-release-note-olm-bug-fixes_{context}"]
-==== Operator Lifecycle Manager (OLM)
-
-* Previously, concurrent reconciliation of the same namespace in {olmv0-first} led to `ConstraintsNotSatisfiable` errors on subscriptions. This update resolves the issue. (link:https://issues.redhat.com/browse/OCPBUGS-48660[*OCPBUGS-48660*])
-
-* Previously, excessive catalog source snapshots caused severe performance regressions. This update fixes the issue. (link:https://issues.redhat.com/browse/OCPBUGS-48644[*OCPBUGS-48644*])
-
-* Previously, when the kubelet terminated catalog registry pods with the `TerminationByKubelet` message, the registry pods were not recreated by the catalog Operator. This update fixes the issue. (link:https://issues.redhat.com/browse/OCPBUGS-46474[*OCPBUGS-46474*])
-
-* Previously, {olmv0} failed to upgrade Operator cluster service versions (CSVs) due to a TLS validation error. This update fixes the issue. (link:https://issues.redhat.com/browse/OCPBUGS-43581[*OCPBUGS-43581*])
-
-* Previously, service account tokens for Operator groups failed to generate automatically in {olmv0-first}. This update fixes the issue. (link:https://issues.redhat.com/browse/OCPBUGS-42360[*OCPBUGS-42360*])
-
-* Previously, when {olmv1-first} validated custom resource definition (CRD) upgrades, the message output when detecting changed default values was rendered in bytes instead of human-readable language. With this update, related messages are now updated to show human-readable values. (link:https://issues.redhat.com/browse/OCPBUGS-41726[*OCPBUGS-41726*])
-
-* Previously, the status update function did not return an error when a connection error occurred in the Catalog Operator. As a result, the Operator might crash because the IP address returned a `nil` status. This update resolves the issue so that an error message is returned and the Operator no longer crashes. (link:https://issues.redhat.com/browse/OCPBUGS-37637[*OCPBUGS-37637*])
-
-* Previously, catalog source registry pods did not recover from cluster node failures. This update fixes the issue. (link:https://issues.redhat.com/browse/OCPBUGS-36661[*OCPBUGS-36661*])
-
-* Previously, Operators with many custom resources (CRs) exceeded API server timeouts. As a result, the install plan for the Operator got stuck in a pending state. This update fixes the issue by adding a paginated view for listing the CRs deployed on the cluster. (link:https://issues.redhat.com/browse/OCPBUGS-35358[*OCPBUGS-35358*])
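-+
-As a quick way to check whether any install plans remain pending, you can list them in the affected namespace; `openshift-operators` in this sketch is only an example namespace:
-+
-[source,terminal]
-----
-$ oc get installplan -n openshift-operators
-----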
-
-////
-[discrete]
-[id="ocp-release-note-openshift-api-server-bug-fixes_{context}"]
-==== OpenShift API server
-////
-
-[discrete]
-[id="ocp-release-note-pao-bug-fixes_{context}"]
-==== Performance Addon Operator
-
-* Previously, the Performance Profile Creator (PPC) failed to build a performance profile for compute nodes that had different core ID numbering (cores per socket) for their logical processors and that existed under the same node pool. For example, the PPC failed for two compute nodes that both have logical processors `2` and `18`, where one node groups them as core ID `2` and the other node groups them as core ID `9`.
-+
-With this release, PPC no longer fails to create the performance profile because PPC can now build a performance profile for a cluster that has compute nodes that each have different core ID numbering for their logical processors. The PPC now outputs a warning message that indicates to use the generated performance profile with caution, because different core ID numbering might impact system optimization and isolated management of tasks. (link:https://issues.redhat.com/browse/OCPBUGS-45903[*OCPBUGS-45903*])
-
-* Previously, if you specified a long string of isolated CPUs in a performance profile, such as `0,1,2,...,512`, the `tuned`, Machine Config Operator, and `rpm-ostree` components failed to process the string as expected. As a consequence, after you applied the performance profile, the expected kernel arguments were missing. The system failed silently with no reported errors. With this release, the string for isolated CPUs in a performance profile is converted to sequential ranges, such as `0-512`. As a result, the kernel arguments are applied as expected in most scenarios. (link:https://issues.redhat.com/browse/OCPBUGS-45472[*OCPBUGS-45472*])
-+
-[NOTE]
-====
-The issue might still occur with some combinations of input for isolated CPUs in a performance profile, such as a long list of odd numbers `1,3,5,...,511`.
-====
-
-[discrete]
-[id="ocp-release-note-rhcos-bug-fixes_{context}"]
-==== {op-system-first}
-
-* Previously, the `kdump` initramfs would stop responding when trying to open a local encrypted disk. This occurred even when the `kdump` destination was a remote machine that did not need access to the local disk. With this release, the issue is fixed and the `kdump` initramfs successfully opens a local encrypted disk. (link:https://issues.redhat.com/browse/OCPBUGS-43040[*OCPBUGS-43040*])
-
-* Previously, explicitly disabling FIPS mode with `fips=0` caused some systemd services, which assume FIPS mode was requested, to run and consequently fail. This issue resulted in {op-system} failing to boot. With this release, the relevant systemd services now only run if FIPS mode is enabled by specifying `fips=1`. As a result, {op-system} now correctly boots without FIPS mode enabled when `fips=0` is specified. (link:https://issues.redhat.com/browse/OCPBUGS-39536[*OCPBUGS-39536*])
-
-[discrete]
-[id="ocp-release-note-scalability-and-performance-bug-fixes_{context}"]
-==== Scalability and performance
-
-* Previously, you could configure the NUMA Resources Operator to map a `nodeGroup` to more than one `MachineConfigPool`. This implementation is contrary to the intended design of the Operator, which assumed a one-to-one mapping between a `nodeGroup` and a `MachineConfigPool`. With this release, if a `nodeGroup` maps to more than one `MachineConfigPool`, the Operator accepts the configuration, but the Operator state moves to `Degraded`. To retain the previous behavior, you can apply the `config.node.openshift-kni.io/multiple-pools-per-tree: enabled` annotation to the NUMA Resources Operator. However, the ability to assign a `nodeGroup` to more than one `MachineConfigPool` will be removed in a future release. (link:https://issues.redhat.com/browse/OCPBUGS-42523[*OCPBUGS-42523*])
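-+
-For example, a sketch of applying that annotation; the resource type and the instance name `numaresourcesoperator` are assumptions based on the Operator's defaults, so verify them in your cluster first:
-+
-[source,terminal]
-----
-$ oc annotate numaresourcesoperators.nodetopology.openshift.io numaresourcesoperator config.node.openshift-kni.io/multiple-pools-per-tree=enabled
-----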
-
-[discrete]
-[id="ocp-release-note-storage-bug-fixes_{context}"]
-==== Storage
-
-* Previously, Portworx plugin Container Storage Interface (CSI) migration failed without the inclusion of an upstream patch. With this release, the Portworx plugin CSI translation in Kubernetes 1.31 copies the secret name and namespace, so an upstream patch is no longer required. (link:https://issues.redhat.com/browse/OCPBUGS-49437[*OCPBUGS-49437*])
-
-* Previously, the vSphere Problem Detector Operator waited up to 24 hours to reflect a change in the `clustercsidrivers.managementState` parameter from `Managed` to `Removed` for a {vmw-full} cluster. With this release, the vSphere Problem Detector Operator now reflects this state change in about 1 hour. (link:https://issues.redhat.com/browse/OCPBUGS-39358[*OCPBUGS-39358*])
-
-* Previously, the Azure File Driver attempted to reuse existing storage accounts. With this release, the Azure File Driver creates storage accounts during dynamic provisioning. This means that updated clusters using newly created Persistent Volumes (PVs) also use a new storage account. PVs that were previously provisioned continue using the same storage account used before the cluster update. (link:https://issues.redhat.com/browse/OCPBUGS-38922[*OCPBUGS-38922*])
-
-* Previously, the configuration loader logged YAML `unmarshall` errors even when parsing the `INI` configuration succeeded. With this release, the `unmarshall` errors are no longer logged when `INI` parsing succeeds. (link:https://issues.redhat.com/browse/OCPBUGS-38368[*OCPBUGS-38368*])
-
-* Previously, the Storage Operator counted an incorrect number of control plane nodes that existed in a cluster. This count is needed for the Operator to determine the number of replicas for controllers. With this release, the Storage Operator counts the correct number of control plane nodes, leading to a more accurate count of replica controllers. (link:https://issues.redhat.com/browse/OCPBUGS-36233[*OCPBUGS-36233*])
-
-* Previously, the `manila-csi-driver` and node registrar pods were missing health checks because of a configuration issue. With this release, the health checks are now added to both of these resources. (link:https://issues.redhat.com/browse/OCPBUGS-29240[*OCPBUGS-29240*])
-
-////
-[discrete]
-[id="ocp-release-note-windows-containers-bug-fixes_{context}"]
-==== Windows containers
-////
-
-[id="ocp-4-18-technology-preview-tables_{context}"]
-== Technology Preview features status
-
-Some features in this release are currently in Technology Preview. These experimental features are not intended for production use.
Note the following scope of support on the Red{nbsp}Hat Customer Portal for these features: - -link:https://access.redhat.com/support/offerings/techpreview[Technology Preview Features Support Scope] - -In the following tables, features are marked with the following statuses: - -* _Not Available_ -* _Technology Preview_ -* _General Availability_ -* _Deprecated_ -* _Removed_ - - -[discrete] -[id="ocp-release-notes-auth-tech-preview_{context}"] -=== Authentication and authorization Technology Preview features - -.Authentication and authorization Technology Preview tracker -[cols="4,1,1,1",options="header"] -|==== -|Feature |4.16 |4.17 |4.18 - -|Pod security admission restricted enforcement -|Technology Preview -|Technology Preview -|Technology Preview - -|==== - -[discrete] -[id="ocp-release-notesedge-computing-tp-features_{context}"] -=== Edge computing Technology Preview features - -.Edge computing Technology Preview tracker -[cols="4,1,1,1",options="header"] -|==== -|Feature |4.16 |4.17 |4.18 - -|Accelerated provisioning of {ztp} -|Technology Preview -|Technology Preview -|Technology Preview - -|Enabling disk encryption with TPM and PCR protection -|Not Available -|Technology Preview -|Technology Preview - -|==== - -[discrete] -[id="ocp-release-notes-installing-tech-preview_{context}"] -=== Installation Technology Preview features - -.Installation Technology Preview tracker -[cols="4,1,1,1",options="header"] -|==== -|Feature |4.16 |4.17 |4.18 - -// All GA in 4.17 notes for oci-first -|Adding kernel modules to nodes with kvc -|Technology Preview -|Technology Preview -|Technology Preview - -|Enabling NIC partitioning for SR-IOV devices -|Technology Preview -|General Availability -|General Availability - -|User-defined labels and tags for {gcp-first} -|Technology Preview -|General Availability -|General Availability - -|Installing a cluster on Alibaba Cloud by using Assisted Installer -|Technology Preview -|Technology Preview -|Technology Preview - -|Mount shared entitlements in BuildConfigs in RHEL -|Technology Preview -|Technology Preview -|Technology Preview - -|{product-title} on {oci-first} -|General Availability -|General Availability -|General Availability - -|Selectable Cluster Inventory -|Technology Preview -|Technology Preview -|Technology Preview - -|Installing a cluster on {gcp-short} using the Cluster API implementation -|Technology Preview -|General Availability -|General Availability - -|{product-title} on Oracle Compute Cloud@Customer (C3) -|Not Available -|Not Available -|General Availability - -|{product-title} on Oracle Private Cloud Appliance (PCA) -|Not Available -|Not Available -|General Availability - -|Installing a cluster on {vmw-full} with multiple network interface controllers -|Not Available -|Not Available -|Technology Preview -|==== - -[discrete] -[id="ocp-release-notes-mco-tech-preview_{context}"] -=== Machine Config Operator Technology Preview features - -.Machine Config Operator Technology Preview tracker -[cols="4,1,1,1",options="header"] -|==== -|Feature |4.16 |4.17 |4.18 - -|Improved MCO state reporting (`oc get machineconfigpool`) -|Technology Preview -|Technology Preview -|Technology Preview - -|On-cluster RHCOS image layering -|Technology Preview -|Technology Preview -|Technology Preview - -|Node disruption policies -|Technology Preview -|General Availability -|General Availability - -|Updating boot images for GCP clusters -|Technology Preview -|General Availability -|General Availability - -|Updating boot images for AWS clusters -|Technology Preview 
-|Technology Preview
-|General Availability
-
-|====
-
-[discrete]
-[id="ocp-release-notes-machine-management-tech-preview_{context}"]
-=== Machine management Technology Preview features
-
-.Machine management Technology Preview tracker
-[cols="4,1,1,1",options="header"]
-|====
-|Feature |4.16 |4.17 |4.18
-
-|Managing machines with the Cluster API for {aws-full}
-|Technology Preview
-|Technology Preview
-|Technology Preview
-
-|Managing machines with the Cluster API for {gcp-full}
-|Technology Preview
-|Technology Preview
-|Technology Preview
-
-|Managing machines with the Cluster API for {vmw-full}
-|Technology Preview
-|Technology Preview
-|Technology Preview
-
-|Cloud controller manager for {ibm-power-server-name}
-|Technology Preview
-|Technology Preview
-|Technology Preview
-
-|Defining a {vmw-short} failure domain for a control plane machine set
-|General Availability
-|General Availability
-|General Availability
-
-|Cloud controller manager for {alibaba}
-|Removed
-|Removed
-|Removed
-
-|Adding multiple subnets to an existing {vmw-full} cluster by using compute machine sets
-|Not Available
-|Not Available
-|Technology Preview
-
-|====
-
-[discrete]
-[id="ocp-release-notes-monitoring-tech-preview_{context}"]
-=== Monitoring Technology Preview features
-
-.Monitoring Technology Preview tracker
-[cols="4,1,1,1",options="header"]
-|====
-|Feature |4.16 |4.17 |4.18
-
-|Metrics Collection Profiles
-|Technology Preview
-|Technology Preview
-|Technology Preview
-
-|====
-
-[discrete]
-[id="ocp-release-notes-web-console-tech-preview_{context}"]
-=== Web console Technology Preview features
-
-.Web console Technology Preview tracker
-[cols="4,1,1,1",options="header"]
-|====
-|Feature |4.16 |4.17 |4.18
-
-|{ols-official} in the {product-title} web console
-|Technology Preview
-|Technology Preview
-|Technology Preview
-
-|====
-
-[discrete]
-[id="ocp-release-notes-multi-arch-tech-preview_{context}"]
-=== Multi-Architecture Technology Preview features
-
-.Multi-Architecture Technology Preview tracker
-[cols="4,1,1,1",options="header"]
-|====
-|Feature |4.16 |4.17 |4.18
-
-|`kdump` on `arm64` architecture
-|Technology Preview
-|Technology Preview
-|Technology Preview
-
-|`kdump` on `s390x` architecture
-|Technology Preview
-|Technology Preview
-|Technology Preview
-
-|`kdump` on `ppc64le` architecture
-|Technology Preview
-|Technology Preview
-|Technology Preview
-
-|Multiarch Tuning Operator
-|General Availability
-|General Availability
-|General Availability
-
-|Support for configuring the image stream import mode behavior
-|Not Available
-|Not Available
-|Technology Preview
-
-|====
-
-[discrete]
-[id="ocp-release-notes-networking-tech-preview_{context}"]
-=== Networking Technology Preview features
-
-.Networking Technology Preview tracker
-[cols="4,1,1,1",options="header"]
-|====
-|Feature |4.16 |4.17 |4.18
-
-|eBPF manager Operator
-|Not Available
-|Technology Preview
-|Technology Preview
-
-|Advertise the MetalLB service from a subset of nodes by using L2 mode and a specific pool of IP addresses
-|Technology Preview
-|Technology Preview
-|Technology Preview
-
-|Updating the interface-specific safe sysctls list
-|Technology Preview
-|Technology Preview
-|Technology Preview
-
-|Egress service custom resource
-|Technology Preview
-|Technology Preview
-|Technology Preview
-
-|VRF specification in `BGPPeer` custom resource
-|Technology Preview
-|Technology Preview
-|Technology Preview
-
-|VRF specification in `NodeNetworkConfigurationPolicy` custom resource
-|Technology Preview
-|Technology Preview
-|Technology Preview
-
-|Host network settings for SR-IOV VFs
-|Technology Preview
-|General Availability
-|General Availability
-
-|Integration of MetalLB and FRR-K8s
-|Technology Preview
-|General Availability
-|General Availability
-
-|Automatic leap seconds handling for PTP grandmaster clocks
-|Not Available
-|General Availability
-|General Availability
-
-|PTP events REST API v2
-|Not Available
-|General Availability
-|General Availability
-
-|Customized `br-ex` bridge needed by OVN-Kubernetes to use NMState
-|Technology Preview
-|Technology Preview
-|General Availability
-
-|Live migration to OVN-Kubernetes from OpenShift SDN
-|Not Available
-|General Availability
-|Not Available
-
-|User defined network segmentation
-|Not Available
-|Technology Preview
-|General Availability
-
-|Dynamic configuration manager
-|Not Available
-|Not Available
-|Technology Preview
-
-|SR-IOV Network Operator support for Intel C741 Emmitsburg Chipset
-|Not Available
-|Not Available
-|Technology Preview
-
-|====
-
-[discrete]
-[id="ocp-release-notes-nodes-tech-preview_{context}"]
-=== Node Technology Preview features
-
-.Nodes Technology Preview tracker
-[cols="4,1,1,1",options="header"]
-|====
-|Feature |4.16 |4.17 |4.18
-
-|`MaxUnavailableStatefulSet` featureset
-|Technology Preview
-|Technology Preview
-|Technology Preview
-
-|sigstore support
-|Not Available
-|Technology Preview
-|Technology Preview
-
-|====
-
-[discrete]
-[id="ocp-release-notes-oc-cli-tech-preview_{context}"]
-=== OpenShift CLI (oc) Technology Preview features
-
-.OpenShift CLI (`oc`) Technology Preview tracker
-[cols="4,1,1,1",options="header"]
-|====
-|Feature |4.16 |4.17 |4.18
-
-|oc-mirror plugin v2
-|Technology Preview
-|Technology Preview
-|General Availability
-
-|oc-mirror plugin v2 enclave support
-|Technology Preview
-|Technology Preview
-|General Availability
-
-|oc-mirror plugin v2 delete functionality
-|Technology Preview
-|Technology Preview
-|General Availability
-
-|====
-
-[discrete]
-[id="ocp-release-notes-extensions-tech-preview_{context}"]
-=== Extensions Technology Preview features
-
-// "Extensions" refers to OLMv1
-
-.Extensions Technology Preview tracker
-[cols="4,1,1,1",options="header"]
-|====
-|Feature |4.16 |4.17 |4.18
-
-|{olmv1-first}
-|Technology Preview
-|Technology Preview
-|General Availability
-
-|{olmv1} runtime validation of container images using sigstore signatures
-|Not Available
-|Not Available
-|Technology Preview
-
-|====
-
-[discrete]
-[id="ocp-release-notes-operator-lifecycle-tech-preview_{context}"]
-=== Operator lifecycle and development Technology Preview features
-
-// "Operator lifecycle" refers to OLMv0 and "development" refers to Operator SDK
-
-.Operator lifecycle and development Technology Preview tracker
-[cols="4,1,1,1",options="header"]
-|====
-|Feature |4.16 |4.17 |4.18
-
-|{olmv1-first}
-|Technology Preview
-|Technology Preview
-|General Availability
-
-|Scaffolding tools for Hybrid Helm-based Operator projects
-|Deprecated
-|Deprecated
-|Removed
-
-|Scaffolding tools for Java-based Operator projects
-|Deprecated
-|Deprecated
-|Removed
-
-|====
-
-[discrete]
-[id="ocp-release-notes-rhcos-tech-preview_{context}"]
-=== {rh-openstack-first} Technology Preview features
-
-.{rh-openstack} Technology Preview tracker
-[cols="4,1,1,1",options="header"]
-|====
-|Feature |4.16 |4.17 |4.18
-
-|{rh-openstack} integration into the {cluster-capi-operator}
-|Technology Preview
-|Technology Preview
-|Technology Preview
-
-|Control Plane with `rootVolumes` and `etcd` on local disk
-|Technology Preview -|General Availability -|General Availability - -|==== - -[discrete] -[id="ocp-release-notes-scalability-tech-preview_{context}"] -=== Scalability and performance Technology Preview features - -.Scalability and performance Technology Preview tracker -[cols="4,1,1,1",options="header"] -|==== -|Feature |4.16 |4.17 |4.18 - -|{factory-prestaging-tool} -|Technology Preview -|Technology Preview -|Technology Preview - -|Hyperthreading-aware CPU manager policy -|Technology Preview -|Technology Preview -|Technology Preview - -|Mount namespace encapsulation -|Technology Preview -|Technology Preview -|Technology Preview - -|Node Observability Operator -|Technology Preview -|Technology Preview -|Technology Preview - -|Increasing the etcd database size -|Technology Preview -|Technology Preview -|Technology Preview - -|Using {rh-rhacm} `PolicyGenerator` resources to manage {ztp} cluster policies -|Technology Preview -|Technology Preview -|Technology Preview - -|Pinned Image Sets -|Technology Preview -|Technology Preview -|Technology Preview - -|==== - -//// -[discrete] -[id="ocp-release-notes-special-hardware-tech-preview_{context}"] -=== Specialized hardware and driver enablement Technology Preview features - -.Specialized hardware and driver enablement Technology Preview tracker -[cols="4,1,1,1",options="header"] -|==== -|Feature |4.16 |4.17 |4.18 - -|==== -//// - -[discrete] -[id="ocp-release-notes-storage-tech-preview_{context}"] -=== Storage Technology Preview features - -.Storage Technology Preview tracker -[cols="4,1,1,1",options="header"] -|==== -|Feature |4.16 |4.17 |4.18 - -|AWS EFS storage CSI usage metrics -|Not Available -|General Availability -|General Availability - -|Automatic device discovery and provisioning with Local Storage Operator -|Technology Preview -|Technology Preview -|Technology Preview - -|Azure File CSI snapshot support -|Not Available -|Technology Preview -|Technology Preview - -|Read Write Once Pod access mode -|General Availability -|General Availability -|General Availability - -|Shared Resources CSI Driver in OpenShift Builds -|Technology Preview -|Technology Preview -|Technology Preview - -|{secrets-store-operator} -|Technology Preview -|Technology Preview -|General Availability - -|CIFS/SMB CSI Driver Operator -|Technology Preview -|Technology Preview -|General Availability - -|VMware vSphere multiple vCenter support -|Not Available -|Technology Preview -|General Availability - -|Disabling/enabling storage on vSphere -|Not Available -|Technology Preview -|Technology Preview - -|RWX/RWO SELinux Mount -|Not Available -|Developer Preview -|Developer Preview - -|Migrating CNS Volumes Between Datastores -|Not Available -|Developer Preview -|Developer Preview - -|CSI volume group snapshots -|Not Available -|Not Available -|Technology Preview - -|GCP PD supports C3/N4 instance types and hyperdisk-balanced disks -|Not Available -|Not Available -|General Availability - -|GCP Filestore supports Workload Identity -|Not Available -|General Availability -|General Availability - -|OpenStack Manila support for CSI resize -|Not Available -|Not Available -|General Availability - -|==== - -[id="ocp-4-18-known-issues_{context}"] -== Known issues - -* Previously, when you attempted to set the policy for a {gcp-first} service account, the API reported a `400: Bad Request` validation error. When you create a service account, it might take up to 60 seconds for the account to become active, and this causes the validation error. 
If this error occurs, retry setting the policy by using exponential backoff for at least 60 seconds. (link:https://issues.redhat.com/browse/OCPBUGS-48187[*OCPBUGS-48187*])
-
-* An installation can succeed when installing a cluster on a {gcp-full} shared virtual private network (VPC) by using the minimum permissions and without specifying the `controlPlane.platform.gcp.serviceAccount` parameter in the `install-config.yaml` file. Firewall rules in Kubernetes (K8s) are created in the shared VPC, but destroying the cluster does not delete these firewall rules because the host project lacks the required permissions. (link:https://issues.redhat.com/browse/OCPBUGS-38689[*OCPBUGS-38689*])
-
-* oc-mirror plugin v2 currently returns an exit status of `0`, meaning "success", even when mirroring errors occur. As a result, do not rely on the exit status in automated workflows. Until this issue is resolved, manually check the `mirroring_errors_XXX_XXX.txt` file generated by `oc-mirror` for errors. (link:https://issues.redhat.com/browse/OCPBUGS-49880[*OCPBUGS-49880*])
-
-* The DNF package manager included in {op-system-first} images cannot be used at runtime, because DNF relies on additional packages to access entitled nodes in a cluster that are under a Red Hat subscription. As a workaround, use the `rpm-ostree` command instead. (link:https://issues.redhat.com/browse/OCPBUGS-35247[*OCPBUGS-35247*])
-
-* A regression in the behavior of `libreswan` caused some nodes with IPsec enabled to lose communication with pods on other nodes in the same cluster. To resolve this issue, consider disabling IPsec for your cluster. (link:https://issues.redhat.com/browse/OCPBUGS-43713[*OCPBUGS-43713*])
-
-* There is a known issue in {product-title} version 4.18 that prevents configuring multiple subnets in the failure domain of a Nutanix cluster during installation.
-There is no workaround for this issue.
-(link:https://issues.redhat.com/browse/OCPBUGS-49885[*OCPBUGS-49885*])
-
-* The following known issues exist for configuring multiple subnets for an existing Nutanix cluster by using a control plane machine set:
-+
---
-** Adding subnets above the existing subnet in the `subnets` stanza causes a control plane node to become stuck in the `Deleting` state.
-As a workaround, only add subnets below the existing subnet in the `subnets` stanza.
-
-** Sometimes, after adding a subnet, the updated control plane machines appear in the Nutanix console but the {product-title} cluster is unreachable.
-There is no workaround for this issue.
---
-+
-These issues occur on clusters that use a control plane machine set to configure subnets, regardless of whether subnets are specified in a failure domain or the provider specification.
-(link:https://issues.redhat.com/browse/OCPBUGS-50904[*OCPBUGS-50904*])
-
-* There is a known issue with RHEL 8 worker nodes that use `cgroupv1` Linux Control Groups (cgroup). The following is an example of the error message displayed for impacted nodes: `UDN are not supported on the node ip-10-0-51-120.us-east-2.compute.internal as it uses cgroup v1.` As a workaround, users should migrate worker nodes from `cgroupv1` to `cgroupv2`. (link:https://issues.redhat.com/browse/OCPBUGS-49933[*OCPBUGS-49933*])
-
-* The current PTP grandmaster clock (T-GM) implementation has a single National Marine Electronics Association (NMEA) sentence generator sourced from the GNSS without a backup NMEA sentence generator.
If NMEA sentences are lost before reaching the e810 NIC, the T-GM cannot synchronize the devices in the network synchronization chain and the PTP Operator reports an error. A proposed fix is to report a `FREERUN` event when the NMEA string is lost. Until this limitation is addressed, the T-GM does not support the PTP clock holdover state. (link:https://issues.redhat.com/browse/OCPBUGS-19838[*OCPBUGS-19838*])
-
-* There is a known issue with a Layer 2 network topology on clusters running on Google Cloud Platform (GCP). At this time, the egress IP addresses being used in the Layer 2 network that is created by a `UserDefinedNetwork` (UDN) resource are using the wrong source IP address. Consequently, UDN is not supported on Layer 2 on GCP. Currently, there is no fix for this issue. (link:https://issues.redhat.com/browse/OCPBUGS-48301[*OCPBUGS-48301*])
-
-* There is a known issue with user-defined networks (UDN) that causes OVN-Kubernetes to delete any routing table ID equal to or higher than 1000 that it does not manage. Consequently, any Virtual Routing and Forwarding (VRF) instance created outside OVN-Kubernetes is deleted. This issue impacts users who have created user-defined VRFs with a table ID greater than or equal to 1000. As a workaround, users must change their VRFs to a table ID lower than 1000 because IDs of 1000 and higher are reserved for {product-title}. (link:https://issues.redhat.com/browse/OCPBUGS-50855[*OCPBUGS-50855*])
-
-* If you attempted to log in to a {product-title} 4.17 server by using the {oc-first} that you installed as part of {product-title} {product-version}, you would see the following warning message in your terminal:
-+
-[source,terminal]
-----
-Warning: unknown field "metadata"
-You don't have any projects. You can try to create a new project, by running
-
-    oc new-project
-----
-+
-This warning message is a known issue but does not indicate any functionality issues with {product-title}. You can safely ignore the warning message and continue to use {product-title} as intended. (link:https://issues.redhat.com/browse/OCPBUGS-44833[*OCPBUGS-44833*])
-
-* There is a known issue in {product-title} {product-version} that causes the cluster's masquerade subnet to be set to `169.254.169.0/29` if the `ovnkube-node` daemon set is deleted. When the masquerade subnet is set to `169.254.169.0/29`, `UserDefinedNetwork` custom resources (CRs) cannot be created.
-+
-[NOTE]
-====
-* If your masquerade subnet has been configured at Day 2 by making changes to the `network.operator` CR, it will not be reverted to `169.254.169.0/29`.
-* If a cluster has been upgraded from {product-title} 4.16, the masquerade subnet remains `169.254.169.0/29` for backward compatibility. The masquerade subnet should be changed to a subnet with more IPs, for example, `169.254.0.0/17`, to use the user-defined networks feature.
-====
-+
-This known issue occurs after performing one of the following actions:
-+
-[cols="2", options="header"]
-|===
-| Action | Consequence
-
-| You have restarted the `ovnkube-node` `DaemonSet` object.
-| The masquerade subnet is set to `169.254.169.0/29`, which does not support `UserDefinedNetwork` CRs.
-
-| You have deleted the `ovnkube-node` `DaemonSet` object.
-| The masquerade subnet is set to `169.254.169.0/29`, which does not support `UserDefinedNetwork` CRs. Additionally, `ovnkube-node` pods crash and remain in a `CrashLoopBackOff` state.
-|=== -+ -As a temporary workaround, you can delete the `UserDefinedNetwork` CR and then restart all `ovnkube-node` pods by running the following command: -+ -[source,terminal] ----- -$ oc delete pod -l app=ovnkube-node -n openshift-ovn-kubernetes ----- -+ -The `ovnkube-node` pods automatically restart, which re-stabilizes the cluster. Then, you can set the masquerade subnet to a larger IP address, for example, `169.254.0.0/17` for IPv4. As a result, `NetworkAttachmentDefinition` or `UserDefinedNetwork` CRs can be created. -+ -[IMPORTANT] -==== -Do not delete the `ovnkube-node` `DaemonSet` object when deleting `ovnkube-node` pods. Doing so sets the masquerade subnet to `169.254.169.0/29`. -==== -+ -For more information, see xref:../networking/ovn_kubernetes_network_provider/configure-ovn-kubernetes-subnets.adoc#nw-ovn-k-day-2-masq-subnet_configure-ovn-kubernetes-subnets[Configuring the OVN-Kubernetes masquerade subnet as a Day 2 operation]. -+ -(link:https://issues.redhat.com/browse/OCPBUGS-49662[*OCPBUGS-49662*]) - -* Adding or removing nodes from the cluster can cause ownership contention over the node status. This can cause new nodes to take an extended period of time to appear. As a workaround, you can restart the `kube-apiserver-operator` pod in the `openshift-kube-apiserver-operator` namespace to expedite the process. (link:https://issues.redhat.com/browse/OCPBUGS-50587[*OCPBUGS-50587*]) - -* For dual-stack networking clusters that run on {rh-openstack}, when a Virtual IP (VIP) that is attached to a Floating IP (FIP) moves between master nodes, the association between VIP and FIP might stop working if the new master is on a different compute node. This issue occurs because OVN assumes that both IPv4 and IPv6 addresses on a shared Neutron port belong to the same node. (link:https://issues.redhat.com//browse/OCPBUGS-50599[*OCPBUGS-50599*]) - -[id="ocp-telco-ran-4-18-known-issues_{context}"] - -* When you run Cloud-native Network Functions (CNF) latency tests on an {product-title} cluster, the test can sometimes return results greater than the latency threshold for the test; for example, 20 microseconds for `cyclictest` testing. This results in a test failure. -(link:https://issues.redhat.com/browse/OCPBUGS-42328[*OCPBUGS-42328*]) - -* There is a known issue when the grandmaster clock (T-GM) transitions to the `Locked` state too soon. This happens before the Digital Phase-Locked Loop (DPLL) completes its transition to the `Locked-HO-Acquired` state, and after the Global Navigation Satellite Systems (GNSS) time source is restored. -(link:https://issues.redhat.com/browse/OCPBUGS-49826[*OCPBUGS-49826*]) - -[id="ocp-telco-core-4-18-known-issues_{context}"] - -* Due to an issue with Kubernetes, the CPU Manager is unable to return CPU resources from the last pod admitted to a node to the pool of available CPU resources. These resources are allocatable if a subsequent pod is admitted to the node. However, this pod then becomes the last pod, and again, the CPU manager cannot return this pod's resources to the available pool. -+ -This issue affects CPU load-balancing features, which depend on the CPU Manager releasing CPUs to the available pool. Consequently, non-guaranteed pods might run with a reduced number of CPUs. As a workaround, schedule a pod with a `best-effort` CPU Manager policy on the affected node. This pod will be the last admitted pod and this ensures the resources will be correctly released to the available pool. 
(link:https://issues.redhat.com/browse/OCPBUGS-46428[*OCPBUGS-46428*])
-
-* When a pod uses the CNI plugin for DHCP address assignment in conjunction with other CNI plugins, the network interface for the pod might be unexpectedly deleted. As a result, when the DHCP lease for the pod expires, the DHCP proxy enters a loop when trying to re-create a new lease, leading to the node becoming unresponsive. There is currently no workaround. (link:https://issues.redhat.com/browse/OCPBUGS-45272[*OCPBUGS-45272*])
-
-[id="ocp-nodes-4-18-known-issues_{context}"]
-
-* When using PXE boot to xref:../nodes/nodes/nodes-nodes-adding-node-iso.adoc#adding-node-iso[add a worker node to an on-premise cluster], sometimes the host fails to reboot from the disk properly, preventing the installation from completing.
-As a workaround, you must manually reboot the failed host from the disk. (link:https://issues.redhat.com/browse/OCPBUGS-45116[*OCPBUGS-45116*])
-
-[id="ocp-storage-core-4-18-known-issues_{context}"]
-
-* The GCP PD CSI driver does not support hyperdisk-balanced volumes with RWX mode. Attempting to provision hyperdisk-balanced volumes with RWX mode by using the GCP PD CSI driver produces errors and does not mount the volumes with the desired access mode. (link:https://issues.redhat.com/browse/OCPBUGS-44769[*OCPBUGS-44769*])
-
-* Currently, a GCP PD cluster with c3-standard-2, c3-standard-4, n4-standard-2, and n4-standard-4 nodes can erroneously exceed the maximum attachable disk number, which should be 16. This issue might prevent you from successfully creating or attaching volumes to your pods. (link:https://issues.redhat.com/browse/OCPBUGS-39258[*OCPBUGS-39258*])
-
-[id="ocp-hosted-control-planes-4-18-known-issues_{context}"]
-
-[id="ocp-4-18-asynchronous-errata-updates_{context}"]
-== Asynchronous errata updates
-
-Security, bug fix, and enhancement updates for {product-title} {product-version} are released as asynchronous errata through the Red{nbsp}Hat Network. All {product-title} {product-version} errata are https://access.redhat.com/downloads/content/290/[available on the Red Hat Customer Portal]. See the https://access.redhat.com/support/policy/updates/openshift[{product-title} Life Cycle] for more information about asynchronous errata.
-
-Red{nbsp}Hat Customer Portal users can enable errata notifications in the account settings for Red{nbsp}Hat Subscription Management (RHSM). When errata notifications are enabled, users are notified through email whenever new errata relevant to their registered systems are released.
-
-[NOTE]
-====
-Red{nbsp}Hat Customer Portal user accounts must have systems registered and consuming {product-title} entitlements for {product-title} errata notification emails to be generated.
-====
-
-This section will continue to be updated over time to provide notes on enhancements and bug fixes for future asynchronous errata releases of {product-title} {product-version}. Versioned asynchronous releases, for example with the form {product-title} {product-version}.z, will be detailed in subsections. In addition, releases in which the errata text cannot fit in the space provided by the advisory will be detailed in subsections that follow.
-
-[IMPORTANT]
-====
-For any {product-title} release, always review the instructions on xref:../updating/updating_a_cluster/updating-cluster-web-console.adoc#updating-cluster-web-console[updating your cluster] properly.
-==== - -//Update with relevant advisory information -[id="ocp-4-18-1-ga_{context}"] -=== RHSA-2024:6122 - {product-title} {product-version}.1 image release, bug fix, and security update advisory - -Issued: 25 February 2025 - -{product-title} release {product-version}.1, which includes security updates, is now available. The list of bug fixes that are included in the update is documented in the link:https://access.redhat.com/errata/RHSA-2024:6122[RHSA-2024:6122] advisory. The RPM packages that are included in the update are provided by the link:https://access.redhat.com/errata/RHEA-2024:6126[RHEA-2024:6126] advisory. - -Space precluded documenting all of the container images for this release in the advisory. - -You can view the container images in this release by running the following command: - -[source,terminal] ----- -$ oc adm release info 4.18.1 --pullspecs ----- - -[id="ocp-4-18-1-updating_{context}"] -==== Updating -To update an {product-title} 4.17 cluster to this latest release, see xref:../updating/updating_a_cluster/updating-cluster-cli.adoc#updating-cluster-cli[Updating a cluster using the CLI]. - -//replace 4.y.z for the correct values for the release. You do not need to update oc to run this command. diff --git a/release_notes/ocp-4-19-release-notes.adoc b/release_notes/ocp-4-19-release-notes.adoc new file mode 100644 index 0000000000..2c5c846e44 --- /dev/null +++ b/release_notes/ocp-4-19-release-notes.adoc @@ -0,0 +1,1600 @@ +:_mod-docs-content-type: ASSEMBLY +[id="ocp-4-19-release-notes"] += {product-title} {product-version} release notes +include::_attributes/common-attributes.adoc[] +:context: release-notes + +toc::[] + +Red{nbsp}Hat {product-title} provides developers and IT organizations with a hybrid cloud application platform for deploying both new and existing applications on secure, scalable resources with minimal configuration and management. {product-title} supports a wide selection of programming languages and frameworks, such as Java, JavaScript, Python, Ruby, and PHP. + +Built on {op-system-base-full} and Kubernetes, {product-title} provides a more secure and scalable multitenant operating system for today's enterprise-class applications, while delivering integrated application runtimes and libraries. {product-title} enables organizations to meet security, privacy, compliance, and governance requirements. + +[id="ocp-4-19-about-this-release_{context}"] +== About this release + +// TODO: Update with the relevant information closer to release. +{product-title} (link:https://access.redhat.com/errata/RHSA-202X:XXXX[RHSA-202X:XXXX]) is now available. This release uses link:https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.31.md[Kubernetes 1.31] with CRI-O runtime. New features, changes, and known issues that pertain to {product-title} {product-version} are included in this topic. + +{product-title} {product-version} clusters are available at https://console.redhat.com/openshift. From the {hybrid-console}, you can deploy {product-title} clusters to either on-premises or cloud environments. + +// Double check OP system versions +{product-title} {product-version} is supported on {op-system-base-full} 8.8 and a later version of {op-system-base} 8 that is released before End of Life of {product-title} {product-version}. {product-title} {product-version} is also supported on {op-system-first}. 
To understand {op-system-base} versions used by {op-system}, see link:https://access.redhat.com/articles/6907891[{op-system-base} Versions Utilized by {op-system-first} and {product-title}] (Knowledgebase article).
+
+You must use {op-system} machines for the control plane, and you can use either {op-system} or {op-system-base} for compute machines. {op-system-base} machines are deprecated in {product-title} 4.16 and will be removed in a future release.
+//Removed the note per https://issues.redhat.com/browse/GRPA-3517
+
+//Even-numbered release lifecycle verbiage (Comment in for even-numbered releases)
+////
+Starting from {product-title} 4.14, the Extended Update Support (EUS) phase for even-numbered releases increases the total available lifecycle to 24 months on all supported architectures, including `x86_64`, 64-bit ARM (`aarch64`), {ibm-power-name} (`ppc64le`), and {ibm-z-name} (`s390x`) architectures. Beyond this, Red{nbsp}Hat also offers a 12-month additional EUS add-on, denoted as _Additional EUS Term 2_, that extends the total available lifecycle from 24 months to 36 months. The Additional EUS Term 2 is available on all architecture variants of {product-title}. For more information about support for all versions, see the link:https://access.redhat.com/support/policy/updates/openshift[Red Hat {product-title} Life Cycle Policy].
+////
+
+//Odd-numbered release lifecycle verbiage (Comment in for odd-numbered releases)
+The support lifecycle for odd-numbered releases, such as {product-title} {product-version}, on all supported architectures, including `x86_64`, 64-bit ARM (`aarch64`), {ibm-power-name} (`ppc64le`), and {ibm-z-name} (`s390x`) architectures is 18 months. For more information about support for all versions, see the link:https://access.redhat.com/support/policy/updates/openshift[Red Hat {product-title} Life Cycle Policy].
+
+Commencing with the {product-title} 4.14 release, Red{nbsp}Hat is simplifying the administration and management of Red{nbsp}Hat shipped cluster Operators with the introduction of three new life cycle classifications: Platform Aligned, Platform Agnostic, and Rolling Stream. These life cycle classifications provide additional ease and transparency for cluster administrators to understand the life cycle policies of each Operator and form cluster maintenance and upgrade plans with predictable support boundaries. For more information, see link:https://access.redhat.com/webassets/avalon/j/includes/session/scribe/?redirectTo=https%3A%2F%2Faccess.redhat.com%2Fsupport%2Fpolicy%2Fupdates%2Fopenshift_operators[OpenShift Operator Life Cycles].
+
+// Added in 4.14. Language came directly from Kirsten Newcomer.
+{product-title} is designed for FIPS. When running {op-system-base-full} or {op-system-first} booted in FIPS mode, {product-title} core components use the {op-system-base} cryptographic libraries that have been submitted to NIST for FIPS 140-2/140-3 Validation on only the `x86_64`, `ppc64le`, and `s390x` architectures.
+
+For more information about the NIST validation program, see link:https://csrc.nist.gov/Projects/cryptographic-module-validation-program/validated-modules[Cryptographic Module Validation Program]. For the latest NIST status for the individual versions of {op-system-base} cryptographic libraries that have been submitted for validation, see link:https://access.redhat.com/articles/2918071#fips-140-2-and-fips-140-3-2[Compliance Activities and Government Standards].
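+
+For example, you can check whether a node booted in FIPS mode by reading the kernel flag, where a value of `1` indicates that FIPS mode is enabled. This is a minimal sketch; `<node_name>` is a placeholder:
+
+[source,terminal]
+----
+$ oc debug node/<node_name> -- chroot /host cat /proc/sys/crypto/fips_enabled
+----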
+ +[id="ocp-4-19-add-on-support-status_{context}"] +== {product-title} layered and dependent component support and compatibility + +The scope of support for layered and dependent components of {product-title} changes independently of the {product-title} version. To determine the current support status and compatibility for an add-on, refer to its release notes. For more information, see the link:https://access.redhat.com/support/policy/updates/openshift[Red Hat {product-title} Life Cycle Policy]. + +[id="ocp-4-19-new-features-and-enhancements_{context}"] +== New features and enhancements + +This release adds improvements related to the following components and concepts: + +[id="ocp-release-notes-auth_{context}"] +=== Authentication and authorization + +[id="ocp-release-notes-backup-restore_{context}"] +=== Backup and restore + +[id="ocp-release-notes-builds_{context}"] +=== Builds + +[id="ocp-release-notes-cro_{context}"] +=== Cluster Resource Override Admission Operator + +[id="ocp-release-notes-edge-computing_{context}"] +=== Edge computing + +[id="ocp-release-notes-extensions_{context}"] +=== Extensions ({olmv1}) + +[id="ocp-release-notes-hcp_{context}"] +=== Hosted control planes + +Because {hcp} releases asynchronously from {product-title}, it has its own release notes. For more information, see xref:../hosted_control_planes/hosted-control-planes-release-notes.adoc#hosted-control-planes-release-notes[{hcp-capital} release notes]. + +[id="ocp-release-notes-ibm-power_{context}"] +=== {ibm-power-title} + +The {ibm-power-name} release on {product-title} {product-version} adds improvements and new capabilities to {product-title} components. + +This release introduces support for the following features on {ibm-power-title}: + +[id="ocp-release-notes-ibm-z_{context}"] +=== {ibm-z-title} and {ibm-linuxone-title} + +[discrete] +[id="ocp-release-notes-ibm-z-power-support-matrix_{context}"] +=== {ibm-power-title}, {ibm-z-title}, and {ibm-linuxone-title} support matrix + +Starting in {product-title} 4.14, Extended Update Support (EUS) is extended to the {ibm-power-name} and the {ibm-z-name} platform. For more information, see the link:https://access.redhat.com/support/policy/updates/openshift-eus[OpenShift EUS Overview]. 
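+
+When you check a mixed-architecture cluster against the following support matrix, one way to confirm the architecture that each node reports is to query the node status. This is a minimal sketch that uses standard `oc` output formatting:
+
+[source,terminal]
+----
+$ oc get nodes -o custom-columns=NAME:.metadata.name,ARCH:.status.nodeInfo.architecture
+----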
+ +.{product-title} features +[cols="3,1,1",options="header"] +|==== +|Feature |{ibm-power-name} |{ibm-z-name} and {ibm-linuxone-name} + +|Adding compute nodes to on-premise clusters using {oc-first} +|Supported +|Supported + +|Alternate authentication providers +|Supported +|Supported + +|Agent-based Installer +|Supported +|Supported + +|Assisted Installer +|Supported +|Supported + +|Automatic Device Discovery with Local Storage Operator +|Unsupported +|Supported + +|Automatic repair of damaged machines with machine health checking +|Unsupported +|Unsupported + +|Cloud controller manager for {ibm-cloud-name} +|Supported +|Unsupported + +|Controlling overcommit and managing container density on nodes +|Unsupported +|Unsupported + +|CPU manager +|Supported +|Supported + +|Cron jobs +|Supported +|Supported + +|Descheduler +|Supported +|Supported + +|Egress IP +|Supported +|Supported + +|Encrypting data stored in etcd +|Supported +|Supported + +|FIPS cryptography +|Supported +|Supported + +|Helm +|Supported +|Supported + +|Horizontal pod autoscaling +|Supported +|Supported + +|Hosted control planes +|Supported +|Supported + +|IBM Secure Execution +|Unsupported +|Supported + +|Installer-provisioned Infrastructure Enablement for {ibm-power-server-name} +|Supported +|Unsupported + +|Installing on a single node +|Supported +|Supported + +|IPv6 +|Supported +|Supported + +|Monitoring for user-defined projects +|Supported +|Supported + +|Multi-architecture compute nodes +|Supported +|Supported + +|Multi-architecture control plane +|Supported +|Supported + +|Multipathing +|Supported +|Supported + +|Network-Bound Disk Encryption - External Tang Server +|Supported +|Supported + +|Non-volatile memory express drives (NVMe) +|Supported +|Unsupported + +|nx-gzip for Power10 (Hardware Acceleration) +|Supported +|Unsupported + +|oc-mirror plugin +|Supported +|Supported + +|OpenShift CLI (`oc`) plugins +|Supported +|Supported + +|Operator API +|Supported +|Supported + +|OpenShift Virtualization +|Unsupported +|Supported + +|OVN-Kubernetes, including IPsec encryption +|Supported +|Supported + +|PodDisruptionBudget +|Supported +|Supported + +|Precision Time Protocol (PTP) hardware +|Unsupported +|Unsupported + +|{openshift-local-productname} +|Unsupported +|Unsupported + +|Scheduler profiles +|Supported +|Supported + +|Secure Boot +|Unsupported +|Supported + +|Stream Control Transmission Protocol (SCTP) +|Supported +|Supported + +|Support for multiple network interfaces +|Supported +|Supported + +|The `openshift-install` utility to support various SMT levels on {ibm-power-name} (Hardware Acceleration) +|Supported +|Supported + +|Three-node cluster support +|Supported +|Supported + +|Topology Manager +|Supported +|Unsupported + +|z/VM Emulated FBA devices on SCSI disks +|Unsupported +|Supported + +|4K FCP block device +|Supported +|Supported +|==== + +.Persistent storage options +[cols="2,1,1",options="header"] +|==== +|Feature |{ibm-power-name} |{ibm-z-name} and {ibm-linuxone-name} +|Persistent storage using iSCSI +|Supported ^[1]^ +|Supported ^[1]^,^[2]^ + +|Persistent storage using local volumes (LSO) +|Supported ^[1]^ +|Supported ^[1]^,^[2]^ + +|Persistent storage using hostPath +|Supported ^[1]^ +|Supported ^[1]^,^[2]^ + +|Persistent storage using Fibre Channel +|Supported ^[1]^ +|Supported ^[1]^,^[2]^ + +|Persistent storage using Raw Block +|Supported ^[1]^ +|Supported ^[1]^,^[2]^ + +|Persistent storage using EDEV/FBA +|Supported ^[1]^ +|Supported ^[1]^,^[2]^ +|==== +[.small] +-- +1. 
Persistent shared storage must be provisioned by using either {rh-storage-first} or other supported storage protocols. +2. Persistent non-shared storage must be provisioned by using local storage, such as iSCSI, FC, or by using LSO with DASD, FCP, or EDEV/FBA. +-- + +.Operators +[cols="2,1,1",options="header"] +|==== +|Feature |{ibm-power-name} |{ibm-z-name} and {ibm-linuxone-name} + +|{cert-manager-operator} +|Supported +|Supported + +|Cluster Logging Operator +|Supported +|Supported + +|Cluster Resource Override Operator +|Supported +|Supported + +|Compliance Operator +|Supported +|Supported + +|Cost Management Metrics Operator +|Supported +|Supported + +|File Integrity Operator +|Supported +|Supported + +|HyperShift Operator +|Supported +|Supported + +|{ibm-power-server-name} Block CSI Driver Operator +|Supported +|Unsupported + +|Ingress Node Firewall Operator +|Supported +|Supported + +|Local Storage Operator +|Supported +|Supported + +|MetalLB Operator +|Supported +|Supported + +|Network Observability Operator +|Supported +|Supported + +|NFD Operator +|Supported +|Supported + +|NMState Operator +|Supported +|Supported + +|OpenShift Elasticsearch Operator +|Supported +|Supported + +|Vertical Pod Autoscaler Operator +|Supported +|Supported +|==== + +.Multus CNI plugins +[cols="2,1,1",options="header"] +|==== +|Feature |{ibm-power-name} |{ibm-z-name} and {ibm-linuxone-name} + +|Bridge +|Supported +|Supported + +|Host-device +|Supported +|Supported + +|IPAM +|Supported +|Supported + +|IPVLAN +|Supported +|Supported +|==== + +.CSI Volumes +[cols="2,1,1",options="header"] +|==== +|Feature |{ibm-power-name} |{ibm-z-name} and {ibm-linuxone-name} + +|Cloning +|Supported +|Supported + +|Expansion +|Supported +|Supported + +|Snapshot +|Supported +|Supported +|==== + +[id="ocp-release-notes-insights-operator-enhancements_{context}"] +=== Insights Operator + +[id="ocp-release-notes-installation-and-update_{context}"] +=== Installation and update + +[id="ocp-release-notes-machine-config-operator_{context}"] +=== Machine Config Operator + +[id="ocp-release-notes-management-console_{context}"] +=== Management console + +[id="ocp-release-notes-monitoring_{context}"] +=== Monitoring + +[id="ocp-release-notes-network-observability-operator_{context}"] +=== Network Observability Operator + +[id="ocp-release-notes-networking_{context}"] +=== Networking + +[id="ocp-4-19-networking-gateway-api-controller_{context}"] +==== Support for using the Gateway API to configure cluster Ingress traffic +We will have details here when {product-title} {product-version} is released. + +[id="ocp-4-19-networking-gateway-api-crd-lifecycle_{context}"] +==== Support for managing the Gateway API custom resource definition (CRD) lifecycle +We will have details here when {product-title} {product-version} is released. 
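+
+Although full details are pending, the Gateway API is an upstream Kubernetes standard built around resources such as `GatewayClass`, `Gateway`, and `HTTPRoute`. The following is a hedged sketch of a minimal `Gateway` object only; the name, namespace, and `gatewayClassName` values are placeholder assumptions, not confirmed {product-title} defaults:
+
+[source,terminal]
+----
+$ oc apply -f - <<EOF
+apiVersion: gateway.networking.k8s.io/v1
+kind: Gateway
+metadata:
+  name: example-gateway
+  namespace: openshift-ingress
+spec:
+  gatewayClassName: example-gateway-class
+  listeners:
+  - name: http
+    protocol: HTTP
+    port: 80
+EOF
+----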
+
+[id="ocp-release-notes-nodes_{context}"]
+=== Nodes
+
+[id="ocp-release-notes-postinstallation-configuration_{context}"]
+=== Postinstallation configuration
+
+[id="ocp-release-notes-openshift-cli_{context}"]
+=== OpenShift CLI (oc)
+
+[id="ocp-release-notes-olm_{context}"]
+=== Operator lifecycle
+
+[id="ocp-release-notes-osdk_{context}"]
+=== Operator development
+
+[id="ocp-release-notes-machine-management_{context}"]
+=== Machine management
+
+[id="ocp-release-notes-oci_{context}"]
+=== {oci-first}
+
+[id="ocp-release-notes-rhcos_{context}"]
+=== {op-system-first}
+
+[id="ocp-release-notes-registry_{context}"]
+=== Registry
+
+[id="ocp-release-notes-scalability-and-performance_{context}"]
+=== Scalability and performance
+
+[id="ocp-release-notes-etcd-certificates_{context}"]
+=== Security
+
+[id="ocp-release-notes-storage_{context}"]
+=== Storage
+
+[id="ocp-release-notes-web-console_{context}"]
+=== Web console
+
+[id="ocp-administrator-perspective_{context}"]
+==== Administrator perspective
+
+[id="ocp-developer-perspective_{context}"]
+==== Developer perspective
+
+This release introduces the following updates to the *Developer* perspective of the web console:
+
+[id="ocp-4-19-notable-technical-changes_{context}"]
+== Notable technical changes
+
+[id="ocp-4-19-deprecated-removed-features_{context}"]
+== Deprecated and removed features
+
+Some features available in previous releases have been deprecated or removed.
+
+Deprecated functionality is still included in {product-title} and continues to be supported; however, it will be removed in a future release of this product and is not recommended for new deployments. For the most recent list of major functionality deprecated and removed in {product-title} {product-version}, refer to the following tables. Additional details about some deprecated and removed functionality are listed after the tables.
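+
+As a quick way to audit whether workloads in a cluster still rely on deprecated Kubernetes APIs before you update, you can inspect the `APIRequestCount` resources. This is a general sketch rather than a check for any specific entry in these tables:
+
+[source,terminal]
+----
+$ oc get apirequestcounts
+----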
+ +In the following tables, features are marked with the following statuses: + +* _Not Available_ +* _Technology Preview_ +* _General Availability_ +* _Deprecated_ +* _Removed_ + +[discrete] +[id="ocp-release-note-bare-metal-dep-rem_{context}"] +=== Bare metal monitoring deprecated and removed features + +.Bare Metal Event Relay Operator tracker +[cols="4,1,1,1",options="header"] +|==== +|Feature |4.17 |4.18 |4.19 + +|Bare Metal Event Relay Operator +|Removed +|Removed +|Removed +|==== + +[discrete] +[id="ocp-release-note-images-dep-rem_{context}"] +=== Images deprecated and removed features + +.Images deprecated and removed tracker +[cols="4,1,1,1",options="header"] +|==== +|Feature |4.17 |4.18 |4.19 + +|Cluster Samples Operator +|Deprecated +|Deprecated +|Deprecated +|==== + +[discrete] +[id="ocp-release-note-install-dep-rem_{context}"] +=== Installation deprecated and removed features + +.Installation deprecated and removed tracker +[cols="4,1,1,1",options="header"] +|==== +|Feature |4.17 |4.18 |4.19 + +|`--cloud` parameter for `oc adm release extract` +|Deprecated +|Deprecated +|Deprecated + +|CoreDNS wildcard queries for the `cluster.local` domain +|Deprecated +|Deprecated +|Deprecated + +|`compute.platform.openstack.rootVolume.type` for {rh-openstack} +|Deprecated +|Deprecated +|Deprecated + +|`controlPlane.platform.openstack.rootVolume.type` for {rh-openstack} +|Deprecated +|Deprecated +|Deprecated + +|`ingressVIP` and `apiVIP` settings in the `install-config.yaml` file for installer-provisioned infrastructure clusters +|Deprecated +|Deprecated +|Deprecated + +|Package-based {op-system-base} compute machines +|Deprecated +|Deprecated +|Deprecated + +|`platform.aws.preserveBootstrapIgnition` parameter for {aws-first} +|Deprecated +|Deprecated +|Deprecated + +|Installing a cluster on {aws-short} with compute nodes in {aws-short} Outposts +|Deprecated +|Deprecated +|Deprecated +|==== + +[discrete] +=== Machine management deprecated and removed features + +.Machine management deprecated and removed tracker +[cols="4,1,1,1",options="header"] +|==== +|Feature |4.17 |4.18 |4.19 + +|Placeholder +|Status +|Status +|Status + +|==== + +[discrete] +[id="ocp-release-note-monitoring-dep-rem_{context}"] +=== Monitoring deprecated and removed features + +.Monitoring deprecated and removed tracker +[cols="4,1,1,1",options="header"] +|==== +|Feature |4.17 |4.18 |4.19 +|==== + +[discrete] +[id="ocp-release-note-networking-dep-rem_{context}"] +=== Networking deprecated and removed features + +.Networking deprecated and removed tracker +[cols="4,1,1,1",options="header"] +|==== +|Feature |4.17 |4.18 |4.19 + +|OpenShift SDN network plugin +|Removed +|Removed +|Removed + +|iptables +|Deprecated +|Deprecated +|Deprecated + +|==== + +[discrete] +[id="ocp-release-note-node-dep-rem_{context}"] +=== Node deprecated and removed features + +.Node deprecated and removed tracker +[cols="4,1,1,1",options="header"] +|==== +|Feature |4.17 |4.18 |4.19 + +|`ImageContentSourcePolicy` (ICSP) objects +|Deprecated +|Deprecated +|Deprecated + +|Kubernetes topology label `failure-domain.beta.kubernetes.io/zone` +|Deprecated +|Deprecated +|Deprecated + +|Kubernetes topology label `failure-domain.beta.kubernetes.io/region` +|Deprecated +|Deprecated +|Deprecated + +|cgroup v1 +|Deprecated +|Deprecated +|Deprecated +|==== + +[discrete] +[id="ocp-release-note-cli-dep-rem_{context}"] +=== OpenShift CLI (oc) deprecated and removed features + +.OpenShift CLI (oc) deprecated and removed tracker +[cols="4,1,1,1",options="header"] 
+|====
+|Feature |4.17 |4.18 |4.19
+
+|oc-mirror plugin v1
+|General Availability
+|Deprecated
+|Deprecated
+|====
+
+[discrete]
+[id="ocp-release-note-operators-dep-rem_{context}"]
+=== Operator lifecycle and development deprecated and removed features
+
+// "Operator lifecycle" refers to OLMv0 and "development" refers to Operator SDK
+
+.Operator lifecycle and development deprecated and removed tracker
+[cols="4,1,1,1",options="header"]
+|====
+|Feature |4.17 |4.18 |4.19
+
+|Operator SDK
+|Deprecated
+|Deprecated
+|Deprecated
+
+|Scaffolding tools for Ansible-based Operator projects
+|Deprecated
+|Deprecated
+|Deprecated
+
+|Scaffolding tools for Helm-based Operator projects
+|Deprecated
+|Deprecated
+|Deprecated
+
+|Scaffolding tools for Go-based Operator projects
+|Deprecated
+|Deprecated
+|Deprecated
+
+|Scaffolding tools for Hybrid Helm-based Operator projects
+|Deprecated
+|Removed
+|Removed
+
+|Scaffolding tools for Java-based Operator projects
+|Deprecated
+|Removed
+|Removed
+
+// Do not remove the SQLite database... entry until otherwise directed by the Operator Framework PM
+|SQLite database format for Operator catalogs
+|Deprecated
+|Deprecated
+|Deprecated
+|====
+
+[discrete]
+[id="ocp-hardware-an-driver-dep-rem_{context}"]
+=== Specialized hardware and driver enablement deprecated and removed features
+
+.Specialized hardware and driver enablement deprecated and removed tracker
+[cols="4,1,1,1",options="header"]
+|====
+|Feature |4.17 |4.18 |4.19
+|====
+
+[discrete]
+=== Storage deprecated and removed features
+
+.Storage deprecated and removed tracker
+[cols="4,1,1,1",options="header"]
+|====
+|Feature |4.17 |4.18 |4.19
+
+|AliCloud Disk CSI Driver Operator
+|Removed
+|Removed
+|Removed
+
+|Shared Resources CSI Driver Operator
+|Deprecated
+|Removed
+|Removed
+|====
+
+[discrete]
+[id="ocp-clusters-dep-rem_{context}"]
+=== Updating clusters deprecated and removed features
+
+.Updating clusters deprecated and removed tracker
+[cols="4,1,1,1",options="header"]
+|====
+|Feature |4.17 |4.18 |4.19
+|====
+
+[discrete]
+[id="ocp-release-note-web-console-dep-rem_{context}"]
+=== Web console deprecated and removed features
+
+.Web console deprecated and removed tracker
+[cols="4,1,1,1",options="header"]
+|====
+|Feature |4.17 |4.18 |4.19
+
+|PatternFly 4
+|Deprecated
+|Deprecated
+|Deprecated
+
+|React Router 5
+|Deprecated
+|Deprecated
+|Deprecated
+|====
+
+[discrete]
+[id="ocp-release-note-workloads-dep-rem_{context}"]
+=== Workloads deprecated and removed features
+
+.Workloads deprecated and removed tracker
+[cols="4,1,1,1",options="header"]
+|====
+|Feature |4.17 |4.18 |4.19
+
+|`DeploymentConfig` objects
+|Deprecated
+|Deprecated
+|Deprecated
+|====
+
+[id="ocp-4-19-deprecated-features_{context}"]
+=== Deprecated features
+
+[id="ocp-4-19-removed-features_{context}"]
+=== Removed features
+
+[id="ocp-4-19-future-deprecation_{context}"]
+=== Notice of future deprecation
+
+[id="ocp-4-19-bug-fixes_{context}"]
+== Bug fixes
+//Bug fix work for TELCODOCS-750
+//Bare Metal Hardware Provisioning / OS Image Provider
+//Bare Metal Hardware Provisioning / baremetal-operator
+//Bare Metal Hardware Provisioning / cluster-baremetal-operator
+//Bare Metal Hardware Provisioning / ironic
+//CNF Platform Validation
+//Cloud Native Events / Cloud Event Proxy
+//Cloud Native Events / Cloud Native Events
+//Cloud Native Events / Hardware Event Proxy
+//Cloud Native Events
+//Driver Toolkit
+//Installer / Assisted installer
+//Installer / OpenShift on Bare Metal IPI
+//Networking / ptp
+//Node Feature Discovery Operator
+//Performance Addon Operator
+//Telco Edge / HW Event Operator
+//Telco Edge / RAN
+//Telco Edge / TALO
+//Telco Edge / ZTP
+
+[discrete]
+[id="ocp-release-note-api-auth-bug-fixes_{context}"]
+==== API Server and Authentication
+
+[discrete]
+[id="ocp-release-note-bare-metal-hardware-bug-fixes_{context}"]
+==== Bare Metal Hardware Provisioning
+
+[discrete]
+[id="ocp-release-note-builds-bug-fixes_{context}"]
+==== Builds
+
+[discrete]
+[id="ocp-release-note-cloud-compute-bug-fixes_{context}"]
+==== Cloud Compute
+
+[discrete]
+[id="ocp-release-note-cloud-cred-operator-bug-fixes_{context}"]
+==== Cloud Credential Operator
+
+[discrete]
+[id="ocp-release-note-cluster-override-admin-operator-bug-fixes_{context}"]
+==== Cluster Resource Override Admission Operator
+
+[discrete]
+[id="ocp-release-note-cluster-version-operator-bug-fixes_{context}"]
+==== Cluster Version Operator
+
+[discrete]
+[id="ocp-release-note-dev-console-bug-fixes_{context}"]
+==== Developer Console
+
+[discrete]
+[id="ocp-release-note-driver-toolkit-bug-fixes_{context}"]
+==== Driver Toolkit (DTK)
+
+[discrete]
+[id="ocp-release-note-cloud-etcd-operator-bug-fixes_{context}"]
+==== etcd Cluster Operator
+
+[discrete]
+[id="ocp-release-note-image-registry-bug-fixes_{context}"]
+==== Image Registry
+
+[discrete]
+[id="ocp-release-note-installer-bug-fixes_{context}"]
+==== Installer
+
+[discrete]
+[id="ocp-release-note-insights-operator-bug-fixes_{context}"]
+==== Insights Operator
+
+[discrete]
+[id="ocp-release-note-kube-controller-bug-fixes_{context}"]
+==== Kubernetes Controller Manager
+
+[discrete]
+[id="ocp-release-note-kube-scheduler-bug-fixes_{context}"]
+==== Kubernetes Scheduler
+
+[discrete]
+[id="ocp-release-note-machine-config-operator-bug-fixes_{context}"]
+==== Machine Config Operator
+
+[discrete]
+[id="ocp-release-note-management-console-bug-fixes_{context}"]
+==== Management Console
+
+[discrete]
+[id="ocp-release-note-monitoring-bug-fixes_{context}"]
+==== Monitoring
+
+[discrete]
+[id="ocp-release-note-networking-bug-fixes_{context}"]
+==== Networking
+
+[discrete]
+[id="ocp-release-note-node-bug-fixes_{context}"]
+==== Node
+
+[discrete]
+[id="ocp-release-note-node-tuning-operator-bug-fixes_{context}"]
+==== Node Tuning Operator (NTO)
+
+[discrete]
+[id="ocp-release-note-observability-bug-fixes_{context}"]
+==== Observability
+
+[discrete]
+[id="ocp-release-note-oc-mirror-bug-fixes_{context}"]
+==== oc-mirror
+
+[discrete]
+[id="ocp-release-note-openshift-cli-bug-fixes_{context}"]
+==== OpenShift CLI (oc)
+
+[discrete]
+[id="ocp-release-note-olm-bug-fixes_{context}"]
+==== Operator Lifecycle Manager (OLM)
+
+[discrete]
+[id="ocp-release-note-openshift-api-server-bug-fixes_{context}"]
+==== OpenShift API server
+
+[discrete]
+[id="ocp-release-note-pao-bug-fixes_{context}"]
+==== Performance Addon Operator
+
+[discrete]
+[id="ocp-release-note-rhcos-bug-fixes_{context}"]
+==== {op-system-first}
+
+[discrete]
+[id="ocp-release-note-scalability-and-performance-bug-fixes_{context}"]
+==== Scalability and performance
+
+[discrete]
+[id="ocp-release-note-storage-bug-fixes_{context}"]
+==== Storage
+
+[discrete]
+[id="ocp-release-note-windows-containers-bug-fixes_{context}"]
+==== Windows containers
+
+[id="ocp-4-19-technology-preview-tables_{context}"]
+== Technology Preview features status
+
+Some features in this release are currently in Technology Preview. These experimental features are not intended for production use.
Note the following scope of support on the Red{nbsp}Hat Customer Portal for these features: + +link:https://access.redhat.com/support/offerings/techpreview[Technology Preview Features Support Scope] + +In the following tables, features are marked with the following statuses: + +* _Not Available_ +* _Technology Preview_ +* _General Availability_ +* _Deprecated_ +* _Removed_ + + +[discrete] +[id="ocp-release-notes-auth-tech-preview_{context}"] +=== Authentication and authorization Technology Preview features + +.Authentication and authorization Technology Preview tracker +[cols="4,1,1,1",options="header"] +|==== +|Feature |4.17 |4.18 |4.19 + +|Pod security admission restricted enforcement +|Technology Preview +|Technology Preview +|Technology Preview + +|==== + +[discrete] +[id="ocp-release-notesedge-computing-tp-features_{context}"] +=== Edge computing Technology Preview features + +.Edge computing Technology Preview tracker +[cols="4,1,1,1",options="header"] +|==== +|Feature |4.17 |4.18 |4.19 + +|Accelerated provisioning of {ztp} +|Technology Preview +|Technology Preview +|Technology Preview + +|Enabling disk encryption with TPM and PCR protection +|Technology Preview +|Technology Preview +|Technology Preview + +|==== + +[discrete] +[id="ocp-release-notes-installing-tech-preview_{context}"] +=== Installation Technology Preview features + +.Installation Technology Preview tracker +[cols="4,1,1,1",options="header"] +|==== +|Feature |4.17 |4.18 |4.19 + +// All GA in 4.17 notes for oci-first +|Adding kernel modules to nodes with kvc +|Technology Preview +|Technology Preview +|Technology Preview + +|Enabling NIC partitioning for SR-IOV devices +|General Availability +|General Availability +|General Availability + +|User-defined labels and tags for {gcp-first} +|General Availability +|General Availability +|General Availability + +|Installing a cluster on Alibaba Cloud by using Assisted Installer +|Technology Preview +|Technology Preview +|Technology Preview + +|Mount shared entitlements in BuildConfigs in RHEL +|Technology Preview +|Technology Preview +|Technology Preview + +|Selectable Cluster Inventory +|Technology Preview +|Technology Preview +|Technology Preview + +|Installing a cluster on {gcp-short} using the Cluster API implementation +|General Availability +|General Availability +|General Availability + +|Installing a cluster on {vmw-full} with multiple network interface controllers +|Not Available +|Technology Preview +|Technology Preview +|==== + +[discrete] +[id="ocp-release-notes-mco-tech-preview_{context}"] +=== Machine Config Operator Technology Preview features + +.Machine Config Operator Technology Preview tracker +[cols="4,1,1,1",options="header"] +|==== +|Feature |4.17 |4.18 |4.19 + +|Improved MCO state reporting (`oc get machineconfigpool`) +|Technology Preview +|Technology Preview +|Technology Preview + +|On-cluster RHCOS image layering +|Technology Preview +|Technology Preview +|Technology Preview + +|Node disruption policies +|General Availability +|General Availability +|General Availability + +|Updating boot images for GCP clusters +|General Availability +|General Availability +|General Availability + +|Updating boot images for AWS clusters +|Technology Preview +|General Availability +|General Availability +|==== + +[discrete] +[id="ocp-release-notes-machine-management-tech-preview_{context}"] +=== Machine management Technology Preview features + +.Machine management Technology Preview tracker +[cols="4,1,1,1",options="header"] +|==== +|Feature |4.17 |4.18 |4.19 + 
+
+|Managing machines with the Cluster API for {aws-full}
+|Technology Preview
+|Technology Preview
+|Technology Preview
+
+|Managing machines with the Cluster API for {gcp-full}
+|Technology Preview
+|Technology Preview
+|Technology Preview
+
+|Managing machines with the Cluster API for {azure-full}
+|Not Available
+|Technology Preview
+|Technology Preview
+
+|Managing machines with the Cluster API for {vmw-full}
+|Technology Preview
+|Technology Preview
+|Technology Preview
+
+|Cloud controller manager for {ibm-power-server-name}
+|Technology Preview
+|Technology Preview
+|Technology Preview
+
+|Adding multiple subnets to an existing {vmw-full} cluster by using compute machine sets
+|Not Available
+|Technology Preview
+|Technology Preview
+|====
+
+[discrete]
+[id="ocp-release-notes-monitoring-tech-preview_{context}"]
+=== Monitoring Technology Preview features
+
+.Monitoring Technology Preview tracker
+[cols="4,1,1,1",options="header"]
+|====
+|Feature |4.17 |4.18 |4.19
+
+|Metrics Collection Profiles
+|Technology Preview
+|Technology Preview
+|Technology Preview
+
+|====
+
+[discrete]
+[id="ocp-release-notes-web-console-tech-preview_{context}"]
+=== Web console Technology Preview features
+
+.Web console Technology Preview tracker
+[cols="4,1,1,1",options="header"]
+|====
+|Feature |4.17 |4.18 |4.19
+
+|{ols-official} in the {product-title} web console
+|Technology Preview
+|Technology Preview
+|Technology Preview
+|====
+
+[discrete]
+[id="ocp-release-notes-multi-arch-tech-preview_{context}"]
+=== Multi-Architecture Technology Preview features
+
+.Multi-Architecture Technology Preview tracker
+[cols="4,1,1,1",options="header"]
+|====
+|Feature |4.17 |4.18 |4.19
+
+|`kdump` on `arm64` architecture
+|Technology Preview
+|Technology Preview
+|Technology Preview
+
+|`kdump` on `s390x` architecture
+|Technology Preview
+|Technology Preview
+|Technology Preview
+
+|`kdump` on `ppc64le` architecture
+|Technology Preview
+|Technology Preview
+|Technology Preview
+
+|Support for configuring the image stream import mode behavior
+|Not Available
+|Technology Preview
+|Technology Preview
+|====
+
+[discrete]
+[id="ocp-release-notes-networking-tech-preview_{context}"]
+=== Networking Technology Preview features
+
+.Networking Technology Preview tracker
+[cols="4,1,1,1",options="header"]
+|====
+|Feature |4.17 |4.18 |4.19
+
+|eBPF manager Operator
+|Technology Preview
+|Technology Preview
+|Technology Preview
+
+|Advertising a MetalLB service in L2 mode from a subset of nodes by using a specific pool of IP addresses
+|Technology Preview
+|Technology Preview
+|Technology Preview
+
+|Updating the interface-specific safe sysctls list
+|Technology Preview
+|Technology Preview
+|Technology Preview
+
+|Egress service custom resource
+|Technology Preview
+|Technology Preview
+|Technology Preview
+
+|VRF specification in `BGPPeer` custom resource
+|Technology Preview
+|Technology Preview
+|Technology Preview
+
+|VRF specification in `NodeNetworkConfigurationPolicy` custom resource
+|Technology Preview
+|Technology Preview
+|Technology Preview
+
+|Host network settings for SR-IOV VFs
+|General Availability
+|General Availability
+|General Availability
+
+|Integration of MetalLB and FRR-K8s
+|General Availability
+|General Availability
+|General Availability
+
+|Automatic leap seconds handling for PTP grandmaster clocks
+|General Availability
+|General Availability
+|General Availability
+
+|PTP events REST API v2
+|General Availability
+|General Availability
+|General Availability
+
+|Customized `br-ex` bridge needed by OVN-Kubernetes to use NMState
+|General Availability
+|General Availability
+|General Availability
+
+|Live migration to OVN-Kubernetes from OpenShift SDN
+|General Availability
+|Not Available
+|Not Available
+
+|User-defined network segmentation
+|Technology Preview
+|Technology Preview
+|General Availability
+
+|Dynamic configuration manager
+|Not Available
+|Technology Preview
+|Technology Preview
+
+|SR-IOV Network Operator support for Intel C741 Emmitsburg Chipset
+|Not Available
+|Technology Preview
+|Technology Preview
+
+|Gateway API and Istio for Ingress management
+|Not Available
+|Not Available
+|General Availability
+|====
+
+[discrete]
+[id="ocp-release-notes-nodes-tech-preview_{context}"]
+=== Node Technology Preview features
+
+.Nodes Technology Preview tracker
+[cols="4,1,1,1",options="header"]
+|====
+|Feature |4.17 |4.18 |4.19
+
+|`MaxUnavailableStatefulSet` featureset
+|Technology Preview
+|Technology Preview
+|Technology Preview
+
+|sigstore support
+|Technology Preview
+|Technology Preview
+|Technology Preview
+
+|====
+
+[discrete]
+[id="ocp-release-notes-oc-cli-tech-preview_{context}"]
+=== OpenShift CLI (oc) Technology Preview features
+
+.OpenShift CLI (`oc`) Technology Preview tracker
+[cols="4,1,1,1",options="header"]
+|====
+|Feature |4.17 |4.18 |4.19
+
+|oc-mirror plugin v2
+|Technology Preview
+|General Availability
+|General Availability
+
+|oc-mirror plugin v2 enclave support
+|Technology Preview
+|General Availability
+|General Availability
+
+|oc-mirror plugin v2 delete functionality
+|Technology Preview
+|General Availability
+|General Availability
+|====
+
+[discrete]
+[id="ocp-release-notes-extensions-tech-preview_{context}"]
+=== Extensions Technology Preview features
+
+// "Extensions" refers to OLMv1
+
+.Extensions Technology Preview tracker
+[cols="4,1,1,1",options="header"]
+|====
+|Feature |4.17 |4.18 |4.19
+
+|{olmv1-first}
+|Technology Preview
+|General Availability
+|General Availability
+
+|{olmv1} runtime validation of container images using sigstore signatures
+|Not Available
+|Technology Preview
+|Technology Preview
+|====
+
+[discrete]
+[id="ocp-release-notes-operator-lifecycle-tech-preview_{context}"]
+=== Operator lifecycle and development Technology Preview features
+
+// "Operator lifecycle" refers to OLMv0 and "development" refers to Operator SDK
+
+.Operator lifecycle and development Technology Preview tracker
+[cols="4,1,1,1",options="header"]
+|====
+|Feature |4.17 |4.18 |4.19
+
+|{olmv1-first}
+|Technology Preview
+|General Availability
+|General Availability
+
+|Scaffolding tools for Hybrid Helm-based Operator projects
+|Deprecated
+|Removed
+|Removed
+
+|Scaffolding tools for Java-based Operator projects
+|Deprecated
+|Removed
+|Removed
+|====
+
+[discrete]
+[id="ocp-release-notes-rhcos-tech-preview_{context}"]
+=== {rh-openstack-first} Technology Preview features
+
+.{rh-openstack} Technology Preview tracker
+[cols="4,1,1,1",options="header"]
+|====
+|Feature |4.17 |4.18 |4.19
+
+|{rh-openstack} integration into the {cluster-capi-operator}
+|Technology Preview
+|Technology Preview
+|Technology Preview
+
+|Control plane with `rootVolumes` and `etcd` on local disk
+|General Availability
+|General Availability
+|General Availability
+|====
+
+[discrete]
+[id="ocp-release-notes-scalability-tech-preview_{context}"]
+=== Scalability and performance Technology Preview features
+
+.Scalability and performance Technology Preview tracker
+[cols="4,1,1,1",options="header"]
+|====
+|Feature |4.17 |4.18 |4.19
+
+|{factory-prestaging-tool} +|Technology Preview +|Technology Preview +|Technology Preview + +|Hyperthreading-aware CPU manager policy +|Technology Preview +|Technology Preview +|Technology Preview + +|Mount namespace encapsulation +|Technology Preview +|Technology Preview +|Technology Preview + +|Node Observability Operator +|Technology Preview +|Technology Preview +|Technology Preview + +|Increasing the etcd database size +|Technology Preview +|Technology Preview +|Technology Preview + +|Using {rh-rhacm} `PolicyGenerator` resources to manage {ztp} cluster policies +|Technology Preview +|Technology Preview +|Technology Preview + +|Pinned Image Sets +|Technology Preview +|Technology Preview +|Technology Preview +|==== + +[discrete] +[id="ocp-release-notes-special-hardware-tech-preview_{context}"] +=== Specialized hardware and driver enablement Technology Preview features + +.Specialized hardware and driver enablement Technology Preview tracker +[cols="4,1,1,1",options="header"] +|==== +|Feature |4.17 |4.18 |4.19 +|==== + +[discrete] +[id="ocp-release-notes-storage-tech-preview_{context}"] +=== Storage Technology Preview features + +.Storage Technology Preview tracker +[cols="4,1,1,1",options="header"] +|==== +|Feature |4.17 |4.18 |4.19 + +|AWS EFS storage CSI usage metrics +|General Availability +|General Availability +|General Availability + +|Automatic device discovery and provisioning with Local Storage Operator +|Technology Preview +|Technology Preview +|Technology Preview + +|Azure File CSI snapshot support +|Technology Preview +|Technology Preview +|Technology Preview + +|Shared Resources CSI Driver in OpenShift Builds +|Technology Preview +|Technology Preview +|Technology Preview + +|{secrets-store-operator} +|Technology Preview +|General Availability +|General Availability + +|CIFS/SMB CSI Driver Operator +|Technology Preview +|General Availability +|General Availability + +|VMware vSphere multiple vCenter support +|Technology Preview +|General Availability +|General Availability + +|Disabling/enabling storage on vSphere +|Technology Preview +|Technology Preview +|Technology Preview + +|RWX/RWO SELinux Mount +|Developer Preview +|Developer Preview +|Developer Preview + +|Migrating CNS Volumes Between Datastores +|Developer Preview +|Developer Preview +|Developer Preview + +|CSI volume group snapshots +|Not Available +|Technology Preview +|Technology Preview + +|GCP PD supports C3/N4 instance types and hyperdisk-balanced disks +|Not Available +|General Availability +|General Availability + +|GCP Filestore supports Workload Identity +|General Availability +|General Availability +|General Availability + +|OpenStack Manila support for CSI resize +|Not Available +|General Availability +|General Availability +|==== + +[id="ocp-4-19-known-issues_{context}"] +== Known issues + +[id="ocp-telco-ran-4-19-known-issues_{context}"] + +[id="ocp-telco-core-4-19-known-issues_{context}"] + +[id="ocp-nodes-4-19-known-issues_{context}"] + +[id="ocp-storage-core-4-19-known-issues_{context}"] + +[id="ocp-hosted-control-planes-4-19-known-issues_{context}"] + +[id="ocp-4-19-asynchronous-errata-updates_{context}"] +== Asynchronous errata updates + +Security, bug fix, and enhancement updates for {product-title} {product-version} are released as asynchronous errata through the Red{nbsp}Hat Network. All {product-title} {product-version} errata is https://access.redhat.com/downloads/content/290/[available on the Red Hat Customer Portal]. 
See the https://access.redhat.com/support/policy/updates/openshift[{product-title} Life Cycle] for more information about asynchronous errata.
+
+Red{nbsp}Hat Customer Portal users can enable errata notifications in the account settings for Red{nbsp}Hat Subscription Management (RHSM). When errata notifications are enabled, users are notified through email whenever new errata relevant to their registered systems are released.
+
+[NOTE]
+====
+Red{nbsp}Hat Customer Portal user accounts must have systems registered and consuming {product-title} entitlements for {product-title} errata notification emails to generate.
+====
+
+This section will be updated over time to provide notes on enhancements and bug fixes for future asynchronous errata releases of {product-title} {product-version}. Versioned asynchronous releases, for example with the form {product-title} {product-version}.z, will be detailed in subsections. In addition, releases in which the errata text cannot fit in the space provided by the advisory will be detailed in subsections that follow.
+
+[IMPORTANT]
+====
+For any {product-title} release, always properly review the instructions on xref:../updating/updating_a_cluster/updating-cluster-web-console.adoc#updating-cluster-web-console[updating your cluster].
+====
+
+//Update with relevant advisory information
+[id="ocp-4-19-0-ga_{context}"]
+=== RHXA-2025:XXXX - {product-title} {product-version}.0 image release, bug fix, and security update advisory
+
+Issued: DAY-MONTH-YEAR
+
+{product-title} release {product-version}.0, which includes security updates, is now available. The list of bug fixes that are included in the update is documented in the link:https://access.redhat.com/errata/RHXA-2025:XXXX[RHXA-2025:XXXX] advisory. The RPM packages that are included in the update are provided by the link:https://access.redhat.com/errata/RHXA-2025:XXXX[RHXA-2025:XXXX] advisory.
+
+Space precluded documenting all of the container images for this release in the advisory.
+
+You can view the container images in this release by running the following command:
+
+[source,terminal]
+----
+$ oc adm release info 4.19.0 --pullspecs
+----
+
+[id="ocp-4-19-0-updating_{context}"]
+==== Updating
+To update an {product-title} 4.18 cluster to this latest release, see xref:../updating/updating_a_cluster/updating-cluster-cli.adoc#updating-cluster-cli[Updating a cluster using the CLI].
+
+//replace 4.y.z for the correct values for the release. You do not need to update oc to run this command.
diff --git a/virt/release_notes/virt-4-18-release-notes.adoc b/virt/release_notes/virt-4-19-release-notes.adoc
similarity index 99%
rename from virt/release_notes/virt-4-18-release-notes.adoc
rename to virt/release_notes/virt-4-19-release-notes.adoc
index ce4fef09c9..b1361468cb 100644
--- a/virt/release_notes/virt-4-18-release-notes.adoc
+++ b/virt/release_notes/virt-4-19-release-notes.adoc
@@ -1,8 +1,8 @@
 :_mod-docs-content-type: ASSEMBLY
-[id="virt-4-18-release-notes"]
+[id="virt-4-19-release-notes"]
 = {VirtProductName} release notes
 include::_attributes/common-attributes.adoc[]
-:context: virt-4-18-release-notes
+:context: virt-4-19-release-notes

 toc::[]