Mirror of https://github.com/openshift/openshift-docs.git

Merge pull request #103418 from lahinson/osdocs-17564-hide-hcp-4.21

[OSDOCS-17564]: Hide HCP content for 4.21
Authored by Michael Burke on 2025-12-05 16:34:07 -05:00, committed by GitHub.
19 changed files with 317 additions and 299 deletions


@@ -2528,122 +2528,122 @@ Distros: openshift-enterprise, openshift-origin
Topics:
- Name: Hosted control planes release notes
File: hosted-control-planes-release-notes
- Name: Hosted control planes overview
File: index
- Name: Preparing to deploy hosted control planes
Dir: hcp-prepare
Topics:
- Name: Requirements for hosted control planes
File: hcp-requirements
- Name: Sizing guidance for hosted control planes
File: hcp-sizing-guidance
- Name: Overriding resource utilization measurements
File: hcp-override-resource-util
- Name: Installing the hosted control plane command-line interface
File: hcp-cli
- Name: Distributing hosted cluster workloads
File: hcp-distribute-workloads
- Name: Enabling or disabling the hosted control planes feature
File: hcp-enable-disable
- Name: Deploying hosted control planes
Dir: hcp-deploy
Topics:
- Name: Deploying hosted control planes on AWS
File: hcp-deploy-aws
- Name: Deploying hosted control planes on bare metal
File: hcp-deploy-bm
- Name: Deploying hosted control planes on OpenShift Virtualization
File: hcp-deploy-virt
- Name: Deploying hosted control planes on non-bare-metal agent machines
File: hcp-deploy-non-bm
- Name: Deploying hosted control planes on IBM Z
File: hcp-deploy-ibmz
- Name: Deploying hosted control planes on IBM Power
File: hcp-deploy-ibm-power
- Name: Deploying hosted control planes on OpenStack
File: hcp-deploy-openstack
- Name: Managing hosted control planes
Dir: hcp-manage
Topics:
- Name: Managing hosted control planes on AWS
File: hcp-manage-aws
- Name: Managing hosted control planes on bare metal
File: hcp-manage-bm
- Name: Managing hosted control planes on OpenShift Virtualization
File: hcp-manage-virt
- Name: Managing hosted control planes on non-bare-metal agent machines
File: hcp-manage-non-bm
- Name: Managing hosted control planes on IBM Power
File: hcp-manage-ibm-power
- Name: Managing hosted control planes on OpenStack
File: hcp-manage-openstack
- Name: Deploying hosted control planes in a disconnected environment
Dir: hcp-disconnected
Topics:
- Name: Introduction to hosted control planes in a disconnected environment
File: hcp-deploy-dc
- Name: Deploying hosted control planes on OpenShift Virtualization in a disconnected environment
File: hcp-deploy-dc-virt
- Name: Deploying hosted control planes on bare metal in a disconnected environment
File: hcp-deploy-dc-bm
- Name: Deploying hosted control planes on IBM Z in a disconnected environment
File: disconnected-install-ibmz-hcp
- Name: Monitoring user workload in a disconnected environment
File: hcp-dc-monitor
- Name: Configuring certificates for hosted control planes
File: hcp-certificates
- Name: Updating hosted control planes
File: hcp-updating
- Name: High availability for hosted control planes
Dir: hcp_high_availability
Topics:
- Name: About high availability for hosted control planes
File: about-hcp-ha
- Name: Recovering a failing etcd cluster
File: hcp-recovering-etcd-cluster
- Name: Backing up and restoring etcd in an on-premise environment
File: hcp-backup-restore-on-premise
- Name: Backing up and restoring etcd on AWS
File: hcp-backup-restore-aws
- Name: Backing up and restoring a hosted cluster on OpenShift Virtualization
File: hcp-backup-restore-virt
- Name: Disaster recovery for a hosted cluster in AWS
File: hcp-disaster-recovery-aws
- Name: Disaster recovery for a hosted cluster by using OADP
File: hcp-disaster-recovery-oadp
- Name: Automated disaster recovery for a hosted cluster by using OADP
File: hcp-disaster-recovery-oadp-auto
- Name: Authentication and authorization for hosted control planes
File: hcp-authentication-authorization
- Name: Handling machine configuration for hosted control planes
File: hcp-machine-config
- Name: Using feature gates in a hosted cluster
File: hcp-using-feature-gates
- Name: Observability for hosted control planes
File: hcp-observability
- Name: Networking for hosted control planes
File: hcp-networking
- Name: Troubleshooting hosted control planes
File: hcp-troubleshooting
- Name: Destroying a hosted cluster
Dir: hcp-destroy
Topics:
- Name: Destroying a hosted cluster on AWS
File: hcp-destroy-aws
- Name: Destroying a hosted cluster on bare metal
File: hcp-destroy-bm
- Name: Destroying a hosted cluster on OpenShift Virtualization
File: hcp-destroy-virt
- Name: Destroying a hosted cluster on IBM Z
File: hcp-destroy-ibmz
- Name: Destroying a hosted cluster on IBM Power
File: hcp-destroy-ibm-power
- Name: Destroying a hosted cluster on OpenStack
File: hcp-destroy-openstack
- Name: Destroying a hosted cluster on non-bare-metal agent machines
File: hcp-destroy-non-bm
- Name: Manually importing a hosted cluster
File: hcp-import
# - Name: Hosted control planes overview
# File: index
# - Name: Preparing to deploy hosted control planes
# Dir: hcp-prepare
# Topics:
# - Name: Requirements for hosted control planes
# File: hcp-requirements
# - Name: Sizing guidance for hosted control planes
# File: hcp-sizing-guidance
# - Name: Overriding resource utilization measurements
# File: hcp-override-resource-util
# - Name: Installing the hosted control plane command-line interface
# File: hcp-cli
# - Name: Distributing hosted cluster workloads
# File: hcp-distribute-workloads
# - Name: Enabling or disabling the hosted control planes feature
# File: hcp-enable-disable
# - Name: Deploying hosted control planes
# Dir: hcp-deploy
# Topics:
# - Name: Deploying hosted control planes on AWS
# File: hcp-deploy-aws
# - Name: Deploying hosted control planes on bare metal
# File: hcp-deploy-bm
# - Name: Deploying hosted control planes on OpenShift Virtualization
# File: hcp-deploy-virt
# - Name: Deploying hosted control planes on non-bare-metal agent machines
# File: hcp-deploy-non-bm
# - Name: Deploying hosted control planes on IBM Z
# File: hcp-deploy-ibmz
# - Name: Deploying hosted control planes on IBM Power
# File: hcp-deploy-ibm-power
# - Name: Deploying hosted control planes on OpenStack
# File: hcp-deploy-openstack
# - Name: Managing hosted control planes
# Dir: hcp-manage
# Topics:
# - Name: Managing hosted control planes on AWS
# File: hcp-manage-aws
# - Name: Managing hosted control planes on bare metal
# File: hcp-manage-bm
# - Name: Managing hosted control planes on OpenShift Virtualization
# File: hcp-manage-virt
# - Name: Managing hosted control planes on non-bare-metal agent machines
# File: hcp-manage-non-bm
# - Name: Managing hosted control planes on IBM Power
# File: hcp-manage-ibm-power
# - Name: Managing hosted control planes on OpenStack
# File: hcp-manage-openstack
# - Name: Deploying hosted control planes in a disconnected environment
# Dir: hcp-disconnected
# Topics:
# - Name: Introduction to hosted control planes in a disconnected environment
# File: hcp-deploy-dc
# - Name: Deploying hosted control planes on OpenShift Virtualization in a disconnected environment
# File: hcp-deploy-dc-virt
# - Name: Deploying hosted control planes on bare metal in a disconnected environment
# File: hcp-deploy-dc-bm
# - Name: Deploying hosted control planes on IBM Z in a disconnected environment
# File: disconnected-install-ibmz-hcp
# - Name: Monitoring user workload in a disconnected environment
# File: hcp-dc-monitor
# - Name: Configuring certificates for hosted control planes
# File: hcp-certificates
# - Name: Updating hosted control planes
# File: hcp-updating
# - Name: High availability for hosted control planes
# Dir: hcp_high_availability
# Topics:
# - Name: About high availability for hosted control planes
# File: about-hcp-ha
# - Name: Recovering a failing etcd cluster
# File: hcp-recovering-etcd-cluster
# - Name: Backing up and restoring etcd in an on-premise environment
# File: hcp-backup-restore-on-premise
# - Name: Backing up and restoring etcd on AWS
# File: hcp-backup-restore-aws
# - Name: Backing up and restoring a hosted cluster on OpenShift Virtualization
# File: hcp-backup-restore-virt
# - Name: Disaster recovery for a hosted cluster in AWS
# File: hcp-disaster-recovery-aws
# - Name: Disaster recovery for a hosted cluster by using OADP
# File: hcp-disaster-recovery-oadp
# - Name: Automated disaster recovery for a hosted cluster by using OADP
# File: hcp-disaster-recovery-oadp-auto
# - Name: Authentication and authorization for hosted control planes
# File: hcp-authentication-authorization
# - Name: Handling machine configuration for hosted control planes
# File: hcp-machine-config
# - Name: Using feature gates in a hosted cluster
# File: hcp-using-feature-gates
# - Name: Observability for hosted control planes
# File: hcp-observability
# - Name: Networking for hosted control planes
# File: hcp-networking
# - Name: Troubleshooting hosted control planes
# File: hcp-troubleshooting
# - Name: Destroying a hosted cluster
# Dir: hcp-destroy
# Topics:
# - Name: Destroying a hosted cluster on AWS
# File: hcp-destroy-aws
# - Name: Destroying a hosted cluster on bare metal
# File: hcp-destroy-bm
# - Name: Destroying a hosted cluster on OpenShift Virtualization
# File: hcp-destroy-virt
# - Name: Destroying a hosted cluster on IBM Z
# File: hcp-destroy-ibmz
# - Name: Destroying a hosted cluster on IBM Power
# File: hcp-destroy-ibm-power
# - Name: Destroying a hosted cluster on OpenStack
# File: hcp-destroy-openstack
# - Name: Destroying a hosted cluster on non-bare-metal agent machines
# File: hcp-destroy-non-bm
# - Name: Manually importing a hosted cluster
# File: hcp-import
---
Name: Nodes
Dir: nodes


@@ -35,12 +35,12 @@ endif::openshift-dedicated,openshift-rosa[]
include::modules/architecture-machine-roles.adoc[leveloffset=+1]
// This additional resource does not apply to OSD/ROSA
ifndef::openshift-dedicated,openshift-rosa[]
[role="_additional-resources"]
.Additional resources
* xref:../hosted_control_planes/index.adoc#hcp-overview[{hcp-capital} overview]
endif::openshift-dedicated,openshift-rosa[]
// // This additional resource does not apply to OSD/ROSA
// ifndef::openshift-dedicated,openshift-rosa[]
// [role="_additional-resources"]
// .Additional resources
// * xref:../hosted_control_planes/index.adoc#hcp-overview[{hcp-capital} overview]
// endif::openshift-dedicated,openshift-rosa[]
include::modules/operators-overview.adoc[leveloffset=+1]


@@ -61,12 +61,12 @@ Operators are important components in {product-title} because they provide the f
* Manage over-the-air updates
* Ensure applications stay in the specified state
ifndef::openshift-dedicated,openshift-rosa[]
[role="_additional-resources"]
.Additional resources
// ifndef::openshift-dedicated,openshift-rosa[]
// [role="_additional-resources"]
// .Additional resources
* xref:../hosted_control_planes/index.adoc#hcp-overview[{hcp-capital} overview]
endif::openshift-dedicated,openshift-rosa[]
// * xref:../hosted_control_planes/index.adoc#hcp-overview[{hcp-capital} overview]
// endif::openshift-dedicated,openshift-rosa[]
[id="about-containerized-applications-for-developers"]
== About containerized applications for developers


@@ -16,7 +16,7 @@ One of the challenges of scaling Kubernetes environments is managing the lifecyc
When you enable multicluster engine on {product-title}, you gain the following capabilities:
* xref:../hosted_control_planes/index.adoc#hcp-overview[{hcp-capital}], which is a feature that is based on the HyperShift project. With a centralized hosted control plane, you can operate {product-title} clusters in a hyperscale manner.
* {hcp-capital}, which is a feature that is based on the HyperShift project. With a centralized hosted control plane, you can operate {product-title} clusters in a hyperscale manner.
* Hive, which provisions self-managed {product-title} clusters to the hub and completes the initial configurations for those clusters.
* klusterlet agent, which registers managed clusters to the hub.
* Infrastructure Operator, which manages the deployment of the Assisted Service to orchestrate on-premise bare metal and vSphere installations of {product-title}, such as {sno} on bare metal. The Infrastructure Operator includes xref:../edge_computing/ztp-deploying-far-edge-clusters-at-scale.adoc#ztp-challenges-of-far-edge-deployments_ztp-deploying-far-edge-clusters-at-scale[{ztp-first}], which fully automates cluster creation on bare metal and vSphere provisioning with GitOps workflows to manage deployments and configuration changes.
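
For orientation, the {hcp} capability in the preceding list is switched on as a component of {mce}. The following manifest is a minimal sketch of that enablement and is not part of this change set: the `hypershift` component name and the `spec.overrides.components` structure are assumptions drawn from the {mce} API, so confirm them against your installed version.

[source,yaml]
----
apiVersion: multicluster.openshift.io/v1
kind: MultiClusterEngine
metadata:
  name: multiclusterengine
spec:
  overrides:
    components:
    # Component name assumed for illustration; verify it against the
    # installed multicluster engine version before applying.
    - name: hypershift
      enabled: true
----

Apply the manifest to the hub cluster with `oc apply -f <file_name>`.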


@@ -26,10 +26,10 @@ After you have an etcd backup, you can xref:../../backup_and_restore/control_pla
// Backing up etcd data
include::modules/backup-etcd.adoc[leveloffset=+1]
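
For quick reference, the etcd backup that this assembly documents is typically taken by running the cluster backup script on a control plane node, along the lines of the following sketch; the node name and output directory are placeholders.

[source,terminal]
----
$ oc debug node/<control_plane_node> -- chroot /host /usr/local/bin/cluster-backup.sh /home/core/assets/backup
----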
[role="_additional-resources"]
[id="additional-resources_backup-etcd"]
== Additional resources
* xref:../../hosted_control_planes/hcp_high_availability/hcp-recovering-etcd-cluster.adoc#hcp-recovering-etcd-cluster[Recovering an unhealthy etcd cluster]
// [role="_additional-resources"]
// [id="additional-resources_backup-etcd"]
// == Additional resources
// * xref:../../hosted_control_planes/hcp_high_availability/hcp-recovering-etcd-cluster.adoc#hcp-recovering-etcd-cluster[Recovering an unhealthy etcd cluster]
// Creating automated etcd backups
include::modules/etcd-creating-automated-backups.adoc[leveloffset=+1]


@@ -26,9 +26,9 @@ After you have an etcd backup, you can xref:../../backup_and_restore/control_pla
// Backing up etcd data
include::modules/backup-etcd.adoc[leveloffset=+1]
[role="_additional-resources"]
.Additional resources
* xref:../../hosted_control_planes/hcp_high_availability/hcp-recovering-etcd-cluster.adoc#hcp-recovering-etcd-cluster[Recovering an unhealthy etcd cluster]
// [role="_additional-resources"]
// .Additional resources
// * xref:../../hosted_control_planes/hcp_high_availability/hcp-recovering-etcd-cluster.adoc#hcp-recovering-etcd-cluster[Recovering an unhealthy etcd cluster]
// Creating automated etcd backups
include::modules/etcd-creating-automated-backups.adoc[leveloffset=+1]


@@ -6,171 +6,173 @@ include::_attributes/common-attributes.adoc[]
toc::[]
Release notes contain information about new and deprecated features, changes, and known issues.
include::snippets/hcp-snippet.adoc[]
[id="hcp-4-20-release-notes_{context}"]
== {hcp-capital} release notes for {product-title} 4.20
// Release notes contain information about new and deprecated features, changes, and known issues.
With this release, {hcp} for {product-title} 4.20 is available. {hcp-capital} for {product-title} 4.20 supports {mce} version 2.10.
// [id="hcp-4-20-release-notes_{context}"]
// == {hcp-capital} release notes for {product-title} 4.20
[id="hcp-4-20-new-features-and-enhancements_{context}"]
=== New features and enhancements
// With this release, {hcp} for {product-title} 4.20 is available. {hcp-capital} for {product-title} 4.20 supports {mce} version 2.10.
[id="hcp-4-20-scale-up-only_{context}"]
==== Scaling up workloads in a hosted cluster
// [id="hcp-4-20-new-features-and-enhancements_{context}"]
// === New features and enhancements
You can now scale up workloads in your hosted cluster, without scaling them down, by using the `ScaleUpOnly` behavior. For more information, see xref:../hosted_control_planes/hcp-machine-config.adoc#scale-up-autoscaler-hcp_hcp-machine-config[Scaling up workloads in a hosted cluster].
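
The following fragment is a minimal sketch of selecting this behavior on a `HostedCluster` resource. Only the `ScaleUpOnly` value comes from this note; the `spec.autoscaling.scaling` field path and the metadata values are assumptions, so treat the linked procedure as the authoritative syntax.

[source,yaml]
----
apiVersion: hypershift.openshift.io/v1beta1
kind: HostedCluster
metadata:
  name: example        # placeholder cluster name
  namespace: clusters  # placeholder namespace
spec:
  autoscaling:
    # Field path assumed for illustration. ScaleUpOnly adds nodes as
    # workloads grow but never removes them automatically.
    scaling: ScaleUpOnly
----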
// [id="hcp-4-20-scale-up-only_{context}"]
// ==== Scaling up workloads in a hosted cluster
[id="hcp-4-20-scale-up-down_{context}"]
==== Scaling up and down workloads in a hosted cluster
// You can now scale up workloads in your hosted cluster, without scaling them down, by using the `ScaleUpOnly` behavior. For more information, see xref:../hosted_control_planes/hcp-machine-config.adoc#scale-up-autoscaler-hcp_hcp-machine-config[Scaling up workloads in a hosted cluster].
You can now scale up and down the workloads by using the `ScaleUpAndScaleDown` behavior in your hosted cluster. For more information, see xref:../hosted_control_planes/hcp-machine-config.adoc#scale-up-down-autoscaler-hcp_hcp-machine-config[Scaling up and down workloads in a hosted cluster].
// [id="hcp-4-20-scale-up-down_{context}"]
// ==== Scaling up and down workloads in a hosted cluster
[id="hcp-4-20-balance-ignored-labels_{context}"]
==== Balancing ignored labels in a hosted cluster
// You can now scale up and down the workloads by using the `ScaleUpAndScaleDown` behavior in your hosted cluster. For more information, see xref:../hosted_control_planes/hcp-machine-config.adoc#scale-up-down-autoscaler-hcp_hcp-machine-config[Scaling up and down workloads in a hosted cluster].
After scaling up your node pools, you can now set `balancingIgnoredLabels` to evenly distribute the machines across node pools. For more information, see xref:../hosted_control_planes/hcp-machine-config.adoc#balance-ignored-labels-autoscaler-hcp_hcp-machine-config[Balancing ignored labels in a hosted cluster].
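
A minimal sketch of the setting follows. The `balancingIgnoredLabels` field name comes from this note; its placement under `spec.autoscaling` and the label keys are assumptions for illustration.

[source,yaml]
----
spec:
  autoscaling:
    # Labels listed here are ignored when the autoscaler decides whether
    # node pools are similar enough to balance machines across them.
    balancingIgnoredLabels:
    - topology.kubernetes.io/zone
    - example.com/team  # placeholder custom label
----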
// [id="hcp-4-20-balance-ignored-labels_{context}"]
// ==== Balancing ignored labels in a hosted cluster
[id="hcp-4-20-priority-expander_{context}"]
==== Setting the priority expander in a hosted cluster
// After scaling up your node pools, you can now set `balancingIgnoredLabels` to evenly distribute the machines across node pools. For more information, see xref:../hosted_control_planes/hcp-machine-config.adoc#balance-ignored-labels-autoscaler-hcp_hcp-machine-config[Balancing ignored labels in a hosted cluster].
You can now create high priority machines before low priority machines by configuring the priority expander in your hosted cluster. For more information, see xref:../hosted_control_planes/hcp-machine-config.adoc#priority-expander-autoscaler-hcp_hcp-machine-config[Setting the priority expander in a hosted cluster].
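
For reference, the upstream cluster autoscaler priority expander is driven by a config map such as the following, where higher numbers win and each priority maps to a list of regular expressions over node group names. This sketch shows only the upstream format; the config map namespace and the way the setting is surfaced through the hosted cluster API are assumptions, so follow the linked procedure for the supported configuration.

[source,yaml]
----
apiVersion: v1
kind: ConfigMap
metadata:
  name: cluster-autoscaler-priority-expander
  namespace: kube-system  # namespace assumed; it can differ for hosted control planes
data:
  priorities: |-
    # Higher numbers are expanded first; values are regexes over node group names.
    50:
      - .*high-priority.*
    10:
      - .*low-priority.*
----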
// [id="hcp-4-20-priority-expander_{context}"]
// ==== Setting the priority expander in a hosted cluster
[id="hcp-4-20-ibm-z-disconnected_{context}"]
==== {hcp-capital} on {ibm-z-title} in a disconnected environment is Generally Available
// You can now create high priority machines before low priority machines by configuring the priority expander in your hosted cluster. For more information, see xref:../hosted_control_planes/hcp-machine-config.adoc#priority-expander-autoscaler-hcp_hcp-machine-config[Setting the priority expander in a hosted cluster].
As of this release, {hcp} on {ibm-z-title} in a disconnected environment is a Generally Available feature. For more information, see xref:../hosted_control_planes/hcp-disconnected/disconnected-install-ibmz-hcp.adoc[Deploying {hcp} on {ibm-z-title} in a disconnected environment].
// [id="hcp-4-20-ibm-z-disconnected_{context}"]
// ==== {hcp-capital} on {ibm-z-title} in a disconnected environment is Generally Available
//[id="hcp-4-20-internal-subnets-hcp_{context}"]
//==== Configuring internal subnets for hosted clusters
// As of this release, {hcp} on {ibm-z-title} in a disconnected environment is a Generally Available feature. For more information, see xref:../hosted_control_planes/hcp-disconnected/disconnected-install-ibmz-hcp.adoc[Deploying {hcp} on {ibm-z-title} in a disconnected environment].
//In hosted clusters, you can configure internal IPv4 subnets that the OVN-Kubernetes network plugin uses to provide flexibility and avoid classless inter-domain routing (CIDR) conflicts. For more information, see xref:../hosted_control_planes/hcp-networking.adoc#hcp-custom-ovn-subnets_hcp-networking[Configuring internal OVN IPv4 subnets for hosted clusters].
// //[id="hcp-4-20-internal-subnets-hcp_{context}"]
// //==== Configuring internal subnets for hosted clusters
[id="bug-fixes-hcp-rn-4-20_{context}"]
=== Bug fixes
// //In hosted clusters, you can configure internal IPv4 subnets that the OVN-Kubernetes network plugin uses to provide flexibility and avoid classless inter-domain routing (CIDR) conflicts. For more information, see xref:../hosted_control_planes/hcp-networking.adoc#hcp-custom-ovn-subnets_hcp-networking[Configuring internal OVN IPv4 subnets for hosted clusters].
* Before this update, the SAN validation for custom certificates in `hc.spec.configuration.apiServer.servingCerts.namedCertificates` did not properly handle wildcard DNS patterns, such as `\*.example.com`. As a consequence, the wildcard DNS patterns in custom certificates could conflict with internal Kubernetes API server certificate SANs without being detected, leading to certificate validation failures and potential deployment issues. This release provides enhanced DNS SAN conflict detection to include RFC-compliant wildcard support, implementing bidirectional conflict validation that properly handles wildcard patterns such as `*.example.com` matching `sub.example.com`. As a result, wildcard DNS patterns are now properly validated, preventing certificate conflicts and ensuring more reliable hosted cluster deployments with wildcard certificate support. (link:https://issues.redhat.com/browse/OCPBUGS-60381[OCPBUGS-60381])
// [id="bug-fixes-hcp-rn-4-20_{context}"]
// === Bug fixes
* Before this update, the Azure cloud provider did not set the default ping target, `HTTP:10256/healthz`, for the Azure load balancer. Instead, services of the `LoadBalancer` type that ran on Azure had a ping target of `TCP:30810`. As a consequence, the health probes for cluster-wide services were non-functional, and during upgrades, they experienced downtime. With this release, the `ClusterServiceLoadBalancerHealthProbeMode` property of the cloud configuration is set to `shared`. As a result, load balancers in Azure have the correct health check ping target, `HTTP:10256/healthz`, which points to `kube-proxy` health endpoints that run on nodes. (link:https://issues.redhat.com/browse/OCPBUGS-58031[OCPBUGS-58031])
// * Before this update, the SAN validation for custom certificates in `hc.spec.configuration.apiServer.servingCerts.namedCertificates` did not properly handle wildcard DNS patterns, such as `\*.example.com`. As a consequence, the wildcard DNS patterns in custom certificates could conflict with internal Kubernetes API server certificate SANs without being detected, leading to certificate validation failures and potential deployment issues. This release provides enhanced DNS SAN conflict detection to include RFC-compliant wildcard support, implementing bidirectional conflict validation that properly handles wildcard patterns such as `*.example.com` matching `sub.example.com`. As a result, wildcard DNS patterns are now properly validated, preventing certificate conflicts and ensuring more reliable hosted cluster deployments with wildcard certificate support. (link:https://issues.redhat.com/browse/OCPBUGS-60381[OCPBUGS-60381])
* Before this update, the HyperShift Operator failed to clear the `user-ca-bundle` config map after the removal of the `additionalTrustBundle` parameter from the `HostedCluster` resource. As a consequence, the `user-ca-bundle` config map was not updated, resulting in failure to generate ignition payloads. With this release, the HyperShift Operator actively removes the `user-ca-bundle` config map from the control plane namespace when it is removed from the `HostedCluster` resource. As a result, the `user-ca-bundle` config map is now correctly cleared, enabling the generation of ignition payloads. (link:https://issues.redhat.com/browse/OCPBUGS-57336[OCPBUGS-57336])
// * Before this update, the Azure cloud provider did not set the default ping target, `HTTP:10256/healthz`, for the Azure load balancer. Instead, services of the `LoadBalancer` type that ran on Azure had a ping target of `TCP:30810`. As a consequence, the health probes for cluster-wide services were non-functional, and during upgrades, they experienced downtime. With this release, the `ClusterServiceLoadBalancerHealthProbeMode` property of the cloud configuration is set to `shared`. As a result, load balancers in Azure have the correct health check ping target, `HTTP:10256/healthz`, which points to `kube-proxy` health endpoints that run on nodes. (link:https://issues.redhat.com/browse/OCPBUGS-58031[OCPBUGS-58031])
* Before this update, if you tried to create a hosted cluster on AWS when the Kubernetes API server service publishing strategy was `LoadBalancer` with `PublicAndPrivate` endpoint access, a private router admitted the OAuth route even though the External DNS Operator did not register a DNS record. As a consequence, the private router did not properly resolve the route URL and the OAuth server was inaccessible. The Console Cluster Operator also failed to start, and the hosted cluster installation failed. With this release, a private router admits the OAuth route only when the external DNS is defined. Otherwise, the router admits the route in the management cluster. As a result, the OAuth route is accessible, the Console Cluster Operator properly starts, and the hosted cluster installation succeeds. (link:https://issues.redhat.com/browse/OCPBUGS-56914[OCPBUGS-56914])
// * Before this update, the HyperShift Operator failed to clear the `user-ca-bundle` config map after the removal of the `additionalTrustBundle` parameter from the `HostedCluster` resource. As a consequence, the `user-ca-bundle` config map was not updated, resulting in failure to generate ignition payloads. With this release, the HyperShift Operator actively removes the `user-ca-bundle` config map from the control plane namespace when it is removed from the `HostedCluster` resource. As a result, the `user-ca-bundle` config map is now correctly cleared, enabling the generation of ignition payloads. (link:https://issues.redhat.com/browse/OCPBUGS-57336[OCPBUGS-57336])
* Before this release, when an IDMS or ICSP in the management OpenShift cluster defined a source that pointed to registry.redhat.io or registry.redhat.io/redhat, and the mirror registry did not contain the required OLM catalog images, provisioning for the `HostedCluster` resource stalled due to unauthorized image pulls. As a consequence, the `HostedCluster` resource was not deployed, and it remained blocked, where it could not pull essential catalog images from the mirrored registry. With this release, if a required image cannot be pulled due to authorization errors, the provisioning now explicitly fails. The logic for registry override is improved to allow matches on the root of the registry, such as registry.redhat.io, for OLM CatalogSource image resolution. A fallback mechanism is also introduced to use the original `ImageReference` if the registry override does not yield a working image. As a result, the `HostedCluster` resource can be deployed successfully, even in scenarios where the mirror registry lacks the required OLM catalog images, as the system correctly falls back to pulling from the original source when appropriate. (link:https://issues.redhat.com/browse/OCPBUGS-56492[OCPBUGS-56492])
// * Before this update, if you tried to create a hosted cluster on AWS when the Kubernetes API server service publishing strategy was `LoadBalancer` with `PublicAndPrivate` endpoint access, a private router admitted the OAuth route even though the External DNS Operator did not register a DNS record. As a consequence, the private router did not properly resolve the route URL and the OAuth server was inaccessible. The Console Cluster Operator also failed to start, and the hosted cluster installation failed. With this release, a private router admits the OAuth route only when the external DNS is defined. Otherwise, the router admits the route in the management cluster. As a result, the OAuth route is accessible, the Console Cluster Operator properly starts, and the hosted cluster installation succeeds. (link:https://issues.redhat.com/browse/OCPBUGS-56914[OCPBUGS-56914])
* Before this update, the AWS Cloud Provider did not set the default ping target, `HTTP:10256/healthz`, for the AWS load balancer. For services of the `LoadBalancer` type that run on AWS, the load balancer object created in AWS had a ping target of `TCP:32518`. As a consequence, the health probes for cluster-wide services were non-functional, and during upgrades, those services were down. With this release, the `ClusterServiceLoadBalancerHealthProbeMode` property of the cloud configuration is set to `Shared`. This cloud configuration is passed to the AWS Cloud Provider. As a result, the load balancers in AWS have the correct health check ping target, `HTTP:10256/healthz`, which points to the `kube-proxy` health endpoints that are running on nodes. (link:https://issues.redhat.com/browse/OCPBUGS-56011[OCPBUGS-56011])
// * Before this release, when an IDMS or ICSP in the management OpenShift cluster defined a source that pointed to registry.redhat.io or registry.redhat.io/redhat, and the mirror registry did not contain the required OLM catalog images, provisioning for the `HostedCluster` resource stalled due to unauthorized image pulls. As a consequence, the `HostedCluster` resource was not deployed, and it remained blocked, where it could not pull essential catalog images from the mirrored registry. With this release, if a required image cannot be pulled due to authorization errors, the provisioning now explicitly fails. The logic for registry override is improved to allow matches on the root of the registry, such as registry.redhat.io, for OLM CatalogSource image resolution. A fallback mechanism is also introduced to use the original `ImageReference` if the registry override does not yield a working image. As a result, the `HostedCluster` resource can be deployed successfully, even in scenarios where the mirror registry lacks the required OLM catalog images, as the system correctly falls back to pulling from the original source when appropriate. (link:https://issues.redhat.com/browse/OCPBUGS-56492[OCPBUGS-56492])
* Before this update, when you disabled the image registry capability by using the `--disable-cluster-capabilities` option, {hcp} still required you to configure a managed identity for the image registry. In this release, when the image registry is disabled, the image registry managed identity configuration is optional. (link:https://issues.redhat.com/browse/OCPBUGS-55892[OCPBUGS-55892])
// * Before this update, the AWS Cloud Provider did not set the default ping target, `HTTP:10256/healthz`, for the AWS load balancer. For services of the `LoadBalancer` type that run on AWS, the load balancer object created in AWS had a ping target of `TCP:32518`. As a consequence, the health probes for cluster-wide services were non-functional, and during upgrades, those services were down. With this release, the `ClusterServiceLoadBalancerHealthProbeMode` property of the cloud configuration is set to `Shared`. This cloud configuration is passed to the AWS Cloud Provider. As a result, the load balancers in AWS have the correct health check ping target, `HTTP:10256/healthz`, which points to the `kube-proxy` health endpoints that are running on nodes. (link:https://issues.redhat.com/browse/OCPBUGS-56011[OCPBUGS-56011])
* Before this update, the `ImageDigestMirrorSet` (IDMS) and `ImageContentSourcePolicy` (ICSP) resources from the management cluster were processed without considering that someone might specify only the root registry name as a mirror or source for image replacement. As a consequence, the IDMS and ICSP entries that used only the root registry name did not work as expected. In this release, the mirror replacement logic now correctly handles cases where only the root registry name is provided. As a result, the issue no longer occurs, and root registry mirror replacements are now supported. (link:https://issues.redhat.com/browse/OCPBUGS-54483[OCPBUGS-54483])
// * Before this update, when you disabled the image registry capability by using the `--disable-cluster-capabilities` option, {hcp} still required you to configure a managed identity for the image registry. In this release, when the image registry is disabled, the image registry managed identity configuration is optional. (link:https://issues.redhat.com/browse/OCPBUGS-55892[OCPBUGS-55892])
* Before this update, {hcp} did not correctly persist registry metadata and release image provider caches in the `HostedCluster` resource. As a consequence, caches for release and image metadata reset on `HostedCluster` controller reconciliation. This release introduces a common registry provider which is used by the `HostedCluster` resource to fix cache loss. This reduces the number of image pulls and network traffic, thus improving overall performance. (link:https://issues.redhat.com/browse/OCPBUGS-53259[OCPBUGS-53259])
// * Before this update, the `ImageDigestMirrorSet` (IDMS) and `ImageContentSourcePolicy` (ICSP) resources from the management cluster were processed without considering that someone might specify only the root registry name as a mirror or source for image replacement. As a consequence, the IDMS and ICSP entries that used only the root registry name did not work as expected. In this release, the mirror replacement logic now correctly handles cases where only the root registry name is provided. As a result, the issue no longer occurs, and root registry mirror replacements are now supported. (link:https://issues.redhat.com/browse/OCPBUGS-54483[OCPBUGS-54483])
* Before this update, when you configured an OIDC provider for a `HostedCluster` resource with an OIDC client that did not specify a client secret, the system automatically generated a default secret name. As a consequence, you could not configure OIDC public clients, which are not supposed to use secrets. This release fixes the issue. If no client secret is provided, no default secret name is generated, enabling proper support for public clients. (link:https://issues.redhat.com/browse/OCPBUGS-58149[OCPBUGS-58149])
// * Before this update, {hcp} did not correctly persist registry metadata and release image provider caches in the `HostedCluster` resource. As a consequence, caches for release and image metadata reset on `HostedCluster` controller reconciliation. This release introduces a common registry provider which is used by the `HostedCluster` resource to fix cache loss. This reduces the number of image pulls and network traffic, thus improving overall performance. (link:https://issues.redhat.com/browse/OCPBUGS-53259[OCPBUGS-53259])
* Before this update, multiple mirror images caused a hosted control plane payload error due to failed image lookup. As a consequence, users could not create hosted clusters. With this release, the hosted control plane payload now supports multiple mirrors, avoiding errors when a primary mirror is unavailable. As a result, users can create hosted clusters. (link:https://issues.redhat.com/browse/OCPBUGS-54720[OCPBUGS-54720])
// * Before this update, when you configured an OIDC provider for a `HostedCluster` resource with an OIDC client that did not specify a client secret, the system automatically generated a default secret name. As a consequence, you could not configure OIDC public clients, which are not supposed to use secrets. This release fixes the issue. If no client secret is provided, no default secret name is generated, enabling proper support for public clients. (link:https://issues.redhat.com/browse/OCPBUGS-58149[OCPBUGS-58149])
* Before this update, when a hosted cluster was upgraded to multiple versions over time, the version history in the `HostedCluster` resource sometimes exceeded 10 entries. However, the API had a strict validation limit of 10 items maximum for the version history field. As a consequence, users could not edit or update their `HostedCluster` resources when the version history exceeded 10 entries. Operations such as adding annotations (for example, for cluster size overrides) or performing maintenance tasks like resizing request serving nodes failed with a validation error: "status.version.history: Too many: 11: must have at most 10 items". This error prevented ROSA SREs from performing critical maintenance operations that might impact customer API access.
+
With this release, the maximum items validation constraint has been removed from the version history field in the `HostedCluster` API, allowing the history to grow beyond 10 entries without triggering validation errors. As a result, `HostedCluster` resources can now be edited and updated regardless of how many entries exist in the version history, so that administrators can perform necessary maintenance operations on clusters that have undergone multiple version upgrades. (link:https://issues.redhat.com/browse/OCPBUGS-58200[OCPBUGS-58200])
// * Before this update, multiple mirror images caused a hosted control plane payload error due to failed image lookup. As a consequence, users could not create hosted clusters. With this release, the hosted control plane payload now supports multiple mirrors, avoiding errors when a primary mirror is unavailable. As a result, users can create hosted clusters. (link:https://issues.redhat.com/browse/OCPBUGS-54720[OCPBUGS-54720])
* Before this update, following a CLI refactoring, the `MarkPersistentFlagRequired` function stopped working correctly. The `--name` and `--pull-secret` flags, which are critical for cluster creation, were marked as required, but the validation was not being enforced. As a consequence, users could run the `hypershift create cluster` command without providing the required `--name` or `--pull-secret` flags, and the CLI would not immediately alert them that these required flags were missing. This could lead to misconfigured deployments and confusing error messages later in the process.
+
This release adds an explicit validation in the `RawCreateOptions.Validate()` function to check for the presence of the `--name` and `--pull-secret` flags, returning clear error messages when either flag is missing. Additionally, the default "example" value is removed from the name field to ensure proper validation. As a result, when users attempt to create a cluster without the required `--name` or `--pull-secret` flags, they now receive immediate, clear error messages indicating which required flag is missing (for example, "Error: --name is required" or "Error: --pull-secret is required"), preventing misconfigured deployments and improving the user experience. (link:https://issues.redhat.com/browse/OCPBUGS-37323[OCPBUGS-37323])
// * Before this update, when a hosted cluster was upgraded to multiple versions over time, the version history in the `HostedCluster` resource sometimes exceeded 10 entries. However, the API had a strict validation limit of 10 items maximum for the version history field. As a consequence, users could not edit or update their `HostedCluster` resources when the version history exceeded 10 entries. Operations such as adding annotations (for example, for cluster size overrides) or performing maintenance tasks like resizing request serving nodes failed with a validation error: "status.version.history: Too many: 11: must have at most 10 items". This error prevented ROSA SREs from performing critical maintenance operations that might impact customer API access.
// +
// With this release, the maximum items validation constraint has been removed from the version history field in the `HostedCluster` API, allowing the history to grow beyond 10 entries without triggering validation errors. As a result, `HostedCluster` resources can now be edited and updated regardless of how many entries exist in the version history, so that administrators can perform necessary maintenance operations on clusters that have undergone multiple version upgrades. (link:https://issues.redhat.com/browse/OCPBUGS-58200[OCPBUGS-58200])
* Before this update, a variable shadowing bug in the `GetSupportedOCPVersions()` function caused the `supportedVersions` variable to be incorrectly assigned using `:=` instead of `=`, creating a local variable that was immediately discarded rather than updating the intended outer scope variable. As a consequence, when users ran the `hypershift version` command with the HyperShift Operator deployed, the CLI would either display `<unknown>` for the Server Version or panic with a "nil pointer dereference" error, preventing users from verifying the deployed HyperShift Operator version.
+
This release corrects the variable assignment from `supportedVersions :=` to `supportedVersions =` in the `GetSupportedOCPVersions()` function to properly assign the config map to the outer scope variable, ensuring the supported versions data is correctly populated. As a result, the `hypershift version` command now correctly displays the Server Version (for example, "Server Version: f001510b35842df352d1ab55d961be3fdc2dae32") when the HyperShift Operator is deployed, so that users can verify the running operator version and supported {product-title} versions. (link:https://issues.redhat.com/browse/OCPBUGS-57316[OCPBUGS-57316])
// * Before this update, following a CLI refactoring, the `MarkPersistentFlagRequired` function stopped working correctly. The `--name` and `--pull-secret` flags, which are critical for cluster creation, were marked as required, but the validation was not being enforced. As a consequence, users could run the `hypershift create cluster` command without providing the required `--name` or `--pull-secret` flags, and the CLI would not immediately alert them that these required flags were missing. This could lead to misconfigured deployments and confusing error messages later in the process.
// +
// This release adds an explicit validation in the `RawCreateOptions.Validate()` function to check for the presence of the `--name` and `--pull-secret` flags, returning clear error messages when either flag is missing. Additionally, the default "example" value is removed from the name field to ensure proper validation. As a result, when users attempt to create a cluster without the required `--name` or `--pull-secret` flags, they now receive immediate, clear error messages indicating which required flag is missing (for example, "Error: --name is required" or "Error: --pull-secret is required"), preventing misconfigured deployments and improving the user experience. (link:https://issues.redhat.com/browse/OCPBUGS-37323[OCPBUGS-37323])
* Before this update, the HyperShift Operator validated the Kubernetes API Server subject alternative names (SANs) in all cases. As a consequence, users sometimes experienced invalid API Server SANs during public key infrastructure (PKI) reconciliation. With this release, the Kubernetes API Server SANs are validated only if PKI reconciliation is not disabled. (link:https://issues.redhat.com/browse/OCPBUGS-56457[OCPBUGS-56457])
// * Before this update, a variable shadowing bug in the `GetSupportedOCPVersions()` function caused the `supportedVersions` variable to be incorrectly assigned using `:=` instead of `=`, creating a local variable that was immediately discarded rather than updating the intended outer scope variable. As a consequence, when users ran the `hypershift version` command with the HyperShift Operator deployed, the CLI would either display `<unknown>` for the Server Version or panic with a "nil pointer dereference" error, preventing users from verifying the deployed HyperShift Operator version.
// +
// This release corrects the variable assignment from `supportedVersions :=` to `supportedVersions =` in the `GetSupportedOCPVersions()` function to properly assign the config map to the outer scope variable, ensuring the supported versions data is correctly populated. As a result, the `hypershift version` command now correctly displays the Server Version (for example, "Server Version: f001510b35842df352d1ab55d961be3fdc2dae32") when the HyperShift Operator is deployed, so that users can verify the running operator version and supported {product-title} versions. (link:https://issues.redhat.com/browse/OCPBUGS-57316[OCPBUGS-57316])
* Before this update, the shared ingress controller did not handle the `HostedCluster.Spec.KubeAPIServerDNSName` field, so custom kube-apiserver DNS names were not added to the router configuration. As a consequence, traffic destined for the kube-apiserver on a hosted control plane that used a custom DNS name (via `HostedCluster.Spec.KubeAPIServerDNSName`) was not routed correctly, preventing the `KubeAPIExternalName` feature from working with platforms that use shared ingress.
+
This release adds handling for `HostedCluster.Spec.KubeAPIServerDNSName` in the shared ingress controller. When a hosted cluster specifies a custom kube-apiserver DNS name, the controller now automatically creates a route that directs traffic to the kube-apiserver service. As a result, traffic destined for custom kube-apiserver DNS names is now correctly routed by the shared ingress controller, enabling the `KubeAPIExternalName` feature to work on platforms that use shared ingress. (link:https://issues.redhat.com/browse/OCPBUGS-57790[OCPBUGS-57790])
// * Before this update, the HyperShift Operator validated the Kubernetes API Server subject alternative names (SANs) in all cases. As a consequence, users sometimes experienced invalid API Server SANs during public key infrastructure (PKI) reconciliation. With this release, the Kubernetes API Server SANs are validated only if PKI reconciliation is not disabled. (link:https://issues.redhat.com/browse/OCPBUGS-56457[OCPBUGS-56457])
[id="known-issues-hcp-rn-4-20_{context}"]
=== Known issues
// * Before this update, the shared ingress controller did not handle the `HostedCluster.Spec.KubeAPIServerDNSName` field, so custom kube-apiserver DNS names were not added to the router configuration. As a consequence, traffic destined for the kube-apiserver on a hosted control plane that used a custom DNS name (via `HostedCluster.Spec.KubeAPIServerDNSName`) was not routed correctly, preventing the `KubeAPIExternalName` feature from working with platforms that use shared ingress.
// +
// This release adds handling for `HostedCluster.Spec.KubeAPIServerDNSName` in the shared ingress controller. When a hosted cluster specifies a custom kube-apiserver DNS name, the controller now automatically creates a route that directs traffic to the kube-apiserver service. As a result, traffic destined for custom kube-apiserver DNS names is now correctly routed by the shared ingress controller, enabling the `KubeAPIExternalName` feature to work on platforms that use shared ingress. (link:https://issues.redhat.com/browse/OCPBUGS-57790[OCPBUGS-57790])
* If the annotation and the `ManagedCluster` resource name do not match, the {mce} console displays the cluster as `Pending import`. The cluster cannot be used by the {mce-short}. The same issue happens when there is no annotation and the `ManagedCluster` name does not match the `Infra-ID` value of the `HostedCluster` resource.
// [id="known-issues-hcp-rn-4-20_{context}"]
// === Known issues
* When you use the {mce} console to add a new node pool to an existing hosted cluster, the same version of {product-title} might appear more than once in the list of options. You can select any instance in the list for the version that you want.
// * If the annotation and the `ManagedCluster` resource name do not match, the {mce} console displays the cluster as `Pending import`. The cluster cannot be used by the {mce-short}. The same issue happens when there is no annotation and the `ManagedCluster` name does not match the `Infra-ID` value of the `HostedCluster` resource.
* When a node pool is scaled down to 0 workers, the list of hosts in the console still shows nodes in a `Ready` state. You can verify the number of nodes in two ways:
// * When you use the {mce} console to add a new node pool to an existing hosted cluster, the same version of {product-title} might appear more than once in the list of options. You can select any instance in the list for the version that you want.
** In the console, go to the node pool and verify that it has 0 nodes.
** On the command-line interface, run the following commands:
// * When a node pool is scaled down to 0 workers, the list of hosts in the console still shows nodes in a `Ready` state. You can verify the number of nodes in two ways:
*** Verify that 0 nodes are in the node pool by running the following command:
+
[source,terminal]
----
$ oc get nodepool -A
----
// ** In the console, go to the node pool and verify that it has 0 nodes.
// ** On the command-line interface, run the following commands:
*** Verify that 0 nodes are in the cluster by running the following command:
+
[source,terminal]
----
$ oc get nodes --kubeconfig <hosted_cluster_kubeconfig_file>
----
// *** Verify that 0 nodes are in the node pool by running the following command:
// +
// [source,terminal]
// ----
// $ oc get nodepool -A
// ----
*** Verify that 0 agents are reported as bound to the cluster by running the following command:
+
[source,terminal]
----
$ oc get agents -A
----
// *** Verify that 0 nodes are in the cluster by running the following command:
// +
// [source,terminal]
// ----
// $ oc get nodes --kubeconfig <hosted_cluster_kubeconfig_file>
// ----
* When you create a hosted cluster in an environment that uses the dual-stack network, you might encounter pods stuck in the `ContainerCreating` state. This issue occurs because the `openshift-service-ca-operator` resource cannot generate the `metrics-tls` secret that the DNS pods need for DNS resolution. As a result, the pods cannot resolve the Kubernetes API server. To resolve this issue, configure the DNS server settings for a dual stack network.
// *** Verify that 0 agents are reported as bound to the cluster by running the following command:
// +
// [source,terminal]
// ----
// $ oc get agents -A
// ----
* If you created a hosted cluster in the same namespace as its managed cluster, detaching the managed hosted cluster deletes everything in the managed cluster namespace including the hosted cluster. The following situations can create a hosted cluster in the same namespace as its managed cluster:
// * When you create a hosted cluster in an environment that uses the dual-stack network, you might encounter pods stuck in the `ContainerCreating` state. This issue occurs because the `openshift-service-ca-operator` resource cannot generate the `metrics-tls` secret that the DNS pods need for DNS resolution. As a result, the pods cannot resolve the Kubernetes API server. To resolve this issue, configure the DNS server settings for a dual stack network.
** You created a hosted cluster on the Agent platform through the {mce} console by using the default hosted cluster namespace.
** You created a hosted cluster through the command-line interface or API by specifying the hosted cluster namespace to be the same as the hosted cluster name.
// * If you created a hosted cluster in the same namespace as its managed cluster, detaching the managed hosted cluster deletes everything in the managed cluster namespace including the hosted cluster. The following situations can create a hosted cluster in the same namespace as its managed cluster:
* When you use the console or API to specify an IPv6 address for the `spec.services.servicePublishingStrategy.nodePort.address` field of a hosted cluster, a full IPv6 address with 8 hextets is required. For example, instead of specifying `2620:52:0:1306::30`, you need to specify `2620:52:0:1306:0:0:0:30`. A manifest fragment that illustrates this requirement follows this list.
// ** You created a hosted cluster on the Agent platform through the {mce} console by using the default hosted cluster namespace.
// ** You created a hosted cluster through the command-line interface or API by specifying the hosted cluster namespace to be the same as the hosted cluster name.
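
The following fragment illustrates the IPv6 known issue in the preceding list. The `spec.services` layout is modeled on the `HostedCluster` service publishing API, but treat the service name and port as placeholders; the expanded address is the one from the known issue.

[source,yaml]
----
spec:
  services:
  - service: APIServer
    servicePublishingStrategy:
      type: NodePort
      nodePort:
        # Use the full 8-hextet form; the shortened :: form is not accepted.
        address: "2620:52:0:1306:0:0:0:30"
        port: 30000  # placeholder node port
----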
[id="hcp-tech-preview-features_{context}"]
=== General Availability and Technology Preview features
// * When you use the console or API to specify an IPv6 address for the `spec.services.servicePublishingStrategy.nodePort.address` field of a hosted cluster, a full IPv6 address with 8 hextets is required. For example, instead of specifying `2620:52:0:1306::30`, you need to specify `2620:52:0:1306:0:0:0:30`.
Some features in this release are currently in Technology Preview. These experimental features are not intended for production use. For more information about the scope of support for these features, see link:https://access.redhat.com/support/offerings/techpreview[Technology Preview Features Support Scope] on the Red{nbsp}Hat Customer Portal.
// [id="hcp-tech-preview-features_{context}"]
// === General Availability and Technology Preview features
[IMPORTANT]
====
For {ibm-power-title} and {ibm-z-title}, the following exceptions apply:
// Some features in this release are currently in Technology Preview. These experimental features are not intended for production use. For more information about the scope of support for these features, see link:https://access.redhat.com/support/offerings/techpreview[Technology Preview Features Support Scope] on the Red{nbsp}Hat Customer Portal.
* For version 4.20 and later, you must run the control plane on machine types that are based on 64-bit x86 architecture or s390x architecture, and node pools on {ibm-power-title} or {ibm-z-title}.
* For version 4.19 and earlier, you must run the control plane on machine types that are based on 64-bit x86 architecture, and node pools on {ibm-power-title} or {ibm-z-title}.
====
// [IMPORTANT]
// ====
// For {ibm-power-title} and {ibm-z-title}, the following exceptions apply:
.{hcp-capital} GA and TP tracker
[cols="4,1,1,1",options="header"]
|===
|Feature |4.18 |4.19 |4.20
// * For version 4.20 and later, you must run the control plane on machine types that are based on 64-bit x86 architecture or s390x architecture, and node pools on {ibm-power-title} or {ibm-z-title}.
// * For version 4.19 and earlier, you must run the control plane on machine types that are based on 64-bit x86 architecture, and node pools on {ibm-power-title} or {ibm-z-title}.
// ====
|{hcp-capital} for {product-title} using non-bare-metal agent machines
|Technology Preview
|Technology Preview
|Technology Preview
// .{hcp-capital} GA and TP tracker
// [cols="4,1,1,1",options="header"]
// |===
// |Feature |4.18 |4.19 |4.20
|{hcp-capital} for {product-title} on {rh-openstack}
|Developer Preview
|Technology Preview
|Technology Preview
// |{hcp-capital} for {product-title} using non-bare-metal agent machines
// |Technology Preview
// |Technology Preview
// |Technology Preview
|Custom taints and tolerations
|Technology Preview
|Technology Preview
|Technology Preview
// |{hcp-capital} for {product-title} on {rh-openstack}
// |Developer Preview
// |Technology Preview
// |Technology Preview
|NVIDIA GPU devices on {hcp} for {VirtProductName}
|Technology Preview
|Technology Preview
|Technology Preview
// |Custom taints and tolerations
// |Technology Preview
// |Technology Preview
// |Technology Preview
|{hcp-capital} on {ibm-z-title} in a disconnected environment
|Technology Preview
|Technology Preview
|Generally Available
|===
// |NVIDIA GPU devices on {hcp} for {VirtProductName}
// |Technology Preview
// |Technology Preview
// |Technology Preview
// |{hcp-capital} on {ibm-z-title} in a disconnected environment
// |Technology Preview
// |Technology Preview
// |Generally Available
// |===


@@ -17,11 +17,11 @@ See the following highly available {hcp} requirements, which were tested with {p
* Minimum vCPU: approximately 5.5 cores
* Minimum memory: approximately 19 GiB
[role="_additional-resources"]
.Additional resources
// [role="_additional-resources"]
// .Additional resources
* For more information about disabling the metric service monitoring, see xref:../../hosted_control_planes/hcp-prepare/hcp-override-resource-util.adoc#hcp-override-resource-util[Overriding resource utilization measurements].
* For more information about highly available {hcp} topology, see xref:../../hosted_control_planes/hcp-prepare/hcp-distribute-workloads.adoc#hcp-distribute-workloads[Distributing hosted cluster workloads].
// * For more information about disabling the metric service monitoring, see xref:../../hosted_control_planes/hcp-prepare/hcp-override-resource-util.adoc#hcp-override-resource-util[Overriding resource utilization measurements].
// * For more information about highly available {hcp} topology, see xref:../../hosted_control_planes/hcp-prepare/hcp-distribute-workloads.adoc#hcp-distribute-workloads[Distributing hosted cluster workloads].
include::modules/hcp-pod-limits.adoc[leveloffset=+1]


@@ -127,7 +127,9 @@ For more information, see xref:../extensions/ce/managing-ce.adoc#olmv1-supported
[id="ocp-release-notes-hcp_{context}"]
=== Hosted control planes
Because {hcp} releases asynchronously from {product-title}, it has its own release notes. For more information, see xref:../hosted_control_planes/hosted-control-planes-release-notes.adoc#hosted-control-planes-release-notes[{hcp-capital} release notes].
include::snippets/hcp-snippet.adoc[]
// Because {hcp} releases asynchronously from {product-title}, it has its own release notes. For more information, see xref:../hosted_control_planes/hosted-control-planes-release-notes.adoc#hosted-control-planes-release-notes[{hcp-capital} release notes].
[id="ocp-release-notes-ibm-power_{context}"]
=== {ibm-power-title}

View File

@@ -30,7 +30,7 @@ include::modules/cnf-gathering-data-about-hosted-cluster-using-must-gather.adoc[
* xref:../support/gathering-cluster-data.adoc#nodes-nodes-managing[Gathering data about your cluster]
* xref:../hosted_control_planes/hcp-troubleshooting.adoc#hcp-must-gather-cli[Gathering data for a hosted cluster by using the CLI].
// * xref:../hosted_control_planes/hcp-troubleshooting.adoc#hcp-must-gather-cli[Gathering data for a hosted cluster by using the CLI].
include::modules/cnf-running-the-performance-creator-profile-hosted.adoc[leveloffset=+2]

View File

@@ -46,7 +46,7 @@ include::modules/cnf-gathering-data-about-cluster-using-must-gather.adoc[levelof
* xref:../support/gathering-cluster-data.adoc#nodes-nodes-managing[Gathering data about your cluster]
* xref:../hosted_control_planes/hcp-troubleshooting.adoc#hcp-must-gather-cli[Gathering data for a hosted cluster by using the CLI]
// * xref:../hosted_control_planes/hcp-troubleshooting.adoc#hcp-must-gather-cli[Gathering data for a hosted cluster by using the CLI]
include::modules/cnf-running-the-performance-creator-profile.adoc[leveloffset=+2]

View File

@@ -35,6 +35,6 @@ include::modules/node-tuning-hosted-cluster.adoc[leveloffset=+1]
include::modules/advanced-node-tuning-hosted-cluster.adoc[leveloffset=+1]
[role="_additional-resources"]
.Additional resources
* xref:../hosted_control_planes/index.adoc#hosted-control-planes-overview[{hcp-capital} overview]
// [role="_additional-resources"]
// .Additional resources
// * xref:../hosted_control_planes/index.adoc#hosted-control-planes-overview[{hcp-capital} overview]

View File

@@ -13,7 +13,9 @@ by one that is issued by a CA that clients trust.
[NOTE]
====
In hosted control plane clusters, you can add as many custom certificates to your Kubernetes API Server as you need. However, do not add a certificate for the endpoint that worker nodes use to communicate with the control plane. For more information, see xref:../../hosted_control_planes/hcp-deploy/hcp-deploy-bm.adoc#hcp-custom-cert_hcp-deploy-bm[Configuring a custom API server certificate in a hosted cluster].
In hosted control plane clusters, you can add as many custom certificates to your Kubernetes API Server as you need. However, do not add a certificate for the endpoint that worker nodes use to communicate with the control plane.
// For more information, see xref:../../hosted_control_planes/hcp-deploy/hcp-deploy-bm.adoc#hcp-custom-cert_hcp-deploy-bm[Configuring a custom API server certificate in a hosted cluster].
====
include::modules/customize-certificates-api-add-named.adoc[leveloffset=+1]
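To make the named-certificate guidance in the note above concrete, the following is a minimal sketch of a `HostedCluster` excerpt. It assumes the hosted cluster exposes the standard `servingCerts.namedCertificates` fields under `spec.configuration.apiServer`; the DNS name and secret name are placeholders, not values taken from this change.

[source,yaml]
----
apiVersion: hypershift.openshift.io/v1beta1
kind: HostedCluster
metadata:
  name: example                     # placeholder hosted cluster name
  namespace: clusters
spec:
  configuration:
    apiServer:
      servingCerts:
        namedCertificates:
        - names:
          - api-custom.example.com  # placeholder DNS name; do not use the internal endpoint that worker nodes use
          servingCertificate:
            name: custom-api-cert   # placeholder secret containing tls.crt and tls.key
----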

View File

@@ -51,7 +51,7 @@ include::modules/compliance-operator-hcp-install.adoc[leveloffset=+1]
.Additional resources
// 4.13+
* xref:../../../hosted_control_planes/index.adoc#hcp-overview[{hcp-capital} overview]
// * xref:../../../hosted_control_planes/index.adoc#hcp-overview[{hcp-capital} overview]
//
// 4.11-4.12, commenting out of 4.13-main
//* xref:../../../architecture/control-plane.adoc#hosted-control-planes-overview_control-plane[Overview of {hcp} (Technology Preview)]

snippets/hcp-snippet.adoc (new file, 10 lines added)
View File

@@ -0,0 +1,10 @@
// Text snippet included in the following assemblies:
//
// * hosted_control_planes/hosted-control-planes-release-notes.adoc
:_mod-docs-content-type: SNIPPET
[IMPORTANT]
====
{hcp-capital} for {product-title} {product-version} is planned to be available with an upcoming release of {mce-short}. In the meantime, see the link:https://docs.redhat.com/en/documentation/openshift_container_platform/4.20/html/hosted_control_planes/hosted-control-planes-release-notes-1[{hcp} documentation for {product-title} 4.20].
====

View File

@@ -11,7 +11,7 @@ This document describes how to use volume group snapshots with supported Contain
:FeatureName: CSI volume group snapshots
include::snippets/technology-preview.adoc[leveloffset=+1]
To use this Technology Preview feature, you must xref:../../hosted_control_planes/hcp-using-feature-gates.adoc#hcp-enable-feature-sets_hcp-using-feature-gates[enable it using feature gates].
// To use this Technology Preview feature, you must xref:../../hosted_control_planes/hcp-using-feature-gates.adoc#hcp-enable-feature-sets_hcp-using-feature-gates[enable it using feature gates].
include::modules/persistent-storage-csi-group-snapshots-overview.adoc[leveloffset=+1]
@@ -26,4 +26,4 @@ include::modules/persistent-storage-csi-group-snapshots-restore.adoc[leveloffset
== Additional resources
* xref:../../storage/container_storage_interface/persistent-storage-csi-snapshots.adoc#persistent-storage-csi-snapshots[CSI volume snapshots]
* xref:../../hosted_control_planes/hcp-using-feature-gates.adoc#hcp-enable-feature-sets_hcp-using-feature-gates[Enabling feature sets by using feature gates]
// * xref:../../hosted_control_planes/hcp-using-feature-gates.adoc#hcp-enable-feature-sets_hcp-using-feature-gates[Enabling feature sets by using feature gates]
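As a hedged illustration of the feature-gate prerequisite referenced in this file, the following sketch assumes that Technology Preview feature sets are turned on for a hosted cluster through the `featureGate` block of the `HostedCluster` configuration; the cluster name and namespace are placeholders.

[source,yaml]
----
apiVersion: hypershift.openshift.io/v1beta1
kind: HostedCluster
metadata:
  name: example          # placeholder hosted cluster name
  namespace: clusters
spec:
  configuration:
    featureGate:
      featureSet: TechPreviewNoUpgrade  # assumption: the feature set that exposes Technology Preview gates; enabling it cannot be undone
----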

View File

@@ -44,11 +44,11 @@ include::modules/support-must-gather-targeted-collection-gathering-data.adoc[lev
endif::openshift-origin[]
ifndef::openshift-rosa,openshift-rosa-hcp,openshift-dedicated[]
[role="_additional-resources"]
.Additional resources
* xref:../hosted_control_planes/hcp-troubleshooting.adoc#hosted-control-planes-troubleshooting_hcp-troubleshooting[Gathering information to troubleshoot {hcp}]
endif::openshift-rosa,openshift-rosa-hcp,openshift-dedicated[]
// ifndef::openshift-rosa,openshift-rosa-hcp,openshift-dedicated[]
// [role="_additional-resources"]
// .Additional resources
// * xref:../hosted_control_planes/hcp-troubleshooting.adoc#hosted-control-planes-troubleshooting_hcp-troubleshooting[Gathering information to troubleshoot {hcp}]
// endif::openshift-rosa,openshift-rosa-hcp,openshift-dedicated[]
ifndef::openshift-origin[]
// Table of must-gather flags

View File

@@ -76,7 +76,9 @@ Version {product-version} of {product-title} requires VMware virtual hardware ve
[id="updating-clusters-overview-hosted-control-planes"]
== Updating {hcp}
xref:../hosted_control_planes/hcp-updating.adoc#hcp-updating_hcp-updating[Updating {hcp}]: On {hcp} for {product-title}, updates are decoupled between the control plane and the nodes. Your service cluster provider, which is the user that hosts the cluster control planes, can manage the updates as needed. The hosted cluster handles control plane updates, and node pools handle node updates. For more information, see the following resources:
include::snippets/hcp-snippet.adoc[]
* xref:../hosted_control_planes/hcp-updating.adoc#hcp-updates-hosted-cluster_hcp-updating[Updates for the hosted cluster]
* xref:../hosted_control_planes/hcp-updating.adoc#hcp-update-node-pools_hcp-updating[Updating node pools in a hosted cluster]
// xref:../hosted_control_planes/hcp-updating.adoc#hcp-updating_hcp-updating[Updating {hcp}]: On {hcp} for {product-title}, updates are decoupled between the control plane and the nodes. Your service cluster provider, which is the user that hosts the cluster control planes, can manage the updates as needed. The hosted cluster handles control plane updates, and node pools handle node updates. For more information, see the following resources:
// * xref:../hosted_control_planes/hcp-updating.adoc#hcp-updates-hosted-cluster_hcp-updating[Updates for the hosted cluster]
// * xref:../hosted_control_planes/hcp-updating.adoc#hcp-update-node-pools_hcp-updating[Updating node pools in a hosted cluster]
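To illustrate the decoupled update flow described in this hunk, here is a minimal `NodePool` sketch. It assumes that node updates are rolled out by changing the release image on the node pool while the control plane is updated separately on the `HostedCluster`; all names and the image tag are placeholders.

[source,yaml]
----
apiVersion: hypershift.openshift.io/v1beta1
kind: NodePool
metadata:
  name: example-nodepool   # placeholder node pool name
  namespace: clusters
spec:
  clusterName: example     # hosted cluster that owns this node pool
  replicas: 2
  management:
    upgradeType: Replace   # assumption: Replace reprovisions nodes; InPlace updates them in place
  release:
    image: quay.io/openshift-release-dev/ocp-release:4.20.0-x86_64  # placeholder target release for the node update
----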

View File

@@ -291,38 +291,38 @@ a|* xref:../nodes/scheduling/nodes-scheduler-taints-tolerations.adoc#nodes-sched
* xref:../machine_management/creating-infrastructure-machinesets.adoc#creating-infrastructure-machinesets[Creating infrastructure machine sets]
|===
[id="self-managed-hcp"]
== {hcp-capital}
// [id="self-managed-hcp"]
// == {hcp-capital}
[options="header",cols="2*",width="%autowidth.stretch"]
|===
|Learn about {hcp} |Optional additional resources
// [options="header",cols="2*",width="%autowidth.stretch"]
// |===
// |Learn about {hcp} |Optional additional resources
| xref:../hosted_control_planes/index.adoc#hosted-control-planes-overview[Hosted control planes overview]
a|
xref:../hosted_control_planes/index.adoc#hosted-control-planes-version-support_hcp-overview[Versioning for {hcp}]
// | xref:../hosted_control_planes/index.adoc#hosted-control-planes-overview[Hosted control planes overview]
// a|
// xref:../hosted_control_planes/index.adoc#hosted-control-planes-version-support_hcp-overview[Versioning for {hcp}]
| Preparing to deploy
a| * xref:../hosted_control_planes/hcp-prepare/hcp-requirements.adoc#hcp-requirements[Requirements for {hcp}]
* xref:../hosted_control_planes/hcp-prepare/hcp-sizing-guidance.adoc#hcp-sizing-guidance[Sizing guidance for {hcp}]
* xref:../hosted_control_planes/hcp-prepare/hcp-override-resource-util.adoc#hcp-override-resource-util[Overriding resource utilization measurements]
* xref:../hosted_control_planes/hcp-prepare/hcp-cli.adoc#hcp-cli[Installing the {hcp} command-line interface]
* xref:../hosted_control_planes/hcp-prepare/hcp-distribute-workloads.adoc#hcp-distribute-workloads[Distributing hosted cluster workloads]
* xref:../hosted_control_planes/hcp-prepare/hcp-enable-disable.adoc#hcp-enable-disable[Enabling or disabling the {hcp} feature]
// | Preparing to deploy
// a| * xref:../hosted_control_planes/hcp-prepare/hcp-requirements.adoc#hcp-requirements[Requirements for {hcp}]
// * xref:../hosted_control_planes/hcp-prepare/hcp-sizing-guidance.adoc#hcp-sizing-guidance[Sizing guidance for {hcp}]
// * xref:../hosted_control_planes/hcp-prepare/hcp-override-resource-util.adoc#hcp-override-resource-util[Overriding resource utilization measurements]
// * xref:../hosted_control_planes/hcp-prepare/hcp-cli.adoc#hcp-cli[Installing the {hcp} command-line interface]
// * xref:../hosted_control_planes/hcp-prepare/hcp-distribute-workloads.adoc#hcp-distribute-workloads[Distributing hosted cluster workloads]
// * xref:../hosted_control_planes/hcp-prepare/hcp-enable-disable.adoc#hcp-enable-disable[Enabling or disabling the {hcp} feature]
| Deploying {hcp}
a| * xref:../hosted_control_planes/hcp-deploy/hcp-deploy-virt.adoc#hcp-deploy-virt[Deploying {hcp} on {VirtProductName}]
* xref:../hosted_control_planes/hcp-deploy/hcp-deploy-aws.adoc#hcp-deploy-aws[Deploying {hcp} on {aws-short}]
* xref:../hosted_control_planes/hcp-deploy/hcp-deploy-bm.adoc#hcp-deploy-bm[Deploying {hcp} on bare metal]
* xref:../hosted_control_planes/hcp-deploy/hcp-deploy-non-bm.adoc#hcp-deploy-non-bm[Deploying {hcp} on non-bare-metal agent machines]
* xref:../hosted_control_planes/hcp-deploy/hcp-deploy-ibmz.adoc#hcp-deploy-ibmz[Deploying {hcp} on {ibm-z-title}]
* xref:../hosted_control_planes/hcp-deploy/hcp-deploy-ibm-power.adoc#hcp-deploy-ibm-power[Deploying {hcp} on {ibm-power-title}]
// | Deploying {hcp}
// a| * xref:../hosted_control_planes/hcp-deploy/hcp-deploy-virt.adoc#hcp-deploy-virt[Deploying {hcp} on {VirtProductName}]
// * xref:../hosted_control_planes/hcp-deploy/hcp-deploy-aws.adoc#hcp-deploy-aws[Deploying {hcp} on {aws-short}]
// * xref:../hosted_control_planes/hcp-deploy/hcp-deploy-bm.adoc#hcp-deploy-bm[Deploying {hcp} on bare metal]
// * xref:../hosted_control_planes/hcp-deploy/hcp-deploy-non-bm.adoc#hcp-deploy-non-bm[Deploying {hcp} on non-bare-metal agent machines]
// * xref:../hosted_control_planes/hcp-deploy/hcp-deploy-ibmz.adoc#hcp-deploy-ibmz[Deploying {hcp} on {ibm-z-title}]
// * xref:../hosted_control_planes/hcp-deploy/hcp-deploy-ibm-power.adoc#hcp-deploy-ibm-power[Deploying {hcp} on {ibm-power-title}]
| Deploying {hcp} in a disconnected environment
a| * xref:../hosted_control_planes/hcp-disconnected/hcp-deploy-dc-bm.adoc#hcp-deploy-dc-bm[Deploying {hcp} on bare metal in a disconnected environment]
* xref:../hosted_control_planes/hcp-disconnected/hcp-deploy-dc-virt.adoc#hcp-deploy-dc-virt[Deploying {hcp} on {VirtProductName} in a disconnected environment]
// | Deploying {hcp} in a disconnected environment
// a| * xref:../hosted_control_planes/hcp-disconnected/hcp-deploy-dc-bm.adoc#hcp-deploy-dc-bm[Deploying {hcp} on bare metal in a disconnected environment]
// * xref:../hosted_control_planes/hcp-disconnected/hcp-deploy-dc-virt.adoc#hcp-deploy-dc-virt[Deploying {hcp} on {VirtProductName} in a disconnected environment]
| xref:../hosted_control_planes/hcp-troubleshooting.adoc#hcp-troubleshooting[Troubleshooting {hcp}]
a| xref:../hosted_control_planes/hcp-troubleshooting.adoc#hosted-control-planes-troubleshooting_hcp-troubleshooting[Gathering information to troubleshoot {hcp}]
// | xref:../hosted_control_planes/hcp-troubleshooting.adoc#hcp-troubleshooting[Troubleshooting {hcp}]
// a| xref:../hosted_control_planes/hcp-troubleshooting.adoc#hosted-control-planes-troubleshooting_hcp-troubleshooting[Gathering information to troubleshoot {hcp}]
|===
// |===