diff --git a/_topic_map.yml b/_topic_map.yml
index d61fdde022..e1786c3fc4 100644
--- a/_topic_map.yml
+++ b/_topic_map.yml
@@ -2112,23 +2112,31 @@ Name: Backup and restore
 Dir: backup_and_restore
 Distros: openshift-origin,openshift-enterprise
 Topics:
-- Name: Backing up etcd data
-  File: backing-up-etcd
-- Name: Replacing an unhealthy etcd member
-  File: replacing-unhealthy-etcd-member
 - Name: Shutting down a cluster gracefully
   File: graceful-cluster-shutdown
 - Name: Restarting a cluster gracefully
   File: graceful-cluster-restart
-- Name: Disaster recovery
-  Dir: disaster_recovery
+# - Name: Application backup and restore
+#   Dir: application_backup_and_restore
+#   Topics:
+#   - Name: Application backup and restore
+#     File: placeholder
+- Name: Control plane backup and restore
+  Dir: control_plane_backup_and_restore
   Topics:
-  - Name: About disaster recovery
-    File: about-disaster-recovery
-  - Name: Restoring to a previous cluster state
-    File: scenario-2-restoring-cluster-state
-  - Name: Recovering from expired control plane certificates
-    File: scenario-3-expired-certs
+  - Name: Backing up etcd data
+    File: backing-up-etcd
+  - Name: Replacing an unhealthy etcd member
+    File: replacing-unhealthy-etcd-member
+  - Name: Disaster recovery
+    Dir: disaster_recovery
+    Topics:
+    - Name: About disaster recovery
+      File: about-disaster-recovery
+    - Name: Restoring to a previous cluster state
+      File: scenario-2-restoring-cluster-state
+    - Name: Recovering from expired control plane certificates
+      File: scenario-3-expired-certs
 ---
 Name: Migrating from version 3 to 4
 Dir: migrating_from_ocp_3_to_4
diff --git a/backup_and_restore/application_backup_and_restore/images b/backup_and_restore/application_backup_and_restore/images
new file mode 120000
index 0000000000..5fa6987088
--- /dev/null
+++ b/backup_and_restore/application_backup_and_restore/images
@@ -0,0 +1 @@
+../../images
\ No newline at end of file
diff --git a/backup_and_restore/application_backup_and_restore/modules b/backup_and_restore/application_backup_and_restore/modules
new file mode 120000
index 0000000000..8b0e854007
--- /dev/null
+++ b/backup_and_restore/application_backup_and_restore/modules
@@ -0,0 +1 @@
+../../modules
\ No newline at end of file
diff --git a/backup_and_restore/application_backup_and_restore/placeholder.adoc b/backup_and_restore/application_backup_and_restore/placeholder.adoc
new file mode 100644
index 0000000000..7f6d147219
--- /dev/null
+++ b/backup_and_restore/application_backup_and_restore/placeholder.adoc
@@ -0,0 +1,6 @@
+[id="placeholder"]
+= Application backup and restore
+include::modules/common-attributes.adoc[]
+:context: oadp
+
+TBD
diff --git a/backup_and_restore/backing-up-etcd.adoc b/backup_and_restore/control_plane_backup_and_restore/backing-up-etcd.adoc
similarity index 75%
rename from backup_and_restore/backing-up-etcd.adoc
rename to backup_and_restore/control_plane_backup_and_restore/backing-up-etcd.adoc
index 6ab603f134..cb007a604a 100644
--- a/backup_and_restore/backing-up-etcd.adoc
+++ b/backup_and_restore/control_plane_backup_and_restore/backing-up-etcd.adoc
@@ -22,10 +22,7 @@ Be sure to take an etcd backup after you upgrade your cluster. This is important
 Back up your cluster's etcd data by performing a single invocation of the backup script on a control plane host. Do not take a backup for each control plane host.
 ====
 
-After you have an etcd backup, you can xref:../backup_and_restore/disaster_recovery/scenario-2-restoring-cluster-state.adoc#dr-restoring-cluster-state[restore to a previous cluster state].
-
-You can perform the xref:../backup_and_restore/backing-up-etcd.adoc#backing-up-etcd-data_backup-etcd[etcd data backup process]
-on any control plane host that has a running etcd instance.
+After you have an etcd backup, you can xref:../../backup_and_restore/control_plane_backup_and_restore/disaster_recovery/scenario-2-restoring-cluster-state.adoc#dr-restoring-cluster-state[restore to a previous cluster state].
 
 // Backing up etcd data
 include::modules/backup-etcd.adoc[leveloffset=+1]
diff --git a/backup_and_restore/disaster_recovery/about-disaster-recovery.adoc b/backup_and_restore/control_plane_backup_and_restore/disaster_recovery/about-disaster-recovery.adoc
similarity index 57%
rename from backup_and_restore/disaster_recovery/about-disaster-recovery.adoc
rename to backup_and_restore/control_plane_backup_and_restore/disaster_recovery/about-disaster-recovery.adoc
index b4af98d859..1d4d469358 100644
--- a/backup_and_restore/disaster_recovery/about-disaster-recovery.adoc
+++ b/backup_and_restore/control_plane_backup_and_restore/disaster_recovery/about-disaster-recovery.adoc
@@ -16,26 +16,26 @@ state.
 Disaster recovery requires you to have at least one healthy control plane host.
 ====
 
-xref:../../backup_and_restore/disaster_recovery/scenario-2-restoring-cluster-state.adoc#dr-restoring-cluster-state[Restoring to a previous cluster state]::
+xref:../../../backup_and_restore/control_plane_backup_and_restore/disaster_recovery/scenario-2-restoring-cluster-state.adoc#dr-restoring-cluster-state[Restoring to a previous cluster state]::
 This solution handles situations where you want to restore your cluster to
 a previous state, for example, if an administrator deletes something critical.
 This also includes situations where you have lost the majority of your control plane hosts, leading to etcd quorum loss and the cluster going offline. As long as you have taken an etcd backup, you can follow this procedure to restore your cluster to a previous state.
 +
-If applicable, you might also need to xref:../../backup_and_restore/disaster_recovery/scenario-3-expired-certs.adoc#dr-recovering-expired-certs[recover from expired control plane certificates].
+If applicable, you might also need to xref:../../../backup_and_restore/control_plane_backup_and_restore/disaster_recovery/scenario-3-expired-certs.adoc#dr-recovering-expired-certs[recover from expired control plane certificates].
 +
 [WARNING]
 ====
 Restoring to a previous cluster state is a destructive and destablizing action to take on a running cluster. This procedure should only be used as a last resort.
 
-Prior to performing a restore, see xref:../../backup_and_restore/disaster_recovery/scenario-2-restoring-cluster-state.adoc#dr-scenario-2-restoring-cluster-state-about_dr-restoring-cluster-state[About restoring cluster state] for more information on the impact to the cluster.
+Prior to performing a restore, see xref:../../../backup_and_restore/control_plane_backup_and_restore/disaster_recovery/scenario-2-restoring-cluster-state.adoc#dr-scenario-2-restoring-cluster-state-about_dr-restoring-cluster-state[About restoring cluster state] for more information on the impact to the cluster.
 ====
 +
 [NOTE]
 ====
-If you have a majority of your masters still available and have an etcd quorum, then follow the procedure to xref:../../backup_and_restore/replacing-unhealthy-etcd-member.adoc#replacing-unhealthy-etcd-member[replace a single unhealthy etcd member].
+If you have a majority of your masters still available and have an etcd quorum, then follow the procedure to xref:../../../backup_and_restore/control_plane_backup_and_restore/replacing-unhealthy-etcd-member.adoc#replacing-unhealthy-etcd-member[replace a single unhealthy etcd member].
 ====
 
-xref:../../backup_and_restore/disaster_recovery/scenario-3-expired-certs.adoc#dr-recovering-expired-certs[Recovering from expired control plane certificates]::
+xref:../../../backup_and_restore/control_plane_backup_and_restore/disaster_recovery/scenario-3-expired-certs.adoc#dr-recovering-expired-certs[Recovering from expired control plane certificates]::
 This solution handles situations where your control plane certificates have
 expired. For example, if you shut down your cluster before the first certificate
 rotation, which occurs 24 hours after installation, your certificates will not
diff --git a/backup_and_restore/disaster_recovery/images b/backup_and_restore/control_plane_backup_and_restore/disaster_recovery/images
similarity index 100%
rename from backup_and_restore/disaster_recovery/images
rename to backup_and_restore/control_plane_backup_and_restore/disaster_recovery/images
diff --git a/backup_and_restore/disaster_recovery/modules b/backup_and_restore/control_plane_backup_and_restore/disaster_recovery/modules
similarity index 100%
rename from backup_and_restore/disaster_recovery/modules
rename to backup_and_restore/control_plane_backup_and_restore/disaster_recovery/modules
diff --git a/backup_and_restore/disaster_recovery/scenario-2-restoring-cluster-state.adoc b/backup_and_restore/control_plane_backup_and_restore/disaster_recovery/scenario-2-restoring-cluster-state.adoc
similarity index 66%
rename from backup_and_restore/disaster_recovery/scenario-2-restoring-cluster-state.adoc
rename to backup_and_restore/control_plane_backup_and_restore/disaster_recovery/scenario-2-restoring-cluster-state.adoc
index f2760ea13f..83380f6015 100644
--- a/backup_and_restore/disaster_recovery/scenario-2-restoring-cluster-state.adoc
+++ b/backup_and_restore/control_plane_backup_and_restore/disaster_recovery/scenario-2-restoring-cluster-state.adoc
@@ -5,7 +5,7 @@ include::modules/common-attributes.adoc[]
 
 toc::[]
 
-To restore the cluster to a previous state, you must have previously xref:../../backup_and_restore/backing-up-etcd.adoc#backing-up-etcd-data_backup-etcd[backed up etcd data] by creating a snapshot. You will use this snapshot to restore the cluster state.
+To restore the cluster to a previous state, you must have previously xref:../../../backup_and_restore/control_plane_backup_and_restore/backing-up-etcd.adoc#backing-up-etcd-data_backup-etcd[backed up etcd data] by creating a snapshot. You will use this snapshot to restore the cluster state.
 
 // About restoring to a previous cluster state
 include::modules/dr-restoring-cluster-state-about.adoc[leveloffset=+1]
diff --git a/backup_and_restore/disaster_recovery/scenario-3-expired-certs.adoc b/backup_and_restore/control_plane_backup_and_restore/disaster_recovery/scenario-3-expired-certs.adoc
similarity index 100%
rename from backup_and_restore/disaster_recovery/scenario-3-expired-certs.adoc
rename to backup_and_restore/control_plane_backup_and_restore/disaster_recovery/scenario-3-expired-certs.adoc
diff --git a/backup_and_restore/control_plane_backup_and_restore/images b/backup_and_restore/control_plane_backup_and_restore/images
new file mode 120000
index 0000000000..5fa6987088
--- /dev/null
+++ b/backup_and_restore/control_plane_backup_and_restore/images
@@ -0,0 +1 @@
+../../images
\ No newline at end of file
diff --git a/backup_and_restore/control_plane_backup_and_restore/modules b/backup_and_restore/control_plane_backup_and_restore/modules
new file mode 120000
index 0000000000..8b0e854007
--- /dev/null
+++ b/backup_and_restore/control_plane_backup_and_restore/modules
@@ -0,0 +1 @@
+../../modules
\ No newline at end of file
diff --git a/backup_and_restore/replacing-unhealthy-etcd-member.adoc b/backup_and_restore/control_plane_backup_and_restore/replacing-unhealthy-etcd-member.adoc
similarity index 56%
rename from backup_and_restore/replacing-unhealthy-etcd-member.adoc
rename to backup_and_restore/control_plane_backup_and_restore/replacing-unhealthy-etcd-member.adoc
index c3ca8a2e31..947f07433b 100644
--- a/backup_and_restore/replacing-unhealthy-etcd-member.adoc
+++ b/backup_and_restore/control_plane_backup_and_restore/replacing-unhealthy-etcd-member.adoc
@@ -11,16 +11,16 @@ This process depends on whether the etcd member is unhealthy because the machine
 
 [NOTE]
 ====
-If you have lost the majority of your control plane hosts, leading to etcd quorum loss, then you must follow the disaster recovery procedure to xref:../backup_and_restore/disaster_recovery/scenario-2-restoring-cluster-state.adoc#dr-restoring-cluster-state[restore to a previous cluster state] instead of this procedure.
+If you have lost the majority of your control plane hosts, leading to etcd quorum loss, then you must follow the disaster recovery procedure to xref:../../backup_and_restore/control_plane_backup_and_restore/disaster_recovery/scenario-2-restoring-cluster-state.adoc#dr-restoring-cluster-state[restore to a previous cluster state] instead of this procedure.
 
-If the control plane certificates are not valid on the member being replaced, then you must follow the procedure to xref:../backup_and_restore/disaster_recovery/scenario-3-expired-certs.adoc#dr-recovering-expired-certs[recover from expired control plane certificates] instead of this procedure.
+If the control plane certificates are not valid on the member being replaced, then you must follow the procedure to xref:../../backup_and_restore/control_plane_backup_and_restore/disaster_recovery/scenario-3-expired-certs.adoc#dr-recovering-expired-certs[recover from expired control plane certificates] instead of this procedure.
 
 If a control plane node is lost and a new one is created, the etcd cluster Operator handles generating the new TLS certificates and adding the node as an etcd member.
 ====
 
 == Prerequisites
 
-* Take an xref:../backup_and_restore/backing-up-etcd.adoc#backing-up-etcd-data_backup-etcd[etcd backup] prior to replacing an unhealthy etcd member.
+* Take an xref:../../backup_and_restore/control_plane_backup_and_restore/backing-up-etcd.adoc#backing-up-etcd-data_backup-etcd[etcd backup] prior to replacing an unhealthy etcd member.
 
 // Identifying an unhealthy etcd member
 include::modules/restore-identify-unhealthy-etcd-member.adoc[leveloffset=+1]
@@ -32,8 +32,8 @@ include::modules/restore-determine-state-etcd-member.adoc[leveloffset=+1]
 
 Depending on the state of your unhealthy etcd member, use one of the following procedures:
 
-* xref:../backup_and_restore/replacing-unhealthy-etcd-member.adoc#restore-replace-stopped-etcd-member_replacing-unhealthy-etcd-member[Replacing an unhealthy etcd member whose machine is not running or whose node is not ready]
-* xref:../backup_and_restore/replacing-unhealthy-etcd-member.adoc#restore-replace-crashlooping-etcd-member_replacing-unhealthy-etcd-member[Replacing an unhealthy etcd member whose etcd pod is crashlooping]
+* xref:../../backup_and_restore/control_plane_backup_and_restore/replacing-unhealthy-etcd-member.adoc#restore-replace-stopped-etcd-member_replacing-unhealthy-etcd-member[Replacing an unhealthy etcd member whose machine is not running or whose node is not ready]
+* xref:../../backup_and_restore/control_plane_backup_and_restore/replacing-unhealthy-etcd-member.adoc#restore-replace-crashlooping-etcd-member_replacing-unhealthy-etcd-member[Replacing an unhealthy etcd member whose etcd pod is crashlooping]
 
 // Replacing an unhealthy etcd member whose machine is not running or whose node is not ready
 include::modules/restore-replace-stopped-etcd-member.adoc[leveloffset=+2]
diff --git a/backup_and_restore/graceful-cluster-restart.adoc b/backup_and_restore/graceful-cluster-restart.adoc
index 11f0322f09..808789858d 100644
--- a/backup_and_restore/graceful-cluster-restart.adoc
+++ b/backup_and_restore/graceful-cluster-restart.adoc
@@ -13,7 +13,7 @@ Even though the cluster is expected to be functional after the restart, the clus
 * Node failure due to hardware
 * Network connectivity issues
 
-If your cluster fails to recover, follow the steps to xref:../backup_and_restore/disaster_recovery/scenario-2-restoring-cluster-state.adoc#dr-restoring-cluster-state[restore to a previous cluster state].
+If your cluster fails to recover, follow the steps to xref:../backup_and_restore/control_plane_backup_and_restore/disaster_recovery/scenario-2-restoring-cluster-state.adoc#dr-restoring-cluster-state[restore to a previous cluster state].
 
 == Prerequisites
 
@@ -24,4 +24,4 @@ include::modules/graceful-restart.adoc[leveloffset=+1]
 
 .Additional resources
 
-* See xref:../backup_and_restore/disaster_recovery/scenario-2-restoring-cluster-state.adoc#dr-restoring-cluster-state[Restoring to a previous cluster state] for how to use an etcd backup to restore if your cluster failed to recover after restarting.
+* See xref:../backup_and_restore/control_plane_backup_and_restore/disaster_recovery/scenario-2-restoring-cluster-state.adoc#dr-restoring-cluster-state[Restoring to a previous cluster state] for how to use an etcd backup to restore if your cluster failed to recover after restarting.
diff --git a/backup_and_restore/graceful-cluster-shutdown.adoc b/backup_and_restore/graceful-cluster-shutdown.adoc
index 06952895a0..adaf47b238 100644
--- a/backup_and_restore/graceful-cluster-shutdown.adoc
+++ b/backup_and_restore/graceful-cluster-shutdown.adoc
@@ -9,7 +9,7 @@ This document describes the process to gracefully shut down your cluster. You mi
 
 == Prerequisites
 
-* Take an xref:../backup_and_restore/backing-up-etcd.adoc#backing-up-etcd-data_backup-etcd[etcd backup] prior to shutting down the cluster.
+* Take an xref:../backup_and_restore/control_plane_backup_and_restore/backing-up-etcd.adoc#backing-up-etcd-data_backup-etcd[etcd backup] prior to shutting down the cluster.
 
 // Shutting down the cluster
 include::modules/graceful-shutdown.adoc[leveloffset=+1]
diff --git a/installing/installing_bare_metal/installing-bare-metal.adoc b/installing/installing_bare_metal/installing-bare-metal.adoc
index 4e1970dc02..47192219c9 100644
--- a/installing/installing_bare_metal/installing-bare-metal.adoc
+++ b/installing/installing_bare_metal/installing-bare-metal.adoc
@@ -103,7 +103,7 @@ include::modules/installation-user-infra-generate-k8s-manifest-ignition.adoc[lev
 
 .Additional resources
 
-* See xref:../../backup_and_restore/disaster_recovery/scenario-3-expired-certs.adoc#dr-recovering-expired-certs[Recovering from expired control plane certificates] for more information about recovering kubelet certificates.
+* See xref:../../backup_and_restore/control_plane_backup_and_restore/disaster_recovery/scenario-3-expired-certs.adoc#dr-recovering-expired-certs[Recovering from expired control plane certificates] for more information about recovering kubelet certificates.
 
 [id="creating-machines-bare-metal"]
 == Installing {op-system} and starting the {product-title} bootstrap process
diff --git a/installing/installing_bare_metal/installing-restricted-networks-bare-metal.adoc b/installing/installing_bare_metal/installing-restricted-networks-bare-metal.adoc
index 994c372b8e..d2ec4c0d83 100644
--- a/installing/installing_bare_metal/installing-restricted-networks-bare-metal.adoc
+++ b/installing/installing_bare_metal/installing-restricted-networks-bare-metal.adoc
@@ -108,7 +108,7 @@ include::modules/installation-user-infra-generate-k8s-manifest-ignition.adoc[lev
 
 .Additional resources
 
-* See xref:../../backup_and_restore/disaster_recovery/scenario-3-expired-certs.adoc#dr-recovering-expired-certs[Recovering from expired control plane certificates] for more information about recovering kubelet certificates.
+* See xref:../../backup_and_restore/control_plane_backup_and_restore/disaster_recovery/scenario-3-expired-certs.adoc#dr-recovering-expired-certs[Recovering from expired control plane certificates] for more information about recovering kubelet certificates.
 
 include::modules/installation-special-config-chrony.adoc[leveloffset=+1]
 
diff --git a/networking/ovn_kubernetes_network_provider/migrate-from-openshift-sdn.adoc b/networking/ovn_kubernetes_network_provider/migrate-from-openshift-sdn.adoc
index a4bc6a3e3f..04a5135208 100644
--- a/networking/ovn_kubernetes_network_provider/migrate-from-openshift-sdn.adoc
+++ b/networking/ovn_kubernetes_network_provider/migrate-from-openshift-sdn.adoc
@@ -17,7 +17,7 @@ include::modules/nw-ovn-kubernetes-migration.adoc[leveloffset=+1]
 
 == Additional resources
 
 * xref:../../networking/cluster-network-operator.adoc#nw-operator-configuration-parameters-for-ovn-sdn_cluster-network-operator[Configuration parameters for the OVN-Kubernetes default CNI network provider]
-* xref:../../backup_and_restore/backing-up-etcd.adoc#backup-etcd[Backing up etcd]
+* xref:../../backup_and_restore/control_plane_backup_and_restore/backing-up-etcd.adoc#backup-etcd[Backing up etcd]
 * xref:../../networking/network_policy/about-network-policy.adoc#about-network-policy[About network policy]
 * OVN-Kubernetes capabilities - xref:../../networking/ovn_kubernetes_network_provider/configuring-egress-ips-ovn.adoc#configuring-egress-ips-ovn[Configuring an egress IP address]
diff --git a/nodes/nodes/nodes-nodes-rebooting.adoc b/nodes/nodes/nodes-nodes-rebooting.adoc
index a33f329a17..60691f5e92 100644
--- a/nodes/nodes/nodes-nodes-rebooting.adoc
+++ b/nodes/nodes/nodes-nodes-rebooting.adoc
@@ -41,4 +41,4 @@ include::modules/nodes-nodes-rebooting-gracefully.adoc[leveloffset=+1]
 
 .Additional information
 
-For information on etcd data backup, see xref:../../backup_and_restore/backing-up-etcd.adoc#backup-etcd[Backing up etcd data].
+For information on etcd data backup, see xref:../../backup_and_restore/control_plane_backup_and_restore/backing-up-etcd.adoc#backup-etcd[Backing up etcd data].
diff --git a/scalability_and_performance/recommended-host-practices.adoc b/scalability_and_performance/recommended-host-practices.adoc
index e4854d0d94..90bc8a617e 100644
--- a/scalability_and_performance/recommended-host-practices.adoc
+++ b/scalability_and_performance/recommended-host-practices.adoc
@@ -23,7 +23,7 @@ include::modules/master-node-sizing.adoc[leveloffset=+1]
 
 include::modules/increasing-aws-flavor-size.adoc[leveloffset=+2]
 
 .Additional resources
-* xref:../backup_and_restore/backing-up-etcd.adoc#backing-up-etcd[Backing up etcd]
+* xref:../backup_and_restore/control_plane_backup_and_restore/backing-up-etcd.adoc#backing-up-etcd[Backing up etcd]
 
 include::modules/recommended-etcd-practices.adoc[leveloffset=+1]
diff --git a/security/certificate_types_descriptions/control-plane-certificates.adoc b/security/certificate_types_descriptions/control-plane-certificates.adoc
index 6baa7eda8b..3c9c5aa3ff 100644
--- a/security/certificate_types_descriptions/control-plane-certificates.adoc
+++ b/security/certificate_types_descriptions/control-plane-certificates.adoc
@@ -20,4 +20,4 @@ Control plane certificates are included in these namespaces:
 
 Control plane certificates are managed by the system and rotated automatically.
 
-In the rare case that your control plane certificates have expired, see xref:../../backup_and_restore/disaster_recovery/scenario-3-expired-certs.adoc#dr-recovering-expired-certs[Recovering from expired control plane certificates].
+In the rare case that your control plane certificates have expired, see xref:../../backup_and_restore/control_plane_backup_and_restore/disaster_recovery/scenario-3-expired-certs.adoc#dr-recovering-expired-certs[Recovering from expired control plane certificates].
diff --git a/security/certificate_types_descriptions/etcd-certificates.adoc b/security/certificate_types_descriptions/etcd-certificates.adoc
index ee573d6126..190231600e 100644
--- a/security/certificate_types_descriptions/etcd-certificates.adoc
+++ b/security/certificate_types_descriptions/etcd-certificates.adoc
@@ -29,4 +29,4 @@ etcd certificates are used for encrypted communication between etcd member peers
 
 [discrete]
 === Additional resources
-* xref:../../backup_and_restore/disaster_recovery/scenario-2-restoring-cluster-state.adoc#dr-restoring-cluster-state[Restoring to a previous cluster state]
+* xref:../../backup_and_restore/control_plane_backup_and_restore/disaster_recovery/scenario-2-restoring-cluster-state.adoc#dr-restoring-cluster-state[Restoring to a previous cluster state]
diff --git a/updating/updating-cluster-between-minor.adoc b/updating/updating-cluster-between-minor.adoc
index 1a11110189..90defe44b5 100644
--- a/updating/updating-cluster-between-minor.adoc
+++ b/updating/updating-cluster-between-minor.adoc
@@ -16,7 +16,7 @@ Use the web console or `oc adm upgrade channel __` to change the update
 
 * Have access to the cluster as a user with `admin` privileges. See xref:../authentication/using-rbac.adoc[Using RBAC to define and apply permissions].
 
-* Have a recent xref:../backup_and_restore/backing-up-etcd.adoc#backup-etcd[etcd backup] in case your upgrade fails and you must xref:../backup_and_restore/disaster_recovery/scenario-2-restoring-cluster-state.adoc#dr-restoring-cluster-state[restore your cluster to a previous state].
+* Have a recent xref:../backup_and_restore/control_plane_backup_and_restore/backing-up-etcd.adoc#backup-etcd[etcd backup] in case your upgrade fails and you must xref:../backup_and_restore/control_plane_backup_and_restore/disaster_recovery/scenario-2-restoring-cluster-state.adoc#dr-restoring-cluster-state[restore your cluster to a previous state].
 +
 {product-title} 4.9 requires an upgrade from etcd version 3.4 to 3.5. If the etcd Operator halts the upgrade, an alert is triggered. To clear this alert, ensure that you have a current etcd backup and restart the upgrade using the `--force` flag.
 +
diff --git a/updating/updating-cluster-cli.adoc b/updating/updating-cluster-cli.adoc
index 72e05be3d3..ee4ba16dcf 100644
--- a/updating/updating-cluster-cli.adoc
+++ b/updating/updating-cluster-cli.adoc
@@ -11,7 +11,7 @@ You can update, or upgrade, an {product-title} cluster within a minor version by
 
 * Have access to the cluster as a user with `admin` privileges. See xref:../authentication/using-rbac.adoc[Using RBAC to define and apply permissions].
 
-* Have a recent xref:../backup_and_restore/backing-up-etcd.adoc#backup-etcd[etcd backup] in case your upgrade fails and you must xref:../backup_and_restore/disaster_recovery/scenario-2-restoring-cluster-state.adoc#dr-restoring-cluster-state[restore your cluster to a previous state].
+* Have a recent xref:../backup_and_restore/control_plane_backup_and_restore/backing-up-etcd.adoc#backup-etcd[etcd backup] in case your upgrade fails and you must xref:../backup_and_restore/control_plane_backup_and_restore/disaster_recovery/scenario-2-restoring-cluster-state.adoc#dr-restoring-cluster-state[restore your cluster to a previous state].
 * Ensure all Operators previously installed through Operator Lifecycle Manager (OLM) are updated to their latest version in their latest channel. Updating the Operators ensures they have a valid upgrade path when the default OperatorHub catalogs switch from the current minor version to the next during a cluster upgrade. See xref:../operators/admin/olm-upgrading-operators.adoc#olm-upgrading-operators[Upgrading installed Operators] for more information.
 * Ensure that all machine config pools (MCPs) are running and not paused. Nodes associated with a paused MCP are skipped during the update process. You can pause the MCPs if you are performing a canary rollout update strategy.
 * If your cluster uses manually maintained credentials, ensure that the Cloud Credential Operator (CCO) is in an upgradeable state. For more information, see _Upgrading clusters with manually maintained credentials_ for xref:../installing/installing_aws/manually-creating-iam.adoc#manually-maintained-credentials-upgrade_manually-creating-iam-aws[AWS], xref:../installing/installing_azure/manually-creating-iam-azure.adoc#manually-maintained-credentials-upgrade_manually-creating-iam-azure[Azure], or xref:../installing/installing_gcp/manually-creating-iam-gcp.adoc#manually-maintained-credentials-upgrade_manually-creating-iam-gcp[GCP].
diff --git a/updating/updating-cluster-rhel-compute.adoc b/updating/updating-cluster-rhel-compute.adoc
index f434e2dc38..3868f39c5c 100644
--- a/updating/updating-cluster-rhel-compute.adoc
+++ b/updating/updating-cluster-rhel-compute.adoc
@@ -13,7 +13,7 @@ those machines.
 
 * Have access to the cluster as a user with `admin` privileges. See xref:../authentication/using-rbac.adoc[Using RBAC to define and apply permissions].
 
-* Have a recent xref:../backup_and_restore/backing-up-etcd.adoc#backup-etcd[etcd backup] in case your upgrade fails and you must xref:../backup_and_restore/disaster_recovery/scenario-2-restoring-cluster-state.adoc#dr-restoring-cluster-state[restore your cluster to a previous state].
+* Have a recent xref:../backup_and_restore/control_plane_backup_and_restore/backing-up-etcd.adoc#backup-etcd[etcd backup] in case your upgrade fails and you must xref:../backup_and_restore/control_plane_backup_and_restore/disaster_recovery/scenario-2-restoring-cluster-state.adoc#dr-restoring-cluster-state[restore your cluster to a previous state].
 * If your cluster uses manually maintained credentials, ensure that the Cloud Credential Operator (CCO) is in an upgradeable state. For more information, see _Upgrading clusters with manually maintained credentials_ for xref:../installing/installing_aws/manually-creating-iam.adoc#manually-maintained-credentials-upgrade_manually-creating-iam-aws[AWS], xref:../installing/installing_azure/manually-creating-iam-azure.adoc#manually-maintained-credentials-upgrade_manually-creating-iam-azure[Azure], or xref:../installing/installing_gcp/manually-creating-iam-gcp.adoc#manually-maintained-credentials-upgrade_manually-creating-iam-gcp[GCP].
 * If your cluster uses manually maintained credentials with the AWS Secure Token Service (STS), obtain a copy of the `ccoctl` utility from the release image being upgraded to and use it to process any updated credentials. For more information, see xref:../authentication/managing_cloud_provider_credentials/cco-mode-sts.adoc#sts-mode-upgrading[_Upgrading an OpenShift Container Platform cluster configured for manual mode with STS_].
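The xref rewrites throughout this patch follow one mechanical rule: links from files that did not move keep their existing `../` depth and only gain the new `control_plane_backup_and_restore/` path segment, while links from the files that did move gain one extra `../` because those files now sit one directory deeper. As a quick reference to the pattern, here are three representative forms taken directly from the hunks in this patch (this block is illustrative only and is not itself part of the patch):

----
// From an assembly one level below the repository root, for example updating/updating-cluster.adoc:
xref:../backup_and_restore/control_plane_backup_and_restore/backing-up-etcd.adoc#backup-etcd[etcd backup]

// From an assembly two levels down, for example installing/installing_bare_metal/installing-bare-metal.adoc:
xref:../../backup_and_restore/control_plane_backup_and_restore/disaster_recovery/scenario-3-expired-certs.adoc#dr-recovering-expired-certs[Recovering from expired control plane certificates]

// From a sibling assembly inside control_plane_backup_and_restore/ itself, for example replacing-unhealthy-etcd-member.adoc:
xref:../../backup_and_restore/control_plane_backup_and_restore/backing-up-etcd.adoc#backing-up-etcd-data_backup-etcd[etcd backup]
----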
diff --git a/updating/updating-cluster.adoc b/updating/updating-cluster.adoc
index 9d78214ab3..98addada54 100644
--- a/updating/updating-cluster.adoc
+++ b/updating/updating-cluster.adoc
@@ -11,7 +11,7 @@ You can update, or upgrade, an {product-title} cluster by using the web console.
 
 * Have access to the cluster as a user with `admin` privileges. See xref:../authentication/using-rbac.adoc[Using RBAC to define and apply permissions].
 
-* Have a recent xref:../backup_and_restore/backing-up-etcd.adoc#backup-etcd[etcd backup] in case your upgrade fails and you must xref:../backup_and_restore/disaster_recovery/scenario-2-restoring-cluster-state.adoc#dr-restoring-cluster-state[restore your cluster to a previous state].
+* Have a recent xref:../backup_and_restore/control_plane_backup_and_restore/backing-up-etcd.adoc#backup-etcd[etcd backup] in case your upgrade fails and you must xref:../backup_and_restore/control_plane_backup_and_restore/disaster_recovery/scenario-2-restoring-cluster-state.adoc#dr-restoring-cluster-state[restore your cluster to a previous state].
 * Ensure that all machine config pools (MCPs) are running and not paused. Nodes associated with a paused MCP are skipped during the update process. You can pause the MCPs if you are performing a canary rollout update strategy.
 * If your cluster uses manually maintained credentials, ensure that the Cloud Credential Operator (CCO) is in an upgradeable state. For more information, see _Upgrading clusters with manually maintained credentials_ for xref:../installing/installing_aws/manually-creating-iam.adoc#manually-maintained-credentials-upgrade_manually-creating-iam-aws[AWS], xref:../installing/installing_azure/manually-creating-iam-azure.adoc#manually-maintained-credentials-upgrade_manually-creating-iam-azure[Azure], or xref:../installing/installing_gcp/manually-creating-iam-gcp.adoc#manually-maintained-credentials-upgrade_manually-creating-iam-gcp[GCP].
 * If your cluster uses manually maintained credentials with the AWS Secure Token Service (STS), obtain a copy of the `ccoctl` utility from the release image being upgraded to and use it to process any updated credentials. For more information, see xref:../authentication/managing_cloud_provider_credentials/cco-mode-sts.adoc#sts-mode-upgrading[_Upgrading an OpenShift Container Platform cluster configured for manual mode with STS_].
diff --git a/updating/updating-restricted-network-cluster.adoc b/updating/updating-restricted-network-cluster.adoc
index a184306801..882b0a0584 100644
--- a/updating/updating-restricted-network-cluster.adoc
+++ b/updating/updating-restricted-network-cluster.adoc
@@ -18,7 +18,7 @@ If multiple clusters are present within the restricted network, mirror the requi
 
 * You must have the `oc` command-line interface (CLI) tool installed.
 * Have access to the cluster as a user with `admin` privileges. See xref:../authentication/using-rbac.adoc[Using RBAC to define and apply permissions].
-* Have a recent xref:../backup_and_restore/backing-up-etcd.adoc#backup-etcd[etcd backup] in case your upgrade fails and you must xref:../backup_and_restore/disaster_recovery/scenario-2-restoring-cluster-state.adoc#dr-restoring-cluster-state[restore your cluster to a previous state].
+* Have a recent xref:../backup_and_restore/control_plane_backup_and_restore/backing-up-etcd.adoc#backup-etcd[etcd backup] in case your upgrade fails and you must xref:../backup_and_restore/control_plane_backup_and_restore/disaster_recovery/scenario-2-restoring-cluster-state.adoc#dr-restoring-cluster-state[restore your cluster to a previous state].
 * Ensure that all machine config pools (MCPs) are running and not paused. Nodes associated with a paused MCP are skipped during the update process. You can pause the MCPs if you are performing a canary rollout update strategy.
 * If your cluster uses manually maintained credentials, ensure that the Cloud Credential Operator (CCO) is in an upgradeable state. For more information, see _Upgrading clusters with manually maintained credentials_ for xref:../installing/installing_aws/manually-creating-iam.adoc#manually-maintained-credentials-upgrade_manually-creating-iam-aws[AWS], xref:../installing/installing_azure/manually-creating-iam-azure.adoc#manually-maintained-credentials-upgrade_manually-creating-iam-azure[Azure], or xref:../installing/installing_gcp/manually-creating-iam-gcp.adoc#manually-maintained-credentials-upgrade_manually-creating-iam-gcp[GCP].
 //STS is not currently supported in a restricted network environment, but the following bullet can be uncommented when that changes.
diff --git a/welcome/learn_more_about_openshift.adoc b/welcome/learn_more_about_openshift.adoc
index 9ad9de3281..b959b4ca0c 100644
--- a/welcome/learn_more_about_openshift.adoc
+++ b/welcome/learn_more_about_openshift.adoc
@@ -27,7 +27,7 @@ Use the following sections to find content to help you learn about and use {prod
 | link:https://access.redhat.com/support/policy/updates/openshift#ocp4_phases[{product-title} life cycle]
 |
 
-| xref:../backup_and_restore/backing-up-etcd.adoc#backup-etcd[Backup and restore]
+| xref:../backup_and_restore/control_plane_backup_and_restore/backing-up-etcd.adoc#backup-etcd[Backup and restore]
 |
 
 |
@@ -63,7 +63,7 @@ Use the following sections to find content to help you learn about and use {prod
 |
 
 |
-| xref:../backup_and_restore/backing-up-etcd.adoc#backup-etcd[Backup and restore]
+| xref:../backup_and_restore/control_plane_backup_and_restore/backing-up-etcd.adoc#backup-etcd[Backup and restore]
 |
 
 |
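Each new assembly directory in this patch also receives modules and images symlinks (the mode 120000 entries whose blob content is `../../modules` and `../../images`) pointing back at the shared top-level directories, matching the layout the other assembly directories already use. A minimal sketch of one way to create those links from the repository root, assuming a POSIX shell with `ln` available; the commands themselves are not part of the patch:

[source,terminal]
----
$ cd backup_and_restore/control_plane_backup_and_restore
$ ln -s ../../images images      # relative target, identical to the blob content shown above
$ ln -s ../../modules modules
$ cd ../application_backup_and_restore
$ ln -s ../../images images
$ ln -s ../../modules modules
----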