diff --git a/_topic_map.yml b/_topic_map.yml index 120e12ef13..cad016694d 100644 --- a/_topic_map.yml +++ b/_topic_map.yml @@ -307,6 +307,21 @@ Topics: # - Name: Troubleshooting an update # File: updating-troubleshooting --- +Name: Post-installation configuration +Dir: post_installation_configuration +Distros: openshift-origin,openshift-enterprise,openshift-webscale +Topics: +- Name: Cluster tasks + File: cluster-tasks +- Name: Node tasks + File: node-tasks +- Name: Network configuration + File: network-configuration +- Name: Storage configuration + File: storage-configuration +- Name: Preparing for users + File: preparing-for-users +--- Name: Support Dir: support Distros: openshift-enterprise,openshift-webscale,openshift-online,openshift-dedicated diff --git a/modules/about-etcd-encryption.adoc b/modules/about-etcd-encryption.adoc index 362f602fa7..7371941485 100644 --- a/modules/about-etcd-encryption.adoc +++ b/modules/about-etcd-encryption.adoc @@ -1,6 +1,7 @@ // Module included in the following assemblies: // // * security/encrypting-etcd.adoc +// * post_installation_configuration/cluster-tasks.adoc [id="about-etcd_{context}"] = About etcd encryption diff --git a/modules/accessing-an-example-cluster-node-tuning-operator-specification.adoc b/modules/accessing-an-example-cluster-node-tuning-operator-specification.adoc index 79351311e4..e5302d9af6 100644 --- a/modules/accessing-an-example-cluster-node-tuning-operator-specification.adoc +++ b/modules/accessing-an-example-cluster-node-tuning-operator-specification.adoc @@ -1,6 +1,7 @@ // Module included in the following assemblies: // // * scalability_and_performance/using-node-tuning-operator.adoc +// * post_installation_configuration/node-tasks.adoc [id="accessing-an-example-node-tuning-operator-specification_{context}"] = Accessing an example Node Tuning Operator specification diff --git a/modules/authentication-kubeadmin.adoc b/modules/authentication-kubeadmin.adoc index 38619482ea..eea747aeab 100644 --- a/modules/authentication-kubeadmin.adoc +++ b/modules/authentication-kubeadmin.adoc @@ -1,6 +1,7 @@ // Module included in the following assmeblies: // // * authentication/removing-kubeadmin.adoc +// * post_installation_configuration/preparing-for-users.adoc [id="understanding-kubeadmin_{context}"] = The kubeadmin user diff --git a/modules/authentication-remove-kubeadmin.adoc b/modules/authentication-remove-kubeadmin.adoc index 6be52c14d3..3b675719b2 100644 --- a/modules/authentication-remove-kubeadmin.adoc +++ b/modules/authentication-remove-kubeadmin.adoc @@ -2,6 +2,7 @@ // // * authentication/understanding-authentication.adoc // * authentication/understanding-identity-provider.adoc +// * post_installation_configuration/preparing-for-users.adoc [id="removing-kubeadmin_{context}"] = Removing the kubeadmin user diff --git a/modules/available-persistent-storage-options.adoc b/modules/available-persistent-storage-options.adoc index fddc954b2c..26bbb5680a 100644 --- a/modules/available-persistent-storage-options.adoc +++ b/modules/available-persistent-storage-options.adoc @@ -1,6 +1,7 @@ // Module included in the following assemblies: // // * storage/optimizing-storage.adoc +// * post_installation_configuration/storage-configuration.adoc [id="available-persistent-storage-options_{context}"] = Available persistent storage options diff --git a/modules/backup-etcd.adoc b/modules/backup-etcd.adoc index 96aa04608c..1854126e0c 100644 --- a/modules/backup-etcd.adoc +++ b/modules/backup-etcd.adoc @@ -1,6 +1,7 @@ // Module included in the following 
assemblies: // // * disaster_recovery/backing-up-etcd.adoc +// * post_installation_configuration/cluster-tasks.adoc [id="backing-up-etcd-data_{context}"] = Backing up etcd data diff --git a/modules/baseline-router-performance.adoc b/modules/baseline-router-performance.adoc index 3eec35ca85..966de06a90 100644 --- a/modules/baseline-router-performance.adoc +++ b/modules/baseline-router-performance.adoc @@ -1,3 +1,7 @@ +// Module included in the following assemblies: +// * scalability_and_performance/routing-optimization.adoc +// * post_installation_configuration/network-configuration.adoc + [id="baseline-router-performance_{context}"] = Baseline Ingress Controller (router) performance diff --git a/modules/cluster-autoscaler-about.adoc b/modules/cluster-autoscaler-about.adoc index 744daaee5d..2a3ec2a675 100644 --- a/modules/cluster-autoscaler-about.adoc +++ b/modules/cluster-autoscaler-about.adoc @@ -1,6 +1,7 @@ // Module included in the following assemblies: // // * machine_management/applying-autoscaling.adoc +// * post_installation_configuration/cluster-tasks.adoc [id="cluster-autoscaler-about_{context}"] = About the ClusterAutoscaler diff --git a/modules/cluster-autoscaler-cr.adoc b/modules/cluster-autoscaler-cr.adoc index 0012ef890e..a97e3f4ac9 100644 --- a/modules/cluster-autoscaler-cr.adoc +++ b/modules/cluster-autoscaler-cr.adoc @@ -1,6 +1,7 @@ // Module included in the following assemblies: // // * machine_management/applying-autoscaling.adoc +// * post_installation_configuration/cluster-tasks.adoc [id="cluster-autoscaler-cr_{context}"] = ClusterAutoscaler resource definition diff --git a/modules/cluster-node-tuning-operator-default-profiles-set.adoc b/modules/cluster-node-tuning-operator-default-profiles-set.adoc index 3603ad688b..0cdc547eba 100644 --- a/modules/cluster-node-tuning-operator-default-profiles-set.adoc +++ b/modules/cluster-node-tuning-operator-default-profiles-set.adoc @@ -1,6 +1,7 @@ // Module included in the following assemblies: // // * scalability_and_performance/using-node-tuning-operator.adoc +// * post_installation_configuration/node-tasks.adoc [id="custom-tuning-default-profiles-set_{context}"] = Default profiles set on a cluster diff --git a/modules/configuring-huge-pages.adoc b/modules/configuring-huge-pages.adoc index 4ce0bcf09b..7ec8510df2 100644 --- a/modules/configuring-huge-pages.adoc +++ b/modules/configuring-huge-pages.adoc @@ -1,6 +1,7 @@ // Module included in the following assemblies: // // * scalability_and_performance/what-huge-pages-do-and-how-they-are-consumed-by-apps.adoc +// * post_installation_configuration/node-tasks.adoc [id="configuring-huge-pages_{context}"] = Configuring huge pages diff --git a/modules/create-a-kubeletconfig-crd-to-edit-kubelet-parameters.adoc b/modules/create-a-kubeletconfig-crd-to-edit-kubelet-parameters.adoc index 34bab92d52..6c168c1eb1 100644 --- a/modules/create-a-kubeletconfig-crd-to-edit-kubelet-parameters.adoc +++ b/modules/create-a-kubeletconfig-crd-to-edit-kubelet-parameters.adoc @@ -1,6 +1,7 @@ // Module included in the following assemblies: // // * scalability_and_performance/recommended-host-practices.adoc +// * post_installation_configuration/node-tasks.adoc [id="create-a-kubeletconfig-crd-to-edit-kubelet-parameters_{context}"] = Create a KubeletConfig CRD to edit kubelet parameters diff --git a/modules/custom-tuning-specification.adoc b/modules/custom-tuning-specification.adoc index 42ae77f8cb..b6bf501132 100644 --- a/modules/custom-tuning-specification.adoc +++ b/modules/custom-tuning-specification.adoc 
@@ -1,6 +1,7 @@ // Module included in the following assemblies: // // * scalability_and_performance/using-node-tuning-operator.adoc +// * post_installation_configuration/node-tasks.adoc [id="custom-tuning-specification_{context}"] = Custom tuning specification diff --git a/modules/deploying-resource.adoc b/modules/deploying-resource.adoc index bbd34896e3..cdf045b6d2 100644 --- a/modules/deploying-resource.adoc +++ b/modules/deploying-resource.adoc @@ -5,6 +5,7 @@ // Module included in the following assemblies: // // * machine_management/applying-autoscaling.adoc +// * post_installation_configuration/cluster-tasks.adoc diff --git a/modules/differences-between-machinesets-and-machineconfigpool.adoc b/modules/differences-between-machinesets-and-machineconfigpool.adoc new file mode 100644 index 0000000000..bf43f5e10f --- /dev/null +++ b/modules/differences-between-machinesets-and-machineconfigpool.adoc @@ -0,0 +1,19 @@ +// Module included in the following assemblies: +// +// * post_installation_configuration/node-tasks.adoc +// * post_installation_configuration/cluster-tasks.adoc + + +[id="differences-between-machinesets-and-machineconfigpool_{context}"] += Understanding the difference between MachineSets and the MachineConfigPool +MachineSets describe {product-title} nodes with respect to the cloud or machine +provider. + +The MachineConfigPool allows MachineConfigController components to define and +provide the status of machines in the context of upgrades. + +The MachineConfigPool allows users to configure how upgrades are rolled out to the +{product-title} nodes in the MachineConfigPool. + +NodeSelector can be replaced with a reference to MachineSets. diff --git a/modules/disabling-etcd-encryption.adoc b/modules/disabling-etcd-encryption.adoc index 8b4c03ce18..d1b80b80b7 100644 --- a/modules/disabling-etcd-encryption.adoc +++ b/modules/disabling-etcd-encryption.adoc @@ -1,6 +1,7 @@ // Module included in the following assemblies: // // * security/encrypting-etcd.adoc +// * post_installation_configuration/cluster-tasks.adoc [id="disabling-etcd-encryption_{context}"] = Disabling etcd encryption diff --git a/modules/dr-restoring-cluster-state.adoc b/modules/dr-restoring-cluster-state.adoc index 11394ee3ab..87e95a2427 100644 --- a/modules/dr-restoring-cluster-state.adoc +++ b/modules/dr-restoring-cluster-state.adoc @@ -1,11 +1,13 @@ // Module included in the following assemblies: // // * disaster_recovery/scenario-2-restoring-cluster-state.adoc +// * post_installation_configuration/cluster-tasks.adoc + [id="dr-scenario-2-restoring-cluster-state_{context}"] = Restoring to a previous cluster state -You can use a saved etcd backup to restore back to a previous cluster state. You use the etcd backup to restore a single master host. Then the etcd cluster Operator handles scaling to the remaining master hosts. +You can use a saved etcd backup to restore to a previous cluster state. You use the etcd backup to restore a single control plane host. Then the etcd cluster Operator handles scaling to the remaining control plane hosts. .Prerequisites @@ -15,29 +17,29 @@ You can use a saved etcd backup to restore back to a previous cluster state. You .Procedure -. Select a master host to use as the recovery host. This is the host that you will run the restore operation on. +. Select a control plane host to use as the recovery host. This is the host that you will run the restore operation on. -. Establish SSH connectivity to each of the master nodes, including the recovery host. +. 
Establish SSH connectivity to each of the control plane nodes, including the recovery host. + -The Kubernetes API server will become inaccessible once the restore process has started, so you cannot access the master nodes. For this reason, it is recommended to establish SSH connectivity to each master host in a separate terminal. +The Kubernetes API server becomes inaccessible after the restore process starts, so you cannot access the control plane nodes. For this reason, it is recommended to establish SSH connectivity to each control plane host in a separate terminal. + [IMPORTANT] ==== If you do not complete this step, you will not be able to access the master hosts to complete the restore procedure, and you will be unable to recover your cluster from this state. ==== -. Copy the etcd backup directory to the recovery master host. +. Copy the etcd backup directory to the recovery control plane host. + -This procedure assumes that you copied the `backup` directory containing the etcd snapshot and the resources for the static Pods to the `/home/core/` directory of your recovery master host. +This procedure assumes that you copied the `backup` directory containing the etcd snapshot and the resources for the static Pods to the `/home/core/` directory of your recovery control plane host. -. Stop the static Pods on all other master nodes. +. Stop the static Pods on all other control plane nodes. + [NOTE] ==== It is not required to manually stop the Pods on the recovery host. The recovery script will stop the Pods on the recovery host. ==== -.. Access a master host that is not the recovery host. +.. Access a control plane host that is not the recovery host. .. Move the existing etcd Pod file out of the kubelet manifest directory: + @@ -71,7 +73,7 @@ The output of this command should be empty. .. Repeat this step on each of the other master hosts that is not the recovery host. -. Access the recovery master host. +. Access the recovery control plane host. . If the cluster-wide proxy is enabled, be sure that you have exported the `NO_PROXY`, `HTTP_PROXY`, and `HTTPS_PROXY` environment variables. @@ -81,7 +83,7 @@ The output of this command should be empty. You can check whether the proxy is enabled by reviewing the output of `oc get proxy cluster -o yaml`. The proxy is enabled if the `httpProxy`, `httpsProxy`, and `noProxy` fields have values set. ==== -. Run the restore script on the recovery master host and pass in the path to the etcd backup directory: +. Run the restore script on the recovery control plane host and pass in the path to the etcd backup directory: + [source,terminal] ---- diff --git a/modules/dynamic-provisioning-about.adoc b/modules/dynamic-provisioning-about.adoc index 6b86d45405..93ec2b2edf 100644 --- a/modules/dynamic-provisioning-about.adoc +++ b/modules/dynamic-provisioning-about.adoc @@ -1,22 +1,23 @@ // Module included in the following assemblies: // -// storage/dynamic-provisioning.adoc +// * storage/dynamic-provisioning.adoc +// * post_installation_configuration/storage-configuration.adoc [id="about_{context}"] = About dynamic provisioning -The StorageClass resource object describes and classifies storage that can +The StorageClass resource object describes and classifies storage that can be requested, as well as provides a means for passing parameters for -dynamically provisioned storage on demand. StorageClass objects can also -serve as a management mechanism for controlling different levels of +dynamically provisioned storage on demand. 
StorageClass objects can also +serve as a management mechanism for controlling different levels of storage and access to the storage. Cluster Administrators (`cluster-admin`) - or Storage Administrators (`storage-admin`) define and create the -StorageClass objects that users can request without needing any intimate + or Storage Administrators (`storage-admin`) define and create the +StorageClass objects that users can request without needing any detailed knowledge about the underlying storage volume sources. -The {product-title} persistent volume framework enables this functionality -and allows administrators to provision a cluster with persistent storage. -The framework also gives users a way to request those resources without +The {product-title} persistent volume framework enables this functionality +and allows administrators to provision a cluster with persistent storage. +The framework also gives users a way to request those resources without having any knowledge of the underlying infrastructure. Many storage types are available for use as persistent volumes in diff --git a/modules/dynamic-provisioning-annotations.adoc b/modules/dynamic-provisioning-annotations.adoc index 69be8e76fd..5bf606f59f 100644 --- a/modules/dynamic-provisioning-annotations.adoc +++ b/modules/dynamic-provisioning-annotations.adoc @@ -1,6 +1,7 @@ // Module included in the following assemblies // // * storage/dynamic-provisioning.adoc +// * post_installation_configuration/storage-configuration.adoc [id="storage-class-annotations_{context}"] = StorageClass annotations @@ -26,7 +27,7 @@ metadata: ---- This enables any Persistent Volume Claim (PVC) that does not specify a -specific volume to automatically be provisioned through the +specific StorageClass to automatically be provisioned through the default StorageClass. 
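For illustration, an existing StorageClass can be marked as the cluster-wide default with a patch along the following lines; the StorageClass name `standard` is a placeholder:

[source,terminal]
----
$ oc patch storageclass standard -p '{"metadata": {"annotations": {"storageclass.kubernetes.io/is-default-class": "true"}}}'
----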
[NOTE] diff --git a/modules/dynamic-provisioning-available-plugins.adoc b/modules/dynamic-provisioning-available-plugins.adoc index d7a9dbd08f..cf350de7b8 100644 --- a/modules/dynamic-provisioning-available-plugins.adoc +++ b/modules/dynamic-provisioning-available-plugins.adoc @@ -1,6 +1,7 @@ // Module included in the following assemblies // // * storage/dynamic-provisioning.adoc +// * post_installation_configuration/storage-configuration.adoc [id="available-plug-ins_{context}"] = Available dynamic provisioning plug-ins diff --git a/modules/dynamic-provisioning-aws-definition.adoc b/modules/dynamic-provisioning-aws-definition.adoc index 12bf2fd35b..557a2bbf15 100644 --- a/modules/dynamic-provisioning-aws-definition.adoc +++ b/modules/dynamic-provisioning-aws-definition.adoc @@ -1,6 +1,7 @@ // Module included in the following assemblies: // // * storage/dynamic-provisioning.adoc +// * post_installation_configuration/storage-configuration.adoc [id="aws-definition_{context}"] = AWS Elastic Block Store (EBS) object definition diff --git a/modules/dynamic-provisioning-azure-disk-definition.adoc b/modules/dynamic-provisioning-azure-disk-definition.adoc index eb518cf8cc..be4617f9e8 100644 --- a/modules/dynamic-provisioning-azure-disk-definition.adoc +++ b/modules/dynamic-provisioning-azure-disk-definition.adoc @@ -1,6 +1,7 @@ // Module included in the following assemblies: // // * storage/dynamic-provisioning.adoc +// * post_installation_configuration/storage-configuration.adoc [id="azure-disk-definition_{context}"] = Azure Disk object definition diff --git a/modules/dynamic-provisioning-azure-file-considerations.adoc b/modules/dynamic-provisioning-azure-file-considerations.adoc index d724549f91..6af400ac39 100644 --- a/modules/dynamic-provisioning-azure-file-considerations.adoc +++ b/modules/dynamic-provisioning-azure-file-considerations.adoc @@ -1,6 +1,7 @@ // Module included in the following assemblies: // // storage/persistent_storage/persistent-storage-azure-file.adoc +// * post_installation_configuration/storage-configuration.adoc [id="azure-file-considerations_{context}"] = Considerations when using Azure File diff --git a/modules/dynamic-provisioning-azure-file-definition.adoc b/modules/dynamic-provisioning-azure-file-definition.adoc index e1716db950..f4714dd3dd 100644 --- a/modules/dynamic-provisioning-azure-file-definition.adoc +++ b/modules/dynamic-provisioning-azure-file-definition.adoc @@ -1,6 +1,8 @@ // Module included in the following assemblies // // * storage/dynamic-provisioning.adoc +// * post_installation_configuration/storage-configuration.adoc + [id="azure-file-definition_{context}"] = Azure File object definition diff --git a/modules/dynamic-provisioning-change-default-class.adoc b/modules/dynamic-provisioning-change-default-class.adoc index 20e3cf4aa2..a97d48cb02 100644 --- a/modules/dynamic-provisioning-change-default-class.adoc +++ b/modules/dynamic-provisioning-change-default-class.adoc @@ -2,6 +2,8 @@ // // * storage/dynamic-provisioning.adoc // * virt/virtual_machines/importing_vms/virt-importing-rhv-vm.adoc +// * post_installation_configuration/storage-configuration.adoc + [id="change-default-storage-class_{context}"] = Changing the default StorageClass diff --git a/modules/dynamic-provisioning-cinder-definition.adoc b/modules/dynamic-provisioning-cinder-definition.adoc index 712207f744..eec8fc4917 100644 --- a/modules/dynamic-provisioning-cinder-definition.adoc +++ b/modules/dynamic-provisioning-cinder-definition.adoc @@ -1,6 +1,7 @@ // Module included in the 
following assemblies: // // * storage/dynamic-provisioning.adoc +// * post_installation_configuration/storage-configuration.adoc [id="openstack-cinder-storage-class_{context}"] = {rh-openstack} Cinder object definition @@ -19,11 +20,10 @@ parameters: fsType: ext4 <3> ---- <1> Volume type created in Cinder. Default is empty. -<2> Availability Zone. If not specified, volumes are generally -round-robined across all active zones where the {product-title} cluster +<2> Availability Zone. If not specified, volumes are generally +round-robined across all active zones where the {product-title} cluster has a node. -<3> File system that is created on dynamically provisioned volumes. This -value is copied to the `fsType` field of dynamically provisioned -persistent volumes and the file system is created when the volume is +<3> File system that is created on dynamically provisioned volumes. This +value is copied to the `fsType` field of dynamically provisioned +persistent volumes and the file system is created when the volume is mounted for the first time. The default value is `ext4`. - diff --git a/modules/dynamic-provisioning-defining-storage-class.adoc b/modules/dynamic-provisioning-defining-storage-class.adoc index 31ff4dc20e..6d36535eb8 100644 --- a/modules/dynamic-provisioning-defining-storage-class.adoc +++ b/modules/dynamic-provisioning-defining-storage-class.adoc @@ -1,6 +1,7 @@ // Module included in the following assemblies: // // * storage/dynamic-provisioning.adoc +// * post_installation_configuration/storage-configuration.adoc [id="defining-storage-classes_{context}"] = Defining a StorageClass diff --git a/modules/dynamic-provisioning-gce-definition.adoc b/modules/dynamic-provisioning-gce-definition.adoc index 0ad429ced6..02b2d7a3be 100644 --- a/modules/dynamic-provisioning-gce-definition.adoc +++ b/modules/dynamic-provisioning-gce-definition.adoc @@ -1,6 +1,7 @@ // Module included in the following assemblies: // // * storage/dynamic-provisioning.adoc +// * post_installation_configuration/storage-configuration.adoc [id="gce-persistentdisk-storage-class_{context}"] = GCE PersistentDisk (gcePD) object definition diff --git a/modules/dynamic-provisioning-storage-class-definition.adoc b/modules/dynamic-provisioning-storage-class-definition.adoc index 5494437dce..992630be84 100644 --- a/modules/dynamic-provisioning-storage-class-definition.adoc +++ b/modules/dynamic-provisioning-storage-class-definition.adoc @@ -1,12 +1,13 @@ // Module included in the following assemblies: // // * storage/dynamic-provisioning.adoc +// * post_installation_configuration/storage-configuration.adoc [id="basic-storage-class-definition_{context}"] = Basic StorageClass object definition -The following resource shows the parameters and default values that you -use to configure a StorageClass. This example uses the AWS +The following resource shows the parameters and default values that you +use to configure a StorageClass. This example uses the AWS ElasticBlockStore (EBS) object definition. 
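The definition itself is not reproduced in this diff; a minimal sketch of it, assuming the `gp2` EBS volume type, looks like the following:

[source,yaml]
----
kind: StorageClass
apiVersion: storage.k8s.io/v1 <1>
metadata:
  name: gp2 <2>
provisioner: kubernetes.io/aws-ebs <3>
parameters: <4>
  type: gp2
reclaimPolicy: Delete
----
<1> (required) The current StorageClass API version.
<2> (required) The name of the StorageClass.
<3> (required) The provisioner plug-in; `kubernetes.io/aws-ebs` is the in-tree AWS EBS provisioner.
<4> (optional) Parameters specific to the provisioner; `type: gp2` selects general-purpose SSD volumes.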
diff --git a/modules/dynamic-provisioning-vsphere-definition.adoc b/modules/dynamic-provisioning-vsphere-definition.adoc index 0b1c5fe578..e02e11d3f8 100644 --- a/modules/dynamic-provisioning-vsphere-definition.adoc +++ b/modules/dynamic-provisioning-vsphere-definition.adoc @@ -1,6 +1,8 @@ // Module included in the following definitions: // // * storage/dynamic-provisioning.adoc +// * post_installation_configuration/storage-configuration.adoc + [id="vsphere-definition_{context}"] = VMware vSphere object definition diff --git a/modules/enabling-etcd-encryption.adoc b/modules/enabling-etcd-encryption.adoc index d27256c049..d4d55520b2 100644 --- a/modules/enabling-etcd-encryption.adoc +++ b/modules/enabling-etcd-encryption.adoc @@ -1,6 +1,7 @@ // Module included in the following assemblies: // // * security/encrypting-etcd.adoc +// * post_installation_configuration/cluster-tasks.adoc [id="enabling-etcd-encryption_{context}"] = Enabling etcd encryption diff --git a/modules/how-huge-pages-are-consumed-by-apps.adoc b/modules/how-huge-pages-are-consumed-by-apps.adoc index 1c5ddaa47c..375f0d616d 100644 --- a/modules/how-huge-pages-are-consumed-by-apps.adoc +++ b/modules/how-huge-pages-are-consumed-by-apps.adoc @@ -1,6 +1,7 @@ // Module included in the following assemblies: // // * scalability_and_performance/what-huge-pages-do-and-how-they-are-consumed-by-apps.adoc +// * post_installation_configuration/node-tasks.adoc [id="how-huge-pages-are-consumed-by-apps_{context}"] = How huge pages are consumed by apps diff --git a/modules/identity-provider-default-CR.adoc b/modules/identity-provider-default-CR.adoc index 4f5b71643c..94ba523456 100644 --- a/modules/identity-provider-default-CR.adoc +++ b/modules/identity-provider-default-CR.adoc @@ -1,6 +1,7 @@ // Module included in the following assemblies: // // * authentication/understanding-identity-provider.adoc +// * post_installation_configuration/preparing-for-users.adoc [id="identity-provider-default-CR_{context}"] = Sample identity provider CR diff --git a/modules/identity-provider-overview.adoc b/modules/identity-provider-overview.adoc index fa03f5c9da..d933f1f69a 100644 --- a/modules/identity-provider-overview.adoc +++ b/modules/identity-provider-overview.adoc @@ -12,12 +12,13 @@ // * authentication/identity_providers/configuring-gitlab-identity-provider.adoc // * authentication/identity_providers/configuring-google-identity-provider.adoc // * authentication/identity_providers/configuring-oidc-identity-provider.adoc +// * post_installation_configuration/preparing-for-users.adoc [id="identity-provider-overview_{context}"] = About identity providers in {product-title} -By default, only a `kubeadmin` user exists on your cluster. To specify an -identity provider, you must create a Custom Resource (CR) that describes +By default, only a `kubeadmin` user exists on your cluster. To specify an +identity provider, you must create a Custom Resource (CR) that describes that identity provider and add it to the cluster. 
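A sketch of such a CR, assuming an HTPasswd provider purely for illustration (the provider name and secret name are placeholders):

[source,yaml]
----
apiVersion: config.openshift.io/v1
kind: OAuth
metadata:
  name: cluster
spec:
  identityProviders:
  - name: my_identity_provider <1>
    mappingMethod: claim <2>
    type: HTPasswd
    htpasswd:
      fileData:
        name: htpass-secret <3>
----
<1> A name for the provider, shown on the login page.
<2> Controls how identities from this provider map to users; `claim` is the default.
<3> A Secret in the `openshift-config` namespace that contains the htpasswd file.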
[NOTE] diff --git a/modules/identity-provider-parameters.adoc b/modules/identity-provider-parameters.adoc index f124685aea..98365bf21d 100644 --- a/modules/identity-provider-parameters.adoc +++ b/modules/identity-provider-parameters.adoc @@ -1,6 +1,7 @@ // Module included in the following assemblies: // // * authentication/understanding-identity-provider.adoc +// * post_installation_configuration/preparing-for-users.adoc [id="identity-provider-parameters_{context}"] = Identity provider parameters diff --git a/modules/images-configuration-cas.adoc b/modules/images-configuration-cas.adoc index a50b0dbb41..8b6aace914 100644 --- a/modules/images-configuration-cas.adoc +++ b/modules/images-configuration-cas.adoc @@ -2,6 +2,7 @@ // // * registry/configuring-registry-operator.adoc // * openshift_images/image-configuration.adoc +// * post_installation_configuration/preparing-for-users.adoc [id="images-configuration-cas_{context}"] = Configuring additional trust stores for image registry access diff --git a/modules/images-configuration-file.adoc b/modules/images-configuration-file.adoc index be862ac672..e0d01d854e 100644 --- a/modules/images-configuration-file.adoc +++ b/modules/images-configuration-file.adoc @@ -1,6 +1,7 @@ // Module included in the following assemblies: // // * openshift_images/image-configuration.adoc +// * post_installation_configuration/preparing-for-users.adoc [id="images-configuration-file_{context}"] = Configuring image settings @@ -68,5 +69,5 @@ pods. For instance, whether or not to allow insecure access. It does not contain configuration for the internal cluster registry. <5> `insecureRegistries`: Registries which do not have a valid TLS certificate or only support HTTP connections. -<6> `blockedRegistries`: Blacklisted for image pull and push actions. All other +<6> `blockedRegistries`: Denylisted for image pull and push actions. All other registries are allowed. diff --git a/modules/images-configuration-insecure.adoc b/modules/images-configuration-insecure.adoc index 9e3b487900..2084d30761 100644 --- a/modules/images-configuration-insecure.adoc +++ b/modules/images-configuration-insecure.adoc @@ -1,6 +1,7 @@ // Module included in the following assemblies: // // * openshift_images/image-configuration.adoc +// * post_installation_configuration/preparing-for-users.adoc [id="images-configuration-insecure_{context}"] = Importing insecure registries and blocking registries @@ -51,7 +52,7 @@ status: internalRegistryHostname: image-registry.openshift-image-registry.svc:5000 ---- <1> Specify an insecure registry. -<2> Specify registries that should be blacklisted for image pull and push actions. All other +<2> Specify registries that should be denylisted for image pull and push actions. All other registries are allowed. Either `blockedRegistries` or `allowedRegistries` can be set, but not both. <3> Specify registries that should be permitted for image pull and push actions. All other registries are denied. Either `blockedRegistries` or `allowedRegistries` can be set, but not both. 
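For contrast with the denylist shown above, a minimal sketch of the allowlist variant (the registry names are placeholders):

[source,yaml]
----
apiVersion: config.openshift.io/v1
kind: Image
metadata:
  name: cluster
spec:
  registrySources:
    allowedRegistries: <1>
    - example.com
    - quay.io
----
<1> Only these registries are permitted for image pull and push actions; all others are denied. Do not set `blockedRegistries` in the same spec.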
+ diff --git a/modules/images-configuration-parameters.adoc b/modules/images-configuration-parameters.adoc index ad8efbea03..3efc21696d 100644 --- a/modules/images-configuration-parameters.adoc +++ b/modules/images-configuration-parameters.adoc @@ -1,6 +1,7 @@ // Module included in the following assemblies: // // * openshift_images/image-configuration.adoc +// * post_installation_configuration/preparing-for-users.adoc [id="images-configuration-parameters_{context}"] = Image controller configuration parameters @@ -58,10 +59,10 @@ field in ImageStreams. The value must be in `hostname[:port]` format. `insecureRegistries`: Registries which do not have a valid TLS certificate or only support HTTP connections. -`blockedRegistries`: Blacklisted for image pull and push actions. All other +`blockedRegistries`: Denylisted for image pull and push actions. All other registries are allowed. -`allowedRegistries`: Whitelisted for image pull and push actions. All other +`allowedRegistries`: Allowlisted for image pull and push actions. All other registries are blocked. Only one of `blockedRegistries` or `allowedRegistries` may be set diff --git a/modules/images-configuration-registry-mirror.adoc b/modules/images-configuration-registry-mirror.adoc index 574095260f..6d077c6dfb 100644 --- a/modules/images-configuration-registry-mirror.adoc +++ b/modules/images-configuration-registry-mirror.adoc @@ -1,6 +1,7 @@ // Module included in the following assemblies: // // * openshift_images/image-configuration.adoc +// * post_installation_configuration/preparing-for-users.adoc [id="images-configuration-registry-mirror_{context}"] = Configuring image registry repository mirroring @@ -69,8 +70,8 @@ on a Red Hat Enterprise Linux [source,terminal] ---- $ skopeo copy \ - docker://registry.access.redhat.com/ubi8/ubi-minimal@sha256:c505667389712dc337986e29ffcb65116879ef27629dc3ce6e1b17727c06e78f \ - docker://example.io/ubi8/ubi-minimal +docker://registry.access.redhat.com/ubi8/ubi-minimal@sha256:c505667389712dc337986e29ffcb65116879ef27629dc3ce6e1b17727c06e78f \ +docker://example.io/example/ubi-minimal ---- + In this example, you have a container image registry that is named diff --git a/modules/infrastructure-components.adoc b/modules/infrastructure-components.adoc index ba936db182..5a262256ff 100644 --- a/modules/infrastructure-components.adoc +++ b/modules/infrastructure-components.adoc @@ -1,6 +1,7 @@ // Module included in the following assemblies: // // * machine_management/creating-infrastructure-machinesets.adoc +// * post_installation_configuration/cluster-tasks.adoc [id="infrastructure-components_{context}"] = {product-title} infrastructure components @@ -15,4 +16,4 @@ The following {product-title} components are infrastructure components: * Service brokers Any node that runs any other container, pod, or component is a worker node that -your subscription must cover. \ No newline at end of file +your subscription must cover. 
diff --git a/modules/installation-approve-csrs.adoc b/modules/installation-approve-csrs.adoc index 213efb9a1e..90d3824b09 100644 --- a/modules/installation-approve-csrs.adoc +++ b/modules/installation-approve-csrs.adoc @@ -12,6 +12,7 @@ // * installing/installing_ibm_z/installing-ibm-z.adoc // * machine_management/adding-rhel-compute.adoc // * machine_management/more-rhel-compute.adoc +// * post_installation_configuration/node-tasks.adoc ifeval::["{context}" == "installing-ibm-z"] :ibm-z: diff --git a/modules/machine-autoscaler-about.adoc b/modules/machine-autoscaler-about.adoc index 0b13ebe150..39cc052e15 100644 --- a/modules/machine-autoscaler-about.adoc +++ b/modules/machine-autoscaler-about.adoc @@ -1,6 +1,7 @@ // Module included in the following assemblies: // // * machine_management/applying-autoscaling.adoc +// * post_installation_configuration/cluster-tasks.adoc [id="machine-autoscaler-about_{context}"] = About the MachineAutoscaler diff --git a/modules/machine-autoscaler-cr.adoc b/modules/machine-autoscaler-cr.adoc index c3d24a8f51..bad691c52d 100644 --- a/modules/machine-autoscaler-cr.adoc +++ b/modules/machine-autoscaler-cr.adoc @@ -1,6 +1,7 @@ // Module included in the following assemblies: // // * machine_management/applying-autoscaling.adoc +// * post_installation_configuration/cluster-tasks.adoc [id="machine-autoscaler-cr_{context}"] = MachineAutoscaler resource definition @@ -29,7 +30,7 @@ which MachineSet this MachineAutoscaler scales, specify or include the name of the MachineSet to scale. The MachineSet name takes the following form: `--` <2> Specify the minimum number Machines of the specified type that must remain in the -specified zone after the ClusterAutoscaler initiates cluster scaling. If running in AWS, GCP, or Azure, this value can be set to `0`. For other providers, do not set this value to `0`. +specified zone after the ClusterAutoscaler initiates cluster scaling. If running in AWS, GCP, or Azure, this value can be set to `0`. For other providers, do not set this value to `0`. <3> Specify the maximum number Machines of the specified type that the ClusterAutoscaler can deploy in the specified AWS zone after it initiates cluster scaling. Ensure that the `maxNodesTotal` value in the `ClusterAutoscaler` definition is large enough to allow the MachineAutoScaler to deploy this number of machines. 
<4> In this section, provide values that describe the existing MachineSet to diff --git a/modules/machine-health-checks-about.adoc b/modules/machine-health-checks-about.adoc index 1785448215..f34382c0fd 100644 --- a/modules/machine-health-checks-about.adoc +++ b/modules/machine-health-checks-about.adoc @@ -1,6 +1,7 @@ // Module included in the following assemblies: // // * machine_management/deploying-machine-health-checks.adoc +// * post_installation_configuration/node-tasks.adoc [id="machine-health-checks-about_{context}"] = About MachineHealthChecks diff --git a/modules/machine-health-checks-creating.adoc b/modules/machine-health-checks-creating.adoc index fef8900775..0671cef15a 100644 --- a/modules/machine-health-checks-creating.adoc +++ b/modules/machine-health-checks-creating.adoc @@ -1,6 +1,7 @@ // Module included in the following assemblies: // // * machine_management/deploying-machine-health-checks.adoc +// * post_installation_configuration/node-tasks.adoc [id="machine-health-checks-creating_{context}"] = Creating a MachineHealthCheck resource diff --git a/modules/machine-health-checks-resource.adoc b/modules/machine-health-checks-resource.adoc index 99c2e9513e..b6c2ba8d97 100644 --- a/modules/machine-health-checks-resource.adoc +++ b/modules/machine-health-checks-resource.adoc @@ -1,6 +1,7 @@ // Module included in the following assemblies: // // * machine_management/deploying-machine-health-checks.adoc +// * post_installation_configuration/node-tasks.adoc [id="machine-health-checks-resource_{context}"] diff --git a/modules/machine-user-provisioned-limitations.adoc b/modules/machine-user-provisioned-limitations.adoc index ebb9445149..d18ee70cea 100644 --- a/modules/machine-user-provisioned-limitations.adoc +++ b/modules/machine-user-provisioned-limitations.adoc @@ -5,6 +5,7 @@ // * machine_management/creating-machinesets.adoc // * machine_management/deploying-machine-health-checks.adoc // * machine_management/manually-scaling-machinesets.adoc +// * post_installation_configuration/node-tasks.adoc [IMPORTANT] ==== diff --git a/modules/machineset-creating.adoc b/modules/machineset-creating.adoc index 0ebc41229e..192f7a9948 100644 --- a/modules/machineset-creating.adoc +++ b/modules/machineset-creating.adoc @@ -5,6 +5,7 @@ // * machine_management/creating_machinesets/creating-machineset-azure.adoc // * machine_management/creating_machinesets/creating-machineset-gcp.adoc // * machine_management/creating_machinesets/creating-machineset-osp.adoc +// * post_installation_configuration/cluster-tasks.adoc [id="machineset-creating_{context}"] = Creating a MachineSet diff --git a/modules/machineset-manually-scaling.adoc b/modules/machineset-manually-scaling.adoc index c01ae14843..4b9cd786ab 100644 --- a/modules/machineset-manually-scaling.adoc +++ b/modules/machineset-manually-scaling.adoc @@ -1,11 +1,17 @@ // Module included in the following assemblies: // // * machine_management/manually-scaling-machineset.adoc +// * post_installation_configuration/cluster-tasks.adoc [id="machineset-manually-scaling_{context}"] = Scaling a MachineSet manually -If you must add or remove an instance of a machine in a MachineSet, you can manually scale the MachineSet. +If you must add or remove an instance of a machine in a MachineSet, you can +manually scale the MachineSet. + +This guidance is relevant to fully automated, installer-provisioned +infrastructure installations. Customized, user-provisioned infrastructure +installations do not have MachineSets. 
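For reference, the scaling operation that the procedure below walks through reduces to a single command; the MachineSet name and replica count here are placeholders:

[source,terminal]
----
$ oc scale --replicas=2 machineset <machineset> -n openshift-machine-api
----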
.Prerequisites diff --git a/modules/machineset-yaml-aws.adoc b/modules/machineset-yaml-aws.adoc index 0644a176db..ec60721f76 100644 --- a/modules/machineset-yaml-aws.adoc +++ b/modules/machineset-yaml-aws.adoc @@ -2,6 +2,7 @@ // // * machine_management/creating-infrastructure-machinesets.adoc // * machine_management/creating_machinesets/creating-machineset-aws.adoc +// * post_installation_configuration/cluster-tasks.adoc [id="machineset-yaml-aws_{context}"] = Sample YAML for a MachineSet Custom Resource on AWS diff --git a/modules/machineset-yaml-azure.adoc b/modules/machineset-yaml-azure.adoc index 399859e115..aeb306c855 100644 --- a/modules/machineset-yaml-azure.adoc +++ b/modules/machineset-yaml-azure.adoc @@ -2,6 +2,7 @@ // // * machine_management/creating-infrastructure-machinesets.adoc // * machine_management/creating-machineset-azure.adoc +// * post_installation_configuration/cluster-tasks.adoc [id="machineset-yaml-azure_{context}"] = Sample YAML for a MachineSet Custom Resource on Azure diff --git a/modules/machineset-yaml-gcp.adoc b/modules/machineset-yaml-gcp.adoc index 366bcc60f9..507c2ae035 100644 --- a/modules/machineset-yaml-gcp.adoc +++ b/modules/machineset-yaml-gcp.adoc @@ -2,6 +2,7 @@ // // * machine_management/creating-infrastructure-machinesets.adoc // * machine_management/creating-machineset-gcp.adoc +// * post_installation_configuration/cluster-tasks.adoc [id="machineset-yaml-gcp_{context}"] = Sample YAML for a MachineSet Custom Resource on GCP diff --git a/modules/master-node-sizing.adoc b/modules/master-node-sizing.adoc index 69dd2bb599..a49b4dc1cf 100644 --- a/modules/master-node-sizing.adoc +++ b/modules/master-node-sizing.adoc @@ -1,11 +1,12 @@ // Module included in the following assemblies: // // * scalability_and_performance/recommended-host-practices.adoc +// * post_installation_configuration/node-tasks.adoc [id="master-node-sizing_{context}"] -= Master node sizing += Control plane node sizing -The master node resource requirements depend on the number of nodes in the cluster. The following master node size recommendations are based on the results of control plane density focused testing. +The control plane node resource requirements depend on the number of nodes in the cluster. The following control plane node size recommendations are based on the results of control plane density focused testing. [options="header",cols="3*"] |=== @@ -27,7 +28,7 @@ The master node resource requirements depend on the number of nodes in the clust [IMPORTANT] ==== -Because you cannot modify the master node size in a running {product-title} {product-version} cluster, you must estimate your total node count and use the suggested master size during installation. +Because you cannot modify the control plane node size in a running {product-title} {product-version} cluster, you must estimate your total node count and use the suggested control plane node size during installation. 
==== [NOTE] diff --git a/modules/modifying-template-for-new-projects.adoc b/modules/modifying-template-for-new-projects.adoc index 2185e7ec89..0e758a9237 100644 --- a/modules/modifying-template-for-new-projects.adoc +++ b/modules/modifying-template-for-new-projects.adoc @@ -1,6 +1,7 @@ // Module included in the following assemblies: // // * applications/projects/configuring-project-creation.adoc +// * post_installation_configuration/network-configuration.adoc [id="modifying-template-for-new-projects_{context}"] = Modifying the template for new projects diff --git a/modules/node-tuning-operator-supported-tuned-daemon-plug-ins.adoc b/modules/node-tuning-operator-supported-tuned-daemon-plug-ins.adoc index 39ac15a9cc..6579ab201a 100644 --- a/modules/node-tuning-operator-supported-tuned-daemon-plug-ins.adoc +++ b/modules/node-tuning-operator-supported-tuned-daemon-plug-ins.adoc @@ -1,6 +1,7 @@ // Module included in the following assemblies: // // * scalability_and_performance/using-node-tuning-operator.adoc +// * post_installation_configuration/node-tasks.adoc [id="supported-tuned-daemon-plug-ins_{context}"] = Supported Tuned daemon plug-ins diff --git a/modules/node-tuning-operator.adoc b/modules/node-tuning-operator.adoc index e7e13d5426..a2f7e4697b 100644 --- a/modules/node-tuning-operator.adoc +++ b/modules/node-tuning-operator.adoc @@ -1,7 +1,8 @@ // Module included in the following assemblies: // // * scalability_and_performance/using-node-tuning-operator.adoc -//* operators/operator-reference.adoc +// * operators/operator-reference.adoc +// * post_installation_configuration/node-tasks.adoc ifeval::["{context}" == "red-hat-operators"] :operators: diff --git a/modules/nodes-cluster-enabling-features-cluster.adoc b/modules/nodes-cluster-enabling-features-cluster.adoc index 605c0ff843..3fb7062196 100644 --- a/modules/nodes-cluster-enabling-features-cluster.adoc +++ b/modules/nodes-cluster-enabling-features-cluster.adoc @@ -1,6 +1,7 @@ // Module included in the following assemblies: // -// * nodes/nodes-cluster-disabling-features.adoc +// * nodes/nodes-cluster-enabling-features.adoc +// * post_installation_configuration/cluster-tasks.adoc [id="nodes-cluster-enabling-features-cluster_{context}"] = Enabling Technology Preview features using FeatureGates diff --git a/modules/nodes-cluster-node-overcommit.adoc b/modules/nodes-cluster-node-overcommit.adoc index 2ee61a22f6..4c09d4c871 100644 --- a/modules/nodes-cluster-node-overcommit.adoc +++ b/modules/nodes-cluster-node-overcommit.adoc @@ -1,13 +1,11 @@ // Module included in the following assemblies: // // * nodes/clusters/nodes-cluster-overcommit.adoc +// * post_installation_configuration/node-tasks.adoc [id="nodes-cluster-node-overcommit_{context}"] -= Node-level overcommit += Node-level overcommit -You can use various ways to control overcommit on specific nodes, such as quality of service (QOS) +You can use various ways to control overcommit on specific nodes, such as quality of service (QOS) guarantees, CPU limits, or reserve resources. You can also disable overcommit for specific nodes and specific projects. 
- - - diff --git a/modules/nodes-cluster-overcommit-configure-nodes.adoc b/modules/nodes-cluster-overcommit-configure-nodes.adoc index e18501049b..7918ec6e2e 100644 --- a/modules/nodes-cluster-overcommit-configure-nodes.adoc +++ b/modules/nodes-cluster-overcommit-configure-nodes.adoc @@ -1,6 +1,7 @@ // Module included in the following assemblies: // // * nodes/nodes-cluster-overcommit.adoc +// * post_installation_configuration/node-tasks.adoc [id="nodes-cluster-overcommit-configure-nodes_{context}"] diff --git a/modules/nodes-cluster-overcommit-node-disable.adoc b/modules/nodes-cluster-overcommit-node-disable.adoc index 11cbe8242f..5efe6a6051 100644 --- a/modules/nodes-cluster-overcommit-node-disable.adoc +++ b/modules/nodes-cluster-overcommit-node-disable.adoc @@ -1,6 +1,7 @@ // Module included in the following assemblies: // // * nodes/nodes-cluster-overcommit.adoc +// * post_installation_configuration/node-tasks.adoc [id="nodes-cluster-overcommit-node-disable_{context}"] = Disabling overcommitment for a node @@ -15,4 +16,3 @@ To disable overcommitment in a node run the following command on that node: ---- $ sysctl -w vm.overcommit_memory=0 ---- - diff --git a/modules/nodes-cluster-overcommit-node-enforcing.adoc b/modules/nodes-cluster-overcommit-node-enforcing.adoc index 5d2af8f8fe..9cc54486e4 100644 --- a/modules/nodes-cluster-overcommit-node-enforcing.adoc +++ b/modules/nodes-cluster-overcommit-node-enforcing.adoc @@ -1,6 +1,7 @@ // Module included in the following assemblies: // // * nodes/nodes-cluster-overcommit.adoc +// * post_installation_configuration/node-tasks.adoc [id="nodes-cluster-overcommit-node-enforcing_{context}"] diff --git a/modules/nodes-cluster-overcommit-node-resources.adoc b/modules/nodes-cluster-overcommit-node-resources.adoc index 75d70a7364..3149307918 100644 --- a/modules/nodes-cluster-overcommit-node-resources.adoc +++ b/modules/nodes-cluster-overcommit-node-resources.adoc @@ -1,18 +1,19 @@ // Module included in the following assemblies: // // * nodes/nodes-cluster-overcommit.adoc +// * post_installation_configuration/node-tasks.adoc [id="nodes-cluster-overcommit-node-resources_{context}"] = Reserving resources for system processes -To provide more reliable scheduling and minimize node resource overcommitment, -each node can reserve a portion of its resources for use by system daemons -that are required to run on your node for your cluster to function (*sshd*, etc.). +To provide more reliable scheduling and minimize node resource overcommitment, +each node can reserve a portion of its resources for use by system daemons +that are required to run on your node for your cluster to function. In particular, it is recommended that you reserve resources for incompressible resources such as memory. .Procedure To explicitly reserve resources for non-pod processes, allocate node resources by specifying resources -available for scheduling. +available for scheduling. For more details, see Allocating Resources for Nodes. 
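A sketch of one way to make such a reservation, assuming a `KubeletConfig` custom resource and a `custom-kubelet: small-pods` label already added to the target MachineConfigPool (the name, label, and values are illustrative):

[source,yaml]
----
apiVersion: machineconfiguration.openshift.io/v1
kind: KubeletConfig
metadata:
  name: set-allocatable <1>
spec:
  machineConfigPoolSelector:
    matchLabels:
      custom-kubelet: small-pods <2>
  kubeletConfig:
    systemReserved: <3>
      cpu: 500m
      memory: 1Gi
----
<1> A name for the custom resource.
<2> Selects the MachineConfigPool that the kubelet configuration applies to.
<3> Resources reserved for system daemons; the scheduler treats the remainder as allocatable.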
diff --git a/modules/nodes-cluster-overcommit-project-disable.adoc b/modules/nodes-cluster-overcommit-project-disable.adoc index a76f35e876..3173c6d36b 100644 --- a/modules/nodes-cluster-overcommit-project-disable.adoc +++ b/modules/nodes-cluster-overcommit-project-disable.adoc @@ -1,6 +1,7 @@ // Module included in the following assemblies: // // * nodes/nodes-cluster-overcommit.adoc +// * post_installation_configuration/node-tasks.adoc [id="nodes-cluster-overcommit-project-disable_{context}"] = Disabling overcommitment for a project diff --git a/modules/nodes-cluster-overcommit-qos-about.adoc b/modules/nodes-cluster-overcommit-qos-about.adoc index 0e6c800c75..7b6ed9242b 100644 --- a/modules/nodes-cluster-overcommit-qos-about.adoc +++ b/modules/nodes-cluster-overcommit-qos-about.adoc @@ -1,6 +1,7 @@ // Module included in the following assemblies: // // * nodes/nodes-cluster-overcommit.adoc +// * post_installation_configuration/node-tasks.adoc [id="nodes-cluster-overcommit-qos-about_{context}"] = Understanding overcomitment and quality of service classes diff --git a/modules/nodes-cluster-overcommit-resource-requests.adoc b/modules/nodes-cluster-overcommit-resource-requests.adoc index d54736fdc7..1326962550 100644 --- a/modules/nodes-cluster-overcommit-resource-requests.adoc +++ b/modules/nodes-cluster-overcommit-resource-requests.adoc @@ -1,6 +1,7 @@ // Module included in the following assemblies: // // * nodes/nodes-cluster-overcommit.adoc +// * post_installation_configuration/node-tasks.adoc [id="nodes-cluster-overcommit-resource-requests_{context}"] = Resource requests and overcommitment diff --git a/modules/nodes-cluster-overcommit-resources-containers.adoc b/modules/nodes-cluster-overcommit-resources-containers.adoc index f420d50c8e..04ee5ab289 100644 --- a/modules/nodes-cluster-overcommit-resources-containers.adoc +++ b/modules/nodes-cluster-overcommit-resources-containers.adoc @@ -1,6 +1,7 @@ // Module included in the following assemblies: // // * nodes/nodes-cluster-overcommit.adoc +// * post_installation_configuration/node-tasks.adoc [id="nodes-cluster-overcommit-reserving-memory_{context}"] = Understanding compute resources and containers diff --git a/modules/nodes-cluster-project-overcommit.adoc b/modules/nodes-cluster-project-overcommit.adoc index dfbb409232..d72f351485 100644 --- a/modules/nodes-cluster-project-overcommit.adoc +++ b/modules/nodes-cluster-project-overcommit.adoc @@ -1,9 +1,10 @@ // Module included in the following assemblies: // // * nodes/clusters/nodes-cluster-overcommit.adoc +// * post_installation_configuration/node-tasks.adoc [id="nodes-cluster-project-overcommit_{context}"] -= Project-level limits += Project-level limits To help control overcommit, you can set per-project resource limit ranges, specifying memory and CPU limits and defaults for a project that overcommit @@ -11,6 +12,4 @@ cannot exceed. For information on project-level resource limits, see Additional Resources. -Alternatively, you can disable overcommitment for specific projects. - - +Alternatively, you can disable overcommitment for specific projects. 
diff --git a/modules/nodes-cluster-resource-configure.adoc b/modules/nodes-cluster-resource-configure.adoc index 2ef39f6009..e6ef819c1b 100644 --- a/modules/nodes-cluster-resource-configure.adoc +++ b/modules/nodes-cluster-resource-configure.adoc @@ -1,21 +1,22 @@ // Module included in the following assemblies: // // * nodes/clusters/nodes-cluster-overcommit.adoc +// * post_installation_configuration/node-tasks.adoc [id="nodes-cluster-resource-configure_{context}"] -= Configuring cluster-level overcommit += Configuring cluster-level overcommit -The Cluster Resource Override Operator requires a `ClusterResourceOverride` custom resource (CR) +The Cluster Resource Override Operator requires a `ClusterResourceOverride` custom resource (CR) and a label for each project where you want the Operator to control overcommit. .Prerequisites * The Cluster Resource Override Operator has no effect if limits have not -been set on containers. You must specify default limits for a project using a LimitRange +been set on containers. You must specify default limits for a project using a LimitRange object or configure limits in Pod specs in order for the overrides to apply. -.Procedure +.Procedure To modify cluster-level overcommit: @@ -36,7 +37,7 @@ spec: <2> Optional. Specify the percentage to override the container CPU limit, if used, between 1-100. The default is 25. <3> Optional. Specify the percentage to override the container memory limit, if used. Scaling 1Gi of RAM at 100 percent is equal to 1 CPU core. This is processed prior to overriding the CPU request, if configured. The default is 200. -. Ensure the following label has been added to the Namespace object for each project where you want the Cluster Resource Override Operator to control overcommit: +. Ensure the following label has been added to the Namespace object for each project where you want the Cluster Resource Override Operator to control overcommit: + [source,yaml] ---- @@ -53,4 +54,3 @@ metadata: ---- <1> Add this label to each project. - diff --git a/modules/nodes-cluster-resource-override-deploy-cli.adoc b/modules/nodes-cluster-resource-override-deploy-cli.adoc index 99abd19983..ccfb99249f 100644 --- a/modules/nodes-cluster-resource-override-deploy-cli.adoc +++ b/modules/nodes-cluster-resource-override-deploy-cli.adoc @@ -1,6 +1,7 @@ // Module included in the following assemblies: // // * nodes/clusters/nodes-cluster-overcommit.adoc +// * post_installation_configuration/node-tasks.adoc [id="nodes-cluster-resource-override-deploy-cli_{context}"] = Installing the Cluster Resource Override Operator using the CLI diff --git a/modules/nodes-cluster-resource-override-deploy-console.adoc b/modules/nodes-cluster-resource-override-deploy-console.adoc index b882e78709..4b2e67268a 100644 --- a/modules/nodes-cluster-resource-override-deploy-console.adoc +++ b/modules/nodes-cluster-resource-override-deploy-console.adoc @@ -1,16 +1,17 @@ // Module included in the following assemblies: // // * nodes/clusters/nodes-cluster-overcommit.adoc +// * post_installation_configuration/node-tasks.adoc [id="nodes-cluster-resource-override-deploy-console_{context}"] = Installing the Cluster Resource Override Operator using the web console -You can use the {product-title} web console to install the Cluster Resource Override Operator to help control overcommit in your cluster. +You can use the {product-title} web console to install the Cluster Resource Override Operator to help control overcommit in your cluster. 
.Prerequisites * The Cluster Resource Override Operator has no effect if limits have not -been set on containers. You must specify default limits for a project using a LimitRange +been set on containers. You must specify default limits for a project using a LimitRange object or configure limits in Pod specs in order for the overrides to apply. .Procedure @@ -29,7 +30,7 @@ To install the Cluster Resource Override Operator using the {product-title} web .. Choose *ClusterResourceOverride Operator* from the list of available Operators and click *Install*. -.. On the *Create Operator Subscription* page, make sure *A specific Namespace on the cluster* is selected for *Installation Mode*. +.. On the *Create Operator Subscription* page, make sure *A specific Namespace on the cluster* is selected for *Installation Mode*. .. Make sure *clusterresourceoverride-operator* is selected for *Installed Namespace*. @@ -123,6 +124,6 @@ metadata: labels: clusterresourceoverrides.admission.autoscaling.openshift.io: enabled <1> ----- +---- <1> Add the `clusterresourceoverrides.admission.autoscaling.openshift.io: enabled` label to the Namespace. //// diff --git a/modules/nodes-cluster-resource-override.adoc b/modules/nodes-cluster-resource-override.adoc index c929741e27..77e09a440e 100644 --- a/modules/nodes-cluster-resource-override.adoc +++ b/modules/nodes-cluster-resource-override.adoc @@ -1,14 +1,15 @@ // Module included in the following assemblies: // // * nodes/clusters/nodes-cluster-overcommit.adoc +// * post_installation_configuration/node-tasks.adoc [id="nodes-cluster-resource-override_{context}"] -= Cluster-level overcommit using the Cluster Resource Override Operator += Cluster-level overcommit using the Cluster Resource Override Operator The Cluster Resource Override Operator is an admission webhook that allows you to control the level of overcommit and manage container density across all the nodes in your cluster. The Operator controls how nodes in specific projects can exceed defined memory and CPU limits. -You must install the Cluster Resource Override Operator using the {product-title} console or CLI as shown in the following sections. +You must install the Cluster Resource Override Operator using the {product-title} console or CLI as shown in the following sections. During the installation, you create a `ClusterResourceOverride` custom resource (CR), where you set the level of overcommit, as shown in the following example: @@ -31,12 +32,12 @@ spec: [NOTE] ==== The Cluster Resource Override Operator overrides have no effect if limits have not -been set on containers. Create a LimitRange object with default limits per individual project +been set on containers. Create a LimitRange object with default limits per individual project or configure limits in Pod specs in order for the overrides to apply. ==== When configured, overrides can be enabled per-project by applying the following -label to the Namespace object for each project: +label to the Namespace object for each project: [source,yaml] ---- @@ -53,4 +54,4 @@ metadata: ---- -The Operator watches for the `ClusterResourceOverride` CR and ensures that the `ClusterResourceOverride` admission webhook is installed into the same namespace as the Operator. +The Operator watches for the `ClusterResourceOverride` CR and ensures that the `ClusterResourceOverride` admission webhook is installed into the same namespace as the Operator. 
diff --git a/modules/nodes-nodes-garbage-collection-configuring.adoc b/modules/nodes-nodes-garbage-collection-configuring.adoc index 57f800e431..ed2f582cbc 100644 --- a/modules/nodes-nodes-garbage-collection-configuring.adoc +++ b/modules/nodes-nodes-garbage-collection-configuring.adoc @@ -2,6 +2,7 @@ // Module included in the following assemblies: // // * nodes/nodes-nodes-garbage-collection.adoc +// * post_installation_configuration/node-tasks.adoc [id="nodes-nodes-garbage-collection-configuring_{context}"] = Configuring garbage collection for containers and images diff --git a/modules/nodes-nodes-garbage-collection-containers.adoc b/modules/nodes-nodes-garbage-collection-containers.adoc index b56e0446a9..e93c07376e 100644 --- a/modules/nodes-nodes-garbage-collection-containers.adoc +++ b/modules/nodes-nodes-garbage-collection-containers.adoc @@ -1,6 +1,8 @@ // Module included in the following assemblies: // // * nodes/nodes-nodes-garbage-collection.adoc +// * post_installation_configuration/node-tasks.adoc + [id="nodes-nodes-garbage-collection-containers_{context}"] = Understanding how terminated containers are removed though garbage collection diff --git a/modules/nodes-nodes-garbage-collection-images.adoc b/modules/nodes-nodes-garbage-collection-images.adoc index fab69a5475..6bbf9d4a31 100644 --- a/modules/nodes-nodes-garbage-collection-images.adoc +++ b/modules/nodes-nodes-garbage-collection-images.adoc @@ -1,12 +1,13 @@ // Module included in the following assemblies: // // * nodes/nodes-nodes-garbage-collection.adoc +// * post_installation_configuration/node-tasks.adoc [id="nodes-nodes-garbage-collection-images_{context}"] = Understanding how images are removed though garbage collection Image garbage collection relies on disk usage as reported by *cAdvisor* on the -node to decide which images to remove from the node. +node to decide which images to remove from the node. The policy for image garbage collection is based on two conditions: @@ -16,7 +17,7 @@ garbage collection. The default is *85*. * The percent of disk usage (expressed as an integer) to which image garbage collection attempts to free. Default is *80*. -For image garbage collection, you can modify any of the following variables using +For image garbage collection, you can modify any of the following variables using a Custom Resource. .Variables for configuring image garbage collection @@ -51,4 +52,3 @@ stamp. Once the collection starts, the oldest images get deleted first until the stopping criterion is met. 
- diff --git a/modules/nodes-nodes-managing-max-pods-proc.adoc b/modules/nodes-nodes-managing-max-pods-proc.adoc index 4c29884ed8..cde20b152d 100644 --- a/modules/nodes-nodes-managing-max-pods-proc.adoc +++ b/modules/nodes-nodes-managing-max-pods-proc.adoc @@ -1,6 +1,7 @@ // Module included in the following assemblies: // // * nodes/nodes-nodes-managing-max-pods.adoc +// * post_installation_configuration/node-tasks.adoc [id="nodes-nodes-managing-max-pods-about_{context}"] = Configuring the maximum number of Pods per Node diff --git a/modules/nodes-pods-plugins-about.adoc b/modules/nodes-pods-plugins-about.adoc index f5d2959a97..5b678252c8 100644 --- a/modules/nodes-pods-plugins-about.adoc +++ b/modules/nodes-pods-plugins-about.adoc @@ -1,6 +1,7 @@ // Module included in the following assemblies: // // * nodes/nodes-pods-plugin.adoc +// * post_installation_configuration/node-tasks.adoc [id="nodes-pods-plugins-about_{context}"] = Understanding device plug-ins @@ -71,4 +72,3 @@ file system, as well as socket creation, they must be run in a privileged security context. * More specific details regarding deployment steps can be found with each device plug-in implementation. - diff --git a/modules/nodes-pods-plugins-device-mgr.adoc b/modules/nodes-pods-plugins-device-mgr.adoc index fe02e8a56c..95202dab60 100644 --- a/modules/nodes-pods-plugins-device-mgr.adoc +++ b/modules/nodes-pods-plugins-device-mgr.adoc @@ -1,6 +1,7 @@ // Module included in the following assemblies: // // * nodes/nodes-pods-plugins.adoc +// * post_installation_configuration/node-tasks.adoc [id="nodes-pods-plugins-device-mgr_{context}"] = Understanding the Device Manager diff --git a/modules/nodes-pods-plugins-install.adoc b/modules/nodes-pods-plugins-install.adoc index 68cf1ab5ec..fd7c9cc8e3 100644 --- a/modules/nodes-pods-plugins-install.adoc +++ b/modules/nodes-pods-plugins-install.adoc @@ -1,6 +1,7 @@ // Module included in the following assemblies: // // * nodes/nodes-pods-plugins.adoc +// * post_installation_configuration/node-tasks.adoc [id="nodes-pods-plugins-install_{context}"] = Enabling Device Manager @@ -32,7 +33,7 @@ For example: [source,terminal] ---- Name: 00-worker -Namespace: +Namespace: Labels: machineconfiguration.openshift.io/role=worker <1> ---- <1> Label required for the device manager. diff --git a/modules/nodes-pods-pod-disruption-about.adoc b/modules/nodes-pods-pod-disruption-about.adoc index 1cc8a8120c..8777aa12d0 100644 --- a/modules/nodes-pods-pod-disruption-about.adoc +++ b/modules/nodes-pods-pod-disruption-about.adoc @@ -2,14 +2,15 @@ // // * nodes/nodes-pods-configuring.adoc // * nodes/nodes-cluster-pods-configuring +// * post_installation_configuration/cluster-tasks.adoc [id="nodes-pods-configuring-pod-distruption-about_{context}"] -= Understanding how to use pod disruption budgets to specify the number of pods that must be up += Understanding how to use Pod disruption budgets to specify the number of Pods that must be up A _pod disruption budget_ is part of the link:http://kubernetes.io/docs/admin/disruptions/[Kubernetes] API, which can be managed with `oc` commands like other object types. They -allow the specification of safety constraints on pods during operations, such as +allow the specification of safety constraints on Pods during operations, such as draining a node for maintenance. `PodDisruptionBudget` is an API object that specifies the minimum number or @@ -20,19 +21,19 @@ upgrade) and is only honored on voluntary evictions (not on node failures). 
A `PodDisruptionBudget` object's configuration consists of the following key parts: -* A label selector, which is a label query over a set of pods. -* An availability level, which specifies the minimum number of pods that must be +* A label selector, which is a label query over a set of Pods. +* An availability level, which specifies the minimum number of Pods that must be available simultaneously, either: -** `minAvailable` is the number of pods must always be available, even during a disruption. +** `minAvailable` is the number of Pods that must always be available, even during a disruption. ** `maxUnavailable` is the number of Pods that can be unavailable during a disruption. [NOTE] ==== -A `maxUnavailable` of `0%` or `0` or a `minAvailable` of `100%` or equal to the number of replicas, -is permitted, but can block nodes from being drained. +A `maxUnavailable` of `0%` or `0` or a `minAvailable` of `100%` or equal to the number of replicas +is permitted but can block nodes from being drained. ==== -You can check for pod disruption budgets across all projects with the following: +You can check for Pod disruption budgets across all projects with the following: [source,terminal] ---- @@ -48,10 +49,10 @@ test-project my-pdb 2 foo=bar ---- The `PodDisruptionBudget` is considered healthy when there are at least -`minAvailable` pods running in the system. Every pod above that limit can be evicted. +`minAvailable` Pods running in the system. Every Pod above that limit can be evicted. [NOTE] ==== -Depending on your pod priority and preemption settings, -lower-priority pods might be removed despite their pod disruption budget requirements. +Depending on your Pod priority and preemption settings, +lower-priority Pods might be removed despite their Pod disruption budget requirements. ==== diff --git a/modules/nodes-pods-pod-disruption-configuring.adoc b/modules/nodes-pods-pod-disruption-configuring.adoc index 8799e01f34..8f8e0f11f7 100644 --- a/modules/nodes-pods-pod-disruption-configuring.adoc +++ b/modules/nodes-pods-pod-disruption-configuring.adoc @@ -2,6 +2,7 @@ // // * nodes/nodes-pods-configuring.adoc // * nodes/nodes-cluster-pods-configuring +// * post_installation_configuration/cluster-tasks.adoc [id="nodes-pods-pod-disruption-configuring_{context}"] = Specifying the number of pods that must be up with pod disruption budgets @@ -44,7 +45,7 @@ metadata: spec: maxUnavailable: 25% <2> selector: <3> - matchLabels: + matchLabels: foo: bar ---- <1> `PodDisruptionBudget` is part of the `policy/v1beta1` API group. diff --git a/modules/nodes-scheduler-taints-tolerations-about.adoc b/modules/nodes-scheduler-taints-tolerations-about.adoc index 5ca83dd431..fd85bb3376 100644 --- a/modules/nodes-scheduler-taints-tolerations-about.adoc +++ b/modules/nodes-scheduler-taints-tolerations-about.adoc @@ -1,6 +1,9 @@ // Module included in the following assemblies: // // * nodes/scheduling/nodes-scheduler-taints-tolerations.adoc +// * nodes/nodes-scheduler-taints-tolerations.adoc +// * post_installation_configuration/node-tasks.adoc + [id="nodes-scheduler-taints-tolerations-about_{context}"] = Understanding taints and tolerations @@ -76,9 +79,9 @@ The following taints are built into kubernetes: * `node.cloudprovider.kubernetes.io/uninitialized`: When the node controller is started with an external cloud provider, this taint is set on a node to mark it as unusable. After a controller from the cloud-controller-manager initializes this node, the kubelet removes this taint.
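A compact, hedged illustration of the taint-and-toleration pairing described above; `node1`, `key1`, and `value1` are placeholders:

[source,terminal]
----
$ oc adm taint nodes node1 key1=value1:NoSchedule
----

A Pod tolerates that taint, and so remains schedulable on `node1`, when its spec carries a matching toleration:

[source,yaml]
----
tolerations:
- key: "key1"
  operator: "Equal"
  value: "value1"
  effect: "NoSchedule"
----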
[id="nodes-scheduler-taints-tolerations-about-seconds_{context}"] -== Understanding how to use toleration seconds to delay pod evictions +== Understanding how to use toleration seconds to delay Pod evictions -You can specify how long a Pod can remain bound to a node before being evicted by specifying the `tolerationSeconds` parameter in the Pod specification. If a taint with the `NoExecute` effect is added to a node, any Pods that do not tolerate the taint are evicted immediately (Pods that do tolerate the taint are not evicted). However, if a Pod that to be evicted has the `tolerationSeconds` parameter, the Pod is not evicted until that time period expires. +You can specify how long a Pod can remain bound to a node before being evicted by specifying the `tolerationSeconds` parameter in the Pod specification. If a taint with the `NoExecute` effect is added to a node, any Pods that do not tolerate the taint are evicted immediately. Pods that do tolerate the taint are not evicted. However, if a Pod that does tolerate the taint has the `tolerationSeconds` parameter, the Pod is not evicted until that time period expires. .Example output [source,yaml] @@ -149,26 +152,26 @@ In this case, the Pod cannot be scheduled onto the node, because there is no tol one of the three that is not tolerated by the Pod. [id="nodes-scheduler-taints-tolerations-about-prevent_{context}"] -== Preventing pod eviction for node problems +== Preventing Pod eviction for node problems The Taint-Based Evictions feature, enabled by default, adds a taint with the `NoExecute` effect to nodes that are not ready or are unreachable. This allows you to specify how long a Pod should remain bound to a node that becomes unreachable or not ready, rather than using the default of five minutes. For example, you might want to allow a Pod on an unreachable node if the workload is safe to remain running while a networking issue resolves. -If a node enters a not ready state, the node controller adds the `node.kubernetes.io/not-ready:NoExecute` taint to the node. If a node enters an unreachable state, the the node controller adds the `node.kubernetes.io/unreachable:NoExecute` taint to the node. +If a node enters a not ready state, the node controller adds the `node.kubernetes.io/not-ready:NoExecute` taint to the node. If a node enters an unreachable state, the node controller adds the `node.kubernetes.io/unreachable:NoExecute` taint to the node. -The `NoExecute` taint affects Pods that are already running on the node as follows: +The `NoExecute` taint affects Pods that are already running on the node in the following ways: * Pods that do not tolerate the taint are evicted immediately. * Pods that tolerate the taint without specifying `tolerationSeconds` in their toleration specification remain bound forever. * Pods that tolerate the taint with a specified `tolerationSeconds` remain bound for the specified amount of time. [id="nodes-scheduler-taints-tolerations-about-taintNodesByCondition_{context}"] -== Understanding pod scheduling and node conditions (Taint Node by Condition) +== Understanding Pod scheduling and node conditions (Taint Node by Condition) -{product-title} automatically taints nodes that report conditions such as memory pressure and disk pressure. If a node reports a condition, a taint is added until the condition clears. The taints have the `NoSchedule` effect, which means no Pod can be scheduled on the node, unless the Pod has a matching toleration. This feature, *Taint Nodes By Condition*, is enabled by default. 
- -The scheduler checks for these taints on nodes before scheduling Pods. If the taint is present, the Pod is scheduled on a different node. Because the scheduler checks for taints and not the actual Node conditions, you configure the scheduler to ignore some of these node conditions by adding appropriate Pod tolerations. +{product-title} automatically taints nodes that report conditions such as memory pressure and disk pressure. If a node reports a condition, a taint is added until the condition clears. The taints have the `NoSchedule` effect, which means no Pod can be scheduled on the node unless the Pod has a matching toleration. This feature, *Taint Nodes By Condition*, is enabled by default. -The DaemonSet controller automatically adds the following tolerations to all daemons, to ensure backward compatibility: +The scheduler checks for these taints on nodes before scheduling Pods. If the taint is present, the Pod is scheduled on a different node. Because the scheduler checks for taints and not the actual node conditions, you configure the scheduler to ignore some of these node conditions by adding appropriate Pod tolerations. + +To ensure backward compatibility, the DaemonSet controller automatically adds the following tolerations to all daemons: * node.kubernetes.io/memory-pressure * node.kubernetes.io/disk-pressure @@ -181,7 +184,7 @@ You can also add arbitrary tolerations to DaemonSets. [id="nodes-scheduler-taints-tolerations-about-taintBasedEvictions_{context}"] == Understanding evicting pods by condition (Taint-Based Evictions) -The Taint-Based Evictions feature, enabled by default, evicts Pods from a node that experiences specific conditions, such as `not-ready` and `unreachable`. +The Taint-Based Evictions feature, enabled by default, evicts Pods from a node that experiences specific conditions, such as `not-ready` and `unreachable`. When a node experiences one of these conditions, {product-title} automatically adds taints to the node, and starts evicting and rescheduling the Pods on different nodes. Taint Based Evictions has a `NoExecute` effect, where any Pod that does not tolerate the taint will be evicted immediately and any Pod that does tolerate the taint will never be evicted. @@ -191,7 +194,7 @@ Taint Based Evictions has a `NoExecute` effect, where any Pod that does not tole {product-title} evicts Pods in a rate-limited way to prevent massive Pod evictions in scenarios such as the master becoming partitioned from the nodes. ==== -This feature, in combination with `tolerationSeconds`, allows you to specify how long a Pod should stay bound to a node that has a node condition. If the condition still exists after the `tolerationSections` period, the taint remains on the node and the Pods are evicted in a rate-limited manner. If the condition clears before the `tolerationSeconds` period, Pods are not removed. +This feature, in combination with `tolerationSeconds`, allows you to specify how long a Pod stays bound to a node that has a node condition. If the condition still exists after the `tolerationSeconds` period, the taint remains on the node and the Pods are evicted in a rate-limited manner. If the condition clears before the `tolerationSeconds` period, Pods are not removed. {product-title} automatically adds a toleration for `node.kubernetes.io/not-ready` and `node.kubernetes.io/unreachable` with `tolerationSeconds=300`, unless the Pod configuration specifies either toleration.
@@ -209,9 +212,9 @@ spec tolerationSeconds: 300 ---- -These tolerations ensure that the default Pod behavior is to remain bound for 5 minutes after one of these node conditions problems is detected. +These tolerations ensure that the default Pod behavior is to remain bound for five minutes after one of these node conditions is detected. -You can configure these tolerations as needed. For example, if you have an application with a lot of local state you might want to keep the Pods bound to node for a longer time in the event of network partition, allowing for the partition to recover and avoiding Pod eviction. +You can configure these tolerations as needed. For example, if you have an application with a lot of local state, you might want to keep the Pods bound to the node for a longer time in the event of a network partition, which allows the partition to recover and avoids Pod eviction. DaemonSet Pods are created with NoExecute tolerations for the following taints with no tolerationSeconds: diff --git a/modules/nodes-scheduler-taints-tolerations-adding.adoc b/modules/nodes-scheduler-taints-tolerations-adding.adoc index 9980b39951..42f84fed7d 100644 --- a/modules/nodes-scheduler-taints-tolerations-adding.adoc +++ b/modules/nodes-scheduler-taints-tolerations-adding.adoc @@ -1,6 +1,7 @@ // Module included in the following assemblies: // // * nodes/nodes-scheduler-taints-tolerations.adoc +// * post_installation_configuration/node-tasks.adoc [id="nodes-scheduler-taints-tolerations-adding_{context}"] = Adding taints and tolerations diff --git a/modules/nodes-scheduler-taints-tolerations-binding.adoc b/modules/nodes-scheduler-taints-tolerations-binding.adoc index 6dde43dc58..11f71fe49a 100644 --- a/modules/nodes-scheduler-taints-tolerations-binding.adoc +++ b/modules/nodes-scheduler-taints-tolerations-binding.adoc @@ -1,6 +1,7 @@ // Module included in the following assemblies: // // * nodes/nodes-scheduler-taints-tolerations.adoc +// * post_installation_configuration/node-tasks.adoc [id="nodes-scheduler-taints-tolerations-bindings_{context}"] = Binding a user to a Node using taints and tolerations diff --git a/modules/nodes-scheduler-taints-tolerations-dedicating.adoc b/modules/nodes-scheduler-taints-tolerations-dedicating.adoc index 28f810f644..ec12f2a7ba 100644 --- a/modules/nodes-scheduler-taints-tolerations-dedicating.adoc +++ b/modules/nodes-scheduler-taints-tolerations-dedicating.adoc @@ -1,6 +1,7 @@ // Module included in the following assemblies: // // * nodes/nodes-scheduler-taints-tolerations.adoc +// * post_installation_configuration/node-tasks.adoc [id="nodes-scheduler-taints-tolerations_dedicating_{context}"] = Dedicating a Node for a User using taints and tolerations diff --git a/modules/nodes-scheduler-taints-tolerations-removing.adoc b/modules/nodes-scheduler-taints-tolerations-removing.adoc index 95f2e895af..0f7b5f2dc3 100644 --- a/modules/nodes-scheduler-taints-tolerations-removing.adoc +++ b/modules/nodes-scheduler-taints-tolerations-removing.adoc @@ -1,6 +1,7 @@ // Module included in the following assemblies: // // * nodes/nodes-scheduler-taints-tolerations.adoc +// * post_installation_configuration/node-tasks.adoc [id="nodes-scheduler-taints-tolerations-removing_{context}"] = Removing taints and tolerations diff --git a/modules/nodes-scheduler-taints-tolerations-special.adoc b/modules/nodes-scheduler-taints-tolerations-special.adoc index ec99e51bbb..58dfe1dafe 100644 ---
b/modules/nodes-scheduler-taints-tolerations-special.adoc @@ -1,6 +1,7 @@ // Module included in the following assemblies: // // * nodes/nodes-scheduler-taints-tolerations.adoc +// * post_installation_configuration/node-tasks.adoc [id="nodes-scheduler-taints-tolerations-special_{context}"] = Controlling Nodes with special hardware using taints and tolerations diff --git a/modules/nw-networkpolicy-about.adoc b/modules/nw-networkpolicy-about.adoc index 1835557351..1eff7a6cfd 100644 --- a/modules/nw-networkpolicy-about.adoc +++ b/modules/nw-networkpolicy-about.adoc @@ -1,6 +1,8 @@ // Module included in the following assemblies: // // * networking/network_policy/about-network-policy.adoc +// * networking/configuring-networkpolicy.adoc +// * post_installation_configuration/network-configuration.adoc [id="nw-networkpolicy-about_{context}"] @@ -11,8 +13,10 @@ In {product-title} {product-version}, OpenShift SDN supports using NetworkPolicy [NOTE] ==== -The Kubernetes `v1` NetworkPolicy features are available in {product-title} -except for egress policy types and IPBlock. +OpenShift SDN supports IPBlock in NetworkPolicy with limitations: it supports +IPBlock without except clauses. If you create a policy with an IPBlock section +that includes an except clause, the SDN Pods log warnings and the entire +IPBlock section of that policy is ignored. ==== [WARNING] diff --git a/modules/nw-networkpolicy-create.adoc b/modules/nw-networkpolicy-create.adoc index a1c7e31b67..b89dbdaf45 100644 --- a/modules/nw-networkpolicy-create.adoc +++ b/modules/nw-networkpolicy-create.adoc @@ -1,6 +1,8 @@ // Module included in the following assemblies: // // * networking/network_policy/creating-network-policy.adoc +// * networking/configuring-networkpolicy.adoc +// * post_installation_configuration/network-configuration.adoc [id="nw-networkpolicy-create_{context}"] diff --git a/modules/nw-networkpolicy-delete.adoc b/modules/nw-networkpolicy-delete.adoc index a8f7d65e5b..50f6b4c256 100644 --- a/modules/nw-networkpolicy-delete.adoc +++ b/modules/nw-networkpolicy-delete.adoc @@ -1,6 +1,8 @@ // Module included in the following assemblies: // // * networking/network_policy/deleting-network-policy.adoc +// * networking/configuring-networkpolicy.adoc +// * post_installation_configuration/network-configuration.adoc [id="nw-networkpolicy-delete_{context}"] diff --git a/modules/nw-networkpolicy-multitenant-isolation.adoc b/modules/nw-networkpolicy-multitenant-isolation.adoc index 396381197e..453c78b755 100644 --- a/modules/nw-networkpolicy-multitenant-isolation.adoc +++ b/modules/nw-networkpolicy-multitenant-isolation.adoc @@ -1,6 +1,8 @@ // Module included in the following assemblies: // // * networking/network_policy/multitenant-network-policy.adoc +// * networking/configuring-networkpolicy.adoc +// * post_installation_configuration/network-configuration.adoc [id="nw-networkpolicy-multitenant-isolation_{context}"] = Configuring multitenant isolation using NetworkPolicy diff --git a/modules/nw-networkpolicy-object.adoc b/modules/nw-networkpolicy-object.adoc index 7a4c44a2bb..2d306ea95f 100644 --- a/modules/nw-networkpolicy-object.adoc +++ b/modules/nw-networkpolicy-object.adoc @@ -3,6 +3,8 @@ // * networking/network_policy/creating-network-policy.adoc // * networking/network_policy/viewing-network-policy.adoc // * networking/network_policy/editing-network-policy.adoc +// * networking/configuring-networkpolicy.adoc +// * post_installation_configuration/network-configuration.adoc [id="nw-networkpolicy-object_{context}"]
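To make the IPBlock limitation noted above concrete, here is a minimal hedged sketch of a policy shape that OpenShift SDN accepts; the name and CIDR are placeholders, and adding an `except` clause under `ipBlock` would cause the entire IPBlock section to be ignored:

[source,yaml]
----
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: allow-from-cidr       # hypothetical name
spec:
  podSelector: {}             # applies to all Pods in the namespace
  ingress:
  - from:
    - ipBlock:
        cidr: 172.17.0.0/16   # supported: no except clause
----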
@@ -21,7 +23,7 @@ spec: matchLabels: app: mongodb ingress: - - from: + - from: - podSelector: <3> matchLabels: app: app diff --git a/modules/nw-networkpolicy-project-defaults.adoc b/modules/nw-networkpolicy-project-defaults.adoc index 545fea0ced..39a0292f0f 100644 --- a/modules/nw-networkpolicy-project-defaults.adoc +++ b/modules/nw-networkpolicy-project-defaults.adoc @@ -1,6 +1,8 @@ // Module included in the following assemblies: // // * networking/network_policy/default-network-policy.adoc +// * networking/configuring-networkpolicy.adoc +// * post_installation_configuration/network-configuration.adoc [id="nw-networkpolicy-project-defaults_{context}"] = Adding network policy objects to the new project template diff --git a/modules/nw-networkpolicy-view.adoc b/modules/nw-networkpolicy-view.adoc index 01488f323f..7562974767 100644 --- a/modules/nw-networkpolicy-view.adoc +++ b/modules/nw-networkpolicy-view.adoc @@ -1,6 +1,8 @@ // Module included in the following assemblies: // // * networking/network_policy/viewing-network-policy.adoc +// * networking/configuring-networkpolicy.adoc +// * post_installation_configuration/network-configuration.adoc [id="nw-networkpolicy-view_{context}"] diff --git a/modules/nw-operator-cr.adoc b/modules/nw-operator-cr.adoc index 6b7a99175f..f2dad8fb65 100644 --- a/modules/nw-operator-cr.adoc +++ b/modules/nw-operator-cr.adoc @@ -6,6 +6,7 @@ // * installing/installing_bare_metal/installing-bare-metal-network-customizations.adoc // * installing/installing_vsphere/installing-vsphere-network-customizations.adoc // * installing/installing_gcp/installing-gcp-network-customizations.adoc +// * post_installation_configuration/network-configuration.adoc // Installation assemblies need different details than the CNO operator does ifeval::["{context}" == "cluster-network-operator"] diff --git a/modules/nw-proxy-configure-object.adoc b/modules/nw-proxy-configure-object.adoc index 191109ca74..c964a2598a 100644 --- a/modules/nw-proxy-configure-object.adoc +++ b/modules/nw-proxy-configure-object.adoc @@ -2,6 +2,7 @@ // // * networking/configuring-a-custom-pki.adoc // * networking/enable-cluster-wide-proxy.adoc +// * post_installation_configuration/network-configuration.adoc [id="nw-proxy-configure-object_{context}"] = Enabling the cluster-wide proxy diff --git a/modules/olm-installing-from-operatorhub-using-cli.adoc b/modules/olm-installing-from-operatorhub-using-cli.adoc index e46002e8b6..e676a310bd 100644 --- a/modules/olm-installing-from-operatorhub-using-cli.adoc +++ b/modules/olm-installing-from-operatorhub-using-cli.adoc @@ -1,6 +1,7 @@ // Module included in the following assemblies: // // * operators/olm-adding-operators-to-cluster.adoc +// * post_installation_configuration/preparing-for-users.adoc [id="olm-installing-operator-from-operatorhub-using-cli_{context}"] = Installing from OperatorHub using the CLI diff --git a/modules/olm-installing-from-operatorhub-using-web-console.adoc b/modules/olm-installing-from-operatorhub-using-web-console.adoc index 0c9a19f485..a0e6c44e4f 100644 --- a/modules/olm-installing-from-operatorhub-using-web-console.adoc +++ b/modules/olm-installing-from-operatorhub-using-web-console.adoc @@ -1,6 +1,7 @@ // Module included in the following assemblies: // // * operators/olm-adding-operators-to-cluster.adoc +// * post_installation_configuration/preparing-for-users.adoc ifeval::["{context}" != "olm-adding-operators-to-a-cluster"] :filter-type: jaeger :filter-operator: Jaeger diff --git a/modules/olm-installing-operators-from-operatorhub.adoc 
b/modules/olm-installing-operators-from-operatorhub.adoc index 6625909ffe..8dfb6afa63 100644 --- a/modules/olm-installing-operators-from-operatorhub.adoc +++ b/modules/olm-installing-operators-from-operatorhub.adoc @@ -1,6 +1,7 @@ // Module included in the following assemblies: // // * operators/olm-adding-operators-to-cluster.adoc +// * post_installation_configuration/preparing-for-users.adoc [id="olm-installing-operators-from-operatorhub_{context}"] = Installing Operators from OperatorHub @@ -14,7 +15,9 @@ endif::[] ifdef::openshift-dedicated[] web console. You can then subscribe the Operator to the default `openshift-operators` namespace to make it available for developers on your -cluster. +cluster. When you subscribe the Operator to all namespaces, the Operator is +installed in the `openshift-operators` namespace; this installation method is +not supported by all Operators. In {product-title} clusters, a curated list of Operators is made available for installation from OperatorHub. Administrators can only install Operators to diff --git a/modules/ossm-installation-activities.adoc b/modules/ossm-installation-activities.adoc index 733eddb608..9adafdc6f5 100644 --- a/modules/ossm-installation-activities.adoc +++ b/modules/ossm-installation-activities.adoc @@ -1,6 +1,7 @@ // Module included in the following assemblies: // // * service_mesh/service_mesh_install/preparing-ossm-installation.adoc +// * post_installation_configuration/network-configuration.adoc [id="ossm-installation-activities_{context}"] = {ProductName} installation activities diff --git a/modules/ossm-supported-configurations.adoc b/modules/ossm-supported-configurations.adoc index 7b75e71992..127f258b63 100644 --- a/modules/ossm-supported-configurations.adoc +++ b/modules/ossm-supported-configurations.adoc @@ -2,6 +2,7 @@ // // * service_mesh/service_mesh_install/preparing-ossm-install.adoc // * service_mesh/service_mesh_install/servicemesh-release-notes.adoc +// * post_installation_configuration/network-configuration.adoc [id="ossm-supported-configurations_{context}"] = {ProductName} supported configurations diff --git a/modules/private-clusters-about-aws.adoc b/modules/private-clusters-about-aws.adoc index 1c63247316..db4bc6fa48 100644 --- a/modules/private-clusters-about-aws.adoc +++ b/modules/private-clusters-about-aws.adoc @@ -1,6 +1,7 @@ // Module included in the following assemblies: // // * installing/installing_aws/installing-aws-private.adoc +// * post_installation_configuration/node-tasks.adoc [id="private-clusters-about-aws_{context}"] = Private clusters in AWS diff --git a/modules/private-clusters-setting-dns-private.adoc b/modules/private-clusters-setting-dns-private.adoc index b31207e42d..d7c0ff08a9 100644 --- a/modules/private-clusters-setting-dns-private.adoc +++ b/modules/private-clusters-setting-dns-private.adoc @@ -1,6 +1,7 @@ // Module included in the following assemblies: // // * installing/install_config/configuring-private-cluster.adoc +// * post_installation_configuration/network-configuration.adoc [id="private-clusters-setting-dns-private_{context}"] = Setting DNS to private diff --git a/modules/rbac-adding-roles.adoc b/modules/rbac-adding-roles.adoc index ecab72c23b..255caef435 100644 --- a/modules/rbac-adding-roles.adoc +++ b/modules/rbac-adding-roles.adoc @@ -1,6 +1,7 @@ // Module included in the following assemblies: // // * authentication/using-rbac.adoc +// * post_installation_configuration/preparing-for-users.adoc [id="adding-roles_{context}"] = Adding roles to users diff --git 
a/modules/rbac-cluster-role-binding-commands.adoc b/modules/rbac-cluster-role-binding-commands.adoc index 8831a7f331..2580d81eb3 100644 --- a/modules/rbac-cluster-role-binding-commands.adoc +++ b/modules/rbac-cluster-role-binding-commands.adoc @@ -1,6 +1,7 @@ // Module included in the following assemblies: // // * authentication/using-rbac.adoc +// * post_installation_configuration/preparing-for-users.adoc ifdef::openshift-enterprise,openshift-webscale,openshift-origin[] diff --git a/modules/rbac-creating-cluster-admin.adoc b/modules/rbac-creating-cluster-admin.adoc index 1ebea6107d..c2db72e855 100644 --- a/modules/rbac-creating-cluster-admin.adoc +++ b/modules/rbac-creating-cluster-admin.adoc @@ -1,6 +1,7 @@ // Module included in the following assemblies: // // * authentication/using-rbac.adoc +// * post_installation_configuration/preparing-for-users.adoc [id="creating-cluster-admin_{context}"] = Creating a cluster admin diff --git a/modules/rbac-creating-cluster-role.adoc b/modules/rbac-creating-cluster-role.adoc index 52db02022f..271945f02d 100644 --- a/modules/rbac-creating-cluster-role.adoc +++ b/modules/rbac-creating-cluster-role.adoc @@ -1,6 +1,7 @@ // Module included in the following assemblies: // // * authentication/using-rbac.adoc +// * post_installation_configuration/preparing-for-users.adoc ifdef::openshift-enterprise,openshift-webscale,openshift-origin[] [id="creating-cluster-role_{context}"] diff --git a/modules/rbac-creating-local-role.adoc b/modules/rbac-creating-local-role.adoc index cc0c0965d7..5d4c415470 100644 --- a/modules/rbac-creating-local-role.adoc +++ b/modules/rbac-creating-local-role.adoc @@ -1,6 +1,7 @@ // Module included in the following assemblies: // // * authentication/using-rbac.adoc +// * post_installation_configuration/preparing-for-users.adoc ifdef::openshift-enterprise,openshift-webscale,openshift-origin[] [id="creating-local-role_{context}"] diff --git a/modules/rbac-default-projects.adoc b/modules/rbac-default-projects.adoc index 4d32a697a9..f13be4d647 100644 --- a/modules/rbac-default-projects.adoc +++ b/modules/rbac-default-projects.adoc @@ -1,6 +1,7 @@ // Module included in the following assemblies: // // * authentication/using-rbac.adoc +// * post_installation_configuration/preparing-for-users.adoc [id="rbac-default-projects_{context}"] = Default projects diff --git a/modules/rbac-local-role-binding-commands.adoc b/modules/rbac-local-role-binding-commands.adoc index 74138d2775..2801473ee7 100644 --- a/modules/rbac-local-role-binding-commands.adoc +++ b/modules/rbac-local-role-binding-commands.adoc @@ -1,6 +1,7 @@ // Module included in the following assemblies: // // * authentication/using-rbac.adoc +// * post_installation_configuration/preparing-for-users.adoc [id="local-role-binding-commands_{context}"] = Local role binding commands diff --git a/modules/rbac-overview.adoc b/modules/rbac-overview.adoc index 6f1adc6eac..e9e08875a0 100644 --- a/modules/rbac-overview.adoc +++ b/modules/rbac-overview.adoc @@ -1,6 +1,7 @@ // Module included in the following assemblies: // // * authentication/using-rbac.adoc +// * post_installation_configuration/preparing-for-users.adoc [id="authorization-overview_{context}"] = RBAC overview @@ -79,8 +80,7 @@ For example: {product-title} includes a set of default cluster roles that you can bind to users and groups cluster-wide or locally. You can manually modify the default -cluster roles, if required, but you must take extra steps each time -you restart a master node. +cluster roles, if required. 
[cols="1,4",options="header"] |=== diff --git a/modules/rbac-projects-namespaces.adoc b/modules/rbac-projects-namespaces.adoc index 0a05e9d175..3ed95356b6 100644 --- a/modules/rbac-projects-namespaces.adoc +++ b/modules/rbac-projects-namespaces.adoc @@ -1,6 +1,7 @@ // Module included in the following assemblies: // // * authentication/using-rbac.adoc +// * post_installation_configuration/preparing-for-users.adoc [id="rbac-projects-namespaces_{context}"] = Projects and namespaces diff --git a/modules/rbac-viewing-cluster-roles.adoc b/modules/rbac-viewing-cluster-roles.adoc index 29ceb2f0a0..bc67c25362 100644 --- a/modules/rbac-viewing-cluster-roles.adoc +++ b/modules/rbac-viewing-cluster-roles.adoc @@ -1,6 +1,7 @@ // Module included in the following assemblies: // // * authentication/using-rbac.adoc +// * post_installation_configuration/preparing-for-users.adoc [id="viewing-cluster-roles_{context}"] = Viewing cluster roles and bindings diff --git a/modules/rbac-viewing-local-roles.adoc b/modules/rbac-viewing-local-roles.adoc index b730529211..a0d9179cbb 100644 --- a/modules/rbac-viewing-local-roles.adoc +++ b/modules/rbac-viewing-local-roles.adoc @@ -1,6 +1,7 @@ // Module included in the following assemblies: // // * authentication/using-rbac.adoc +// * post_installation_configuration/preparing-for-users.adoc [id="viewing-local-roles_{context}"] = Viewing local roles and bindings diff --git a/modules/recommended-configurable-storage-technology.adoc b/modules/recommended-configurable-storage-technology.adoc index 83fb0e94c5..c394325442 100644 --- a/modules/recommended-configurable-storage-technology.adoc +++ b/modules/recommended-configurable-storage-technology.adoc @@ -1,6 +1,7 @@ // Module included in the following assemblies: // // * storage/optimizing-storage.adoc +// * post_installation_configuration/storage-configuration.adoc [id="recommended-configurable-storage-technology_{context}"] = Recommended configurable storage technology diff --git a/modules/recommended-etcd-practices.adoc b/modules/recommended-etcd-practices.adoc index 5e9e15b94c..7b5971e713 100644 --- a/modules/recommended-etcd-practices.adoc +++ b/modules/recommended-etcd-practices.adoc @@ -1,19 +1,21 @@ // Module included in the following assemblies: // // * scalability_and_performance/recommended-host-practices.adoc +// * post_installation_configuration/cluster-tasks.adoc +// * post_installation_configuration/node-tasks.adoc [id="recommended-etcd-practices_{context}"] = Recommended etcd practices -For large and dense clusters, etcd can suffer from poor performance -if the keyspace grows excessively large and exceeds the space quota. -Periodic maintenance of etcd including defragmentation needs to be done -to free up space in the data store. It is highly recommended that you monitor -Prometheus for etcd metrics and defragment it when needed before etcd raises -a cluster-wide alarm that puts the cluster into a maintenance mode, which -only accepts key reads and deletes. Some of the key metrics to monitor are -`etcd_server_quota_backend_bytes` which is the current quota limit, -`etcd_mvcc_db_total_size_in_use_in_bytes` which indicates the actual -database usage after a history compaction, and -`etcd_debugging_mvcc_db_total_size_in_bytes` which shows the database size +For large and dense clusters, etcd can suffer from poor performance +if the keyspace grows excessively large and exceeds the space quota. +Periodic maintenance of etcd, including defragmentation, must be performed +to free up space in the data store. 
It is highly recommended that you monitor +Prometheus for etcd metrics and defragment etcd when required before etcd raises +a cluster-wide alarm that puts the cluster into a maintenance mode, which +only accepts key reads and deletes. Some of the key metrics to monitor are +`etcd_server_quota_backend_bytes`, which is the current quota limit; +`etcd_mvcc_db_total_size_in_use_in_bytes`, which indicates the actual +database usage after a history compaction; and +`etcd_debugging_mvcc_db_total_size_in_bytes`, which shows the database size including free space waiting for defragmentation. diff --git a/modules/recommended-node-host-practices.adoc b/modules/recommended-node-host-practices.adoc index 2a7454f8bf..ea1a9c2df3 100644 --- a/modules/recommended-node-host-practices.adoc +++ b/modules/recommended-node-host-practices.adoc @@ -1,6 +1,7 @@ // Module included in the following assemblies: // // * scalability_and_performance/recommended-host-practices.adoc +// * post_installation_configuration/node-tasks.adoc [id="recommended-node-host-practices_{context}"] = Recommended node host practices diff --git a/modules/rhel-adding-node.adoc b/modules/rhel-adding-node.adoc index 5f6b593219..456ab25a4a 100644 --- a/modules/rhel-adding-node.adoc +++ b/modules/rhel-adding-node.adoc @@ -1,6 +1,7 @@ // Module included in the following assemblies: // // * machine_management/adding-rhel-compute.adoc +// * post_installation_configuration/node-tasks.adoc [id="rhel-adding-node_{context}"] = Adding a RHEL compute machine to your cluster diff --git a/modules/rhel-ansible-parameters.adoc b/modules/rhel-ansible-parameters.adoc index 6a233a5049..c6d55655da 100644 --- a/modules/rhel-ansible-parameters.adoc +++ b/modules/rhel-ansible-parameters.adoc @@ -2,6 +2,7 @@ // // * machine_management/adding-rhel-compute.adoc // * machine_management/more-rhel-compute.adoc +// * post_installation_configuration/node-tasks.adoc [id="rhel-ansible-parameters_{context}"] = Required parameters for the Ansible hosts file diff --git a/modules/rhel-compute-overview.adoc b/modules/rhel-compute-overview.adoc index ab3dc6ab39..f47d4c1f81 100644 --- a/modules/rhel-compute-overview.adoc +++ b/modules/rhel-compute-overview.adoc @@ -2,6 +2,7 @@ // // * machine_management/adding-rhel-compute.adoc // * machine_management/more-rhel-compute.adoc +// * post_installation_configuration/node-tasks.adoc [id="rhel-compute-overview_{context}"] = About adding RHEL compute nodes to a cluster diff --git a/modules/rhel-compute-requirements.adoc b/modules/rhel-compute-requirements.adoc index df6f9123b9..e4ceb21633 100644 --- a/modules/rhel-compute-requirements.adoc +++ b/modules/rhel-compute-requirements.adoc @@ -2,6 +2,8 @@ // // * machine_management/adding-rhel-compute.adoc // * machine_management/more-rhel-compute.adoc +// * post_installation_configuration/node-tasks.adoc + [id="rhel-compute-requirements_{context}"] = System requirements for RHEL compute nodes diff --git a/modules/rhel-preparing-node.adoc b/modules/rhel-preparing-node.adoc index 0430913c35..335e9d66e3 100644 --- a/modules/rhel-preparing-node.adoc +++ b/modules/rhel-preparing-node.adoc @@ -2,6 +2,7 @@ // // * machine_management/adding-rhel-compute.adoc // * machine_management/more-rhel-compute.adoc +// * post_installation_configuration/node-tasks.adoc [id="rhel-preparing-node_{context}"] = Preparing a RHEL compute node diff --git a/modules/rhel-preparing-playbook-machine.adoc b/modules/rhel-preparing-playbook-machine.adoc index ab2ac4dda0..525e10b63a 100644 ---
a/modules/rhel-preparing-playbook-machine.adoc +++ b/modules/rhel-preparing-playbook-machine.adoc @@ -1,6 +1,7 @@ // Module included in the following assemblies: // // * machine_management/adding-rhel-compute.adoc +// * post_installation_configuration/node-tasks.adoc [id="rhel-preparing-playbook-machine_{context}"] = Preparing the machine to run the playbook diff --git a/modules/rhel-removing-rhcos.adoc b/modules/rhel-removing-rhcos.adoc index 1e6b57e9e2..fba7f90738 100644 --- a/modules/rhel-removing-rhcos.adoc +++ b/modules/rhel-removing-rhcos.adoc @@ -1,6 +1,7 @@ // Module included in the following assemblies: // // * machine_management/adding-rhel-compute.adoc +// * post_installation_configuration/node-tasks.adoc [id="rhel-removing-rhcos_{context}"] = Removing RHCOS compute machines from a cluster diff --git a/modules/router-performance-optimizations.adoc b/modules/router-performance-optimizations.adoc index 9514614b42..729561c08a 100644 --- a/modules/router-performance-optimizations.adoc +++ b/modules/router-performance-optimizations.adoc @@ -1,3 +1,7 @@ +// Module included in the following assemblies: +// * scalability_and_performance/routing-optimization.adoc +// * post_installation_configuration/network-configuration.adoc + [id="router-performance-optimizations_{context}"] = Ingress Controller (router) performance optimizations diff --git a/modules/setting-up-cpu-manager.adoc b/modules/setting-up-cpu-manager.adoc index 461d4fb446..ad669adfcc 100644 --- a/modules/setting-up-cpu-manager.adoc +++ b/modules/setting-up-cpu-manager.adoc @@ -1,6 +1,7 @@ // Module included in the following assemblies: // // * scaling_and_performance/using-cpu-manager.adoc +// * post_installation_configuration/node-tasks.adoc [id="seting_up_cpu_manager_{context}"] = Setting up CPU Manager diff --git a/modules/setting-up-topology-manager.adoc b/modules/setting-up-topology-manager.adoc index dcb27ced9c..a8bf9eae0f 100644 --- a/modules/setting-up-topology-manager.adoc +++ b/modules/setting-up-topology-manager.adoc @@ -1,6 +1,7 @@ // Module included in the following assemblies: // // * scaling_and_performance/using-topology-manager.adoc +// * post_installation_configuration/node-tasks.adoc [id="seting_up_topology_manager_{context}"] = Setting up Topology Manager diff --git a/modules/topology-manager-policies.adoc b/modules/topology-manager-policies.adoc index 6f4d54f470..77370bdb9b 100644 --- a/modules/topology-manager-policies.adoc +++ b/modules/topology-manager-policies.adoc @@ -1,6 +1,7 @@ // Module included in the following assemblies: // // * scaling_and_performance/using-topology-manager.adoc +// * post_installation_configuration/node-tasks.adoc [id="topology_manager_policies_{context}"] = Topology Manager policies diff --git a/modules/what-huge-pages-do.adoc b/modules/what-huge-pages-do.adoc index 1a8c086c13..7633340b9a 100644 --- a/modules/what-huge-pages-do.adoc +++ b/modules/what-huge-pages-do.adoc @@ -2,6 +2,7 @@ // // * scalability_and_performance/what-huge-pages-do-and-how-they-are-consumed-by-apps.adoc // * virt/virtual_machines/advanced_vm_management/virt-using-huge-pages-with-vms.adoc +// * post_installation_configuration/node-tasks.adoc ifeval::["{context}" == "huge-pages"] :ocp-hugepages: @@ -37,7 +38,7 @@ reason, some applications may be designed to (or recommend) usage of pre-allocated huge pages instead of THP. 
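As a hedged sketch of the pre-allocated approach mentioned above (resource names follow the `hugepages-<size>` convention; the image and values are illustrative, and the consumption module included in the node tasks assembly is authoritative):

[source,yaml]
----
spec:
  containers:
  - name: hugepages-example
    image: example/app          # hypothetical image
    volumeMounts:
    - mountPath: /dev/hugepages # huge pages are exposed to the app through this mount
      name: hugepage
    resources:
      limits:
        hugepages-2Mi: 100Mi    # consumes pre-allocated 2Mi huge pages
        memory: 100Mi
        cpu: "1"
  volumes:
  - name: hugepage
    emptyDir:
      medium: HugePages
----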
ifdef::ocp-hugepages[] -In {product-title}, applications in a pod can allocate and consume pre-allocated +In {product-title}, applications in a Pod can allocate and consume pre-allocated huge pages. endif::ocp-hugepages[] @@ -54,4 +55,3 @@ endif::[] ifeval::["{context}" == "virt-using-huge-pages-with-vms"] :virt-hugepages!: endif::[] - diff --git a/post_installation_configuration/cluster-tasks.adoc b/post_installation_configuration/cluster-tasks.adoc new file mode 100644 index 0000000000..8696006e72 --- /dev/null +++ b/post_installation_configuration/cluster-tasks.adoc @@ -0,0 +1,79 @@ +:context: post-install-cluster-tasks +[id="post-install-cluster-tasks"] += Post-installation cluster tasks +include::modules/common-attributes.adoc[] +toc::[] + +After installing {product-title}, you can further expand and customize your +cluster to your requirements. + +[id="post-install-adjust-worker-nodes"] +== Adjust worker nodes +If you incorrectly sized the worker nodes during deployment, adjust them by +creating one or more new MachineSets, scaling them up, and then scaling down +the original MachineSets before removing them. + +include::modules/differences-between-machinesets-and-machineconfigpool.adoc[leveloffset=+2] + +include::modules/machineset-manually-scaling.adoc[leveloffset=+2] + +[id="post-install-creating-infrastructure-machinesets"] +== Creating infrastructure MachineSets + +You can create a MachineSet to host only infrastructure components. +You apply specific Kubernetes labels to these Machines and then +update the infrastructure components to run on only those Machines. These +infrastructure nodes are not counted toward the total number of subscriptions +that are required to run the environment. + +include::modules/infrastructure-components.adoc[leveloffset=+2] + +[id="post-install-creating-infrastructure-machinesets-production"] +=== Creating infrastructure MachineSets for production environments + +In a production deployment, deploy at least three MachineSets to hold +infrastructure components. Both the logging aggregation solution and +the service mesh deploy Elasticsearch, and Elasticsearch requires three +instances that are installed on different nodes. For high availability, +deploy these nodes to different availability zones. Because you need a +different MachineSet for each availability zone, create at least three +MachineSets. + +include::modules/machineset-creating.adoc[leveloffset=+3] + +[id="post-install-creating-infrastructure-machinesets-clouds"] +=== Creating MachineSets for different clouds +Use the sample MachineSet for your cloud. + +include::modules/machineset-yaml-aws.adoc[leveloffset=+3] +include::modules/machineset-yaml-azure.adoc[leveloffset=+3] +include::modules/machineset-yaml-gcp.adoc[leveloffset=+3] + +include::modules/cluster-autoscaler-about.adoc[leveloffset=+1] +include::modules/cluster-autoscaler-cr.adoc[leveloffset=+2] +:FeatureName: ClusterAutoscaler +include::modules/deploying-resource.adoc[leveloffset=+2] + +include::modules/machine-autoscaler-about.adoc[leveloffset=+1] +include::modules/machine-autoscaler-cr.adoc[leveloffset=+2] +:FeatureName: MachineAutoscaler +include::modules/deploying-resource.adoc[leveloffset=+2] + +include::modules/nodes-cluster-enabling-features-cluster.adoc[leveloffset=+1] + +[id="post-install-etcd-tasks"] +== etcd tasks +Enable or disable etcd encryption, back up etcd, or restore a cluster from an etcd backup.
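Before the detailed modules that follow, here is a hedged sketch of what taking a backup looks like, assuming the `cluster-backup.sh` script that ships on {product-title} control plane nodes; substitute a real master node name, and treat the backup-etcd module below as authoritative:

[source,terminal]
----
$ oc debug node/<master_node>
sh-4.4# chroot /host
sh-4.4# /usr/local/bin/cluster-backup.sh /home/core/assets/backup
----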
+ +include::modules/recommended-etcd-practices.adoc[leveloffset=+2] +include::modules/about-etcd-encryption.adoc[leveloffset=+2] +include::modules/enabling-etcd-encryption.adoc[leveloffset=+2] +include::modules/disabling-etcd-encryption.adoc[leveloffset=+2] +include::modules/backup-etcd.adoc[leveloffset=+2] +include::modules/dr-restoring-cluster-state.adoc[leveloffset=+2] + +[id="post-install-pod-disruption-budgets"] +== Pod disruption budgets +Understand and configure Pod disruption budgets. + +include::modules/nodes-pods-pod-disruption-about.adoc[leveloffset=+2] +include::modules/nodes-pods-pod-disruption-configuring.adoc[leveloffset=+2] diff --git a/post_installation_configuration/network-configuration.adoc b/post_installation_configuration/network-configuration.adoc new file mode 100644 index 0000000000..f2deb73a05 --- /dev/null +++ b/post_installation_configuration/network-configuration.adoc @@ -0,0 +1,87 @@ +:context: post-install-network-configuration +[id="post-install-network-configuration"] += Post-installation network configuration +include::modules/common-attributes.adoc[] +include::modules/ossm-document-attributes.adoc[] +toc::[] + +After installing {product-title}, you can further expand and customize your +network to your requirements. + +[id="post-install-configuring-network-policy"] +== Configuring NetworkPolicy with OpenShift SDN + +Understand and work with network policy. + +include::modules/nw-networkpolicy-about.adoc[leveloffset=+2] +include::modules/nw-networkpolicy-object.adoc[leveloffset=+2] +include::modules/nw-networkpolicy-create.adoc[leveloffset=+2] +include::modules/nw-networkpolicy-delete.adoc[leveloffset=+2] +include::modules/nw-networkpolicy-view.adoc[leveloffset=+2] +include::modules/nw-networkpolicy-multitenant-isolation.adoc[leveloffset=+2] + +ifdef::openshift-enterprise,openshift-webscale,openshift-origin[] +[id="post-install-nw-networkpolicy-creating-default-networkpolicy-objects-for-a-new-project"] +=== Creating default network policies for a new project + +As a cluster administrator, you can modify the new project template to +automatically include NetworkPolicy objects when you create a new project. + +include::modules/modifying-template-for-new-projects.adoc[leveloffset=+2] + +include::modules/nw-networkpolicy-project-defaults.adoc[leveloffset=+3] +endif::[] + +include::modules/private-clusters-setting-dns-private.adoc[leveloffset=+1] + +include::modules/nw-proxy-configure-object.adoc[leveloffset=+1] + +include::modules/nw-operator-cr.adoc[leveloffset=+1] + +[id="post-install-configuring_ingress_cluster_traffic"] +== Configuring ingress cluster traffic + +// This section is sourced from networking/configuring_ingress_cluster_traffic/overview-traffic.adoc + +{product-title} provides the following methods for communicating from +outside the cluster with services running in the cluster: + +* If you have HTTP/HTTPS, use an Ingress Controller. +* If you have a TLS-encrypted protocol other than HTTPS, such as TLS with the SNI +header, use an Ingress Controller. +* Otherwise, use a Load Balancer, an External IP, or a `NodePort`. + +[options="header"] +|=== + +|Method |Purpose + +|xref:../networking/configuring_ingress_cluster_traffic/configuring-ingress-cluster-traffic-ingress-controller.adoc#configuring-ingress-cluster-traffic-ingress-controller[Use an Ingress Controller] +|Allows access to HTTP/HTTPS traffic and TLS-encrypted protocols other than HTTPS, such as TLS with the SNI header.
+ +|xref:../networking/configuring_ingress_cluster_traffic/configuring-ingress-cluster-traffic-load-balancer.adoc#configuring-ingress-cluster-traffic-load-balancer[Automatically assign an external IP by using a load balancer service] +|Allows traffic to non-standard ports through an IP address assigned from a pool. + +|xref:../networking/configuring_ingress_cluster_traffic/configuring-ingress-cluster-traffic-service-external-ip.adoc#configuring-ingress-cluster-traffic-service-external-ip[Manually assign an external IP to a service] +|Allows traffic to non-standard ports through a specific IP address. + +|xref:../networking/configuring_ingress_cluster_traffic/configuring-ingress-cluster-traffic-nodeport.adoc#configuring-ingress-cluster-traffic-nodeport[Configure a `NodePort`] +|Exposes a service on all nodes in the cluster. +|=== + +include::modules/ossm-supported-configurations.adoc[leveloffset=+1] + +include::modules/ossm-installation-activities.adoc[leveloffset=+2] + +.Next steps + +* xref:../service_mesh/service_mesh_install/installing-ossm.adoc#installing-ossm[Install {ProductName}] in your {product-title} environment. + +[id="post-installation-routing-optimization"] +== Optimizing routing + +The {product-title} HAProxy router scales to optimize performance. + +include::modules/baseline-router-performance.adoc[leveloffset=+2] + +include::modules/router-performance-optimizations.adoc[leveloffset=+2] diff --git a/post_installation_configuration/node-tasks.adoc b/post_installation_configuration/node-tasks.adoc new file mode 100644 index 0000000000..66781a621a --- /dev/null +++ b/post_installation_configuration/node-tasks.adoc @@ -0,0 +1,117 @@ +:context: post-install-node-tasks +[id="post-install-node-tasks"] += Post-installation node tasks +include::modules/common-attributes.adoc[] +toc::[] + +After installing {product-title}, you can further expand and customize your +cluster to your requirements through certain node tasks. + +[id="post-install-config-adding-rhel-compute"] +== Adding RHEL compute machines to an {product-title} cluster +Understand and work with RHEL compute nodes.
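As a hedged sketch of the Ansible inventory shape used when adding RHEL compute machines (the host names are placeholders, and the required parameters module included below is authoritative):

[source,ini]
----
[all:vars]
ansible_user=root
openshift_kubeconfig_path="~/.kube/config"

[new_workers]
mycluster-worker-0.example.com
mycluster-worker-1.example.com
----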
+ +include::modules/rhel-compute-overview.adoc[leveloffset=+2] +include::modules/rhel-compute-requirements.adoc[leveloffset=+2] +include::modules/rhel-preparing-playbook-machine.adoc[leveloffset=+2] +include::modules/rhel-preparing-node.adoc[leveloffset=+2] +include::modules/rhel-adding-node.adoc[leveloffset=+2] +include::modules/rhel-ansible-parameters.adoc[leveloffset=+2] +include::modules/rhel-removing-rhcos.adoc[leveloffset=+2] + +[id="post-installation-config-deploying-machine-health-checks"] +== Deploying MachineHealthChecks + +Understand and deploy MachineHealthChecks. + +include::modules/machine-user-provisioned-limitations.adoc[leveloffset=+2] +include::modules/machine-health-checks-about.adoc[leveloffset=+2] +include::modules/machine-health-checks-resource.adoc[leveloffset=+2] +include::modules/machine-health-checks-creating.adoc[leveloffset=+2] +include::modules/machineset-manually-scaling.adoc[leveloffset=+2] +include::modules/differences-between-machinesets-and-machineconfigpool.adoc[leveloffset=+2] + +include::modules/recommended-node-host-practices.adoc[leveloffset=+1] +include::modules/create-a-kubeletconfig-crd-to-edit-kubelet-parameters.adoc[leveloffset=+2] +include::modules/master-node-sizing.adoc[leveloffset=+2] +include::modules/recommended-etcd-practices.adoc[leveloffset=+2] +include::modules/setting-up-cpu-manager.adoc[leveloffset=+2] + +[id="post-install-huge-pages"] +== Huge pages +Understand and configure huge pages. + +include::modules/what-huge-pages-do.adoc[leveloffset=+2] +include::modules/how-huge-pages-are-consumed-by-apps.adoc[leveloffset=+2] +include::modules/configuring-huge-pages.adoc[leveloffset=+2] + +include::modules/nodes-pods-plugins-about.adoc[leveloffset=+1] +include::modules/nodes-pods-plugins-device-mgr.adoc[leveloffset=+2] +include::modules/nodes-pods-plugins-install.adoc[leveloffset=+2] + +[id="post-install-taints-tolerations"] +== Taints and tolerations +Understand and work with taints and tolerations. + +include::modules/nodes-scheduler-taints-tolerations-about.adoc[leveloffset=+2] +include::modules/nodes-scheduler-taints-tolerations-adding.adoc[leveloffset=+2] +include::modules/nodes-scheduler-taints-tolerations-dedicating.adoc[leveloffset=+2] +include::modules/nodes-scheduler-taints-tolerations-binding.adoc[leveloffset=+2] +include::modules/nodes-scheduler-taints-tolerations-special.adoc[leveloffset=+2] +include::modules/nodes-scheduler-taints-tolerations-removing.adoc[leveloffset=+2] + +[id="post-install-topology-manager"] +== Topology Manager +Understand and work with Topology Manager.
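One possible shape for enabling a Topology Manager policy through a KubeletConfig, offered only as a sketch; the name, the pool label, and the choice of `single-numa-node` are assumptions, and the setup module included below is authoritative:

[source,yaml]
----
apiVersion: machineconfiguration.openshift.io/v1
kind: KubeletConfig
metadata:
  name: topology-manager      # hypothetical name
spec:
  machineConfigPoolSelector:
    matchLabels:
      custom-kubelet: enabled # assumes the target pool carries this label
  kubeletConfig:
    cpuManagerPolicy: static                 # Topology Manager pairs with static CPU pinning
    topologyManagerPolicy: single-numa-node  # restricts resources to a single NUMA node
----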
+ +include::modules/topology-manager-policies.adoc[leveloffset=+2] +include::modules/setting-up-topology-manager.adoc[leveloffset=+2] +include::modules/pod-interactions-with-topology-manager.adoc[leveloffset=+2] + +include::modules/nodes-cluster-overcommit-resource-requests.adoc[leveloffset=+1] + +include::modules/nodes-cluster-resource-override.adoc[leveloffset=+1] + +include::modules/nodes-cluster-resource-override-deploy-console.adoc[leveloffset=+2] + +include::modules/nodes-cluster-resource-override-deploy-cli.adoc[leveloffset=+2] + +include::modules/nodes-cluster-resource-configure.adoc[leveloffset=+2] + +include::modules/nodes-cluster-node-overcommit.adoc[leveloffset=+1] + +include::modules/nodes-cluster-overcommit-resources-containers.adoc[leveloffset=+2] + +include::modules/nodes-cluster-overcommit-qos-about.adoc[leveloffset=+2] + +include::modules/nodes-cluster-overcommit-configure-nodes.adoc[leveloffset=+2] + +include::modules/nodes-cluster-overcommit-node-enforcing.adoc[leveloffset=+2] + +include::modules/nodes-cluster-overcommit-node-resources.adoc[leveloffset=+2] + +include::modules/nodes-cluster-overcommit-node-disable.adoc[leveloffset=+2] + +include::modules/nodes-cluster-project-overcommit.adoc[leveloffset=+1] + +include::modules/nodes-cluster-overcommit-project-disable.adoc[leveloffset=+2] + + +[id="post-install-garbage-collection"] +== Freeing node resources using garbage collection +Understand and use garbage collection. + +include::modules/nodes-nodes-garbage-collection-containers.adoc[leveloffset=+2] +include::modules/nodes-nodes-garbage-collection-images.adoc[leveloffset=+2] +include::modules/nodes-nodes-garbage-collection-configuring.adoc[leveloffset=+2] + +[id="post-using-node-tuning-operator"] +== Using the Node Tuning Operator +Understand and use the Node Tuning Operator. + +include::modules/node-tuning-operator.adoc[leveloffset=+2] +include::modules/accessing-an-example-cluster-node-tuning-operator-specification.adoc[leveloffset=+2] +include::modules/custom-tuning-specification.adoc[leveloffset=+2] +include::modules/cluster-node-tuning-operator-default-profiles-set.adoc[leveloffset=+2] +include::modules/node-tuning-operator-supported-tuned-daemon-plug-ins.adoc[leveloffset=+2] + +include::modules/nodes-nodes-managing-max-pods-proc.adoc[leveloffset=+1] diff --git a/post_installation_configuration/preparing-for-users.adoc b/post_installation_configuration/preparing-for-users.adoc new file mode 100644 index 0000000000..8c15713aed --- /dev/null +++ b/post_installation_configuration/preparing-for-users.adoc @@ -0,0 +1,140 @@ +:context: post-install-preparing-for-users +[id="post-install-preparing-for-users"] += Preparing for users +include::modules/common-attributes.adoc[] +toc::[] + +After installing {product-title}, you can further expand and customize your +cluster to your requirements, including taking steps to prepare for users. + +[id="post-install-understanding-identity-provider"] +== Understanding identity provider configuration + +The {product-title} control plane includes a built-in OAuth server. Developers and +administrators obtain OAuth access tokens to authenticate themselves to the API. + +As an administrator, you can configure OAuth to specify an identity provider +after you install your cluster. 
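For orientation, a hedged sketch of the cluster `OAuth` resource with a single htpasswd provider; the provider name and Secret name are placeholders:

[source,yaml]
----
apiVersion: config.openshift.io/v1
kind: OAuth
metadata:
  name: cluster
spec:
  identityProviders:
  - name: my_htpasswd_provider   # hypothetical provider name shown on the login page
    mappingMethod: claim
    type: HTPasswd
    htpasswd:
      fileData:
        name: htpass-secret      # assumes a Secret with the htpasswd file in openshift-config
----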
+
+include::modules/identity-provider-overview.adoc[leveloffset=+2]
+
+[id="post-install-supported-identity-providers"]
+=== Supported identity providers
+// This section is sourced from authentication/understanding-identity-provider.adoc
+You can configure the following types of identity providers:
+
+[cols="2a,8a",options="header"]
+|===
+
+|Identity provider
+|Description
+
+|xref:../authentication/identity_providers/configuring-htpasswd-identity-provider.adoc#configuring-htpasswd-identity-provider[HTPasswd]
+|Configure the `htpasswd` identity provider to validate user names and passwords
+against a flat file generated using
+link:http://httpd.apache.org/docs/2.4/programs/htpasswd.html[`htpasswd`].
+
+|xref:../authentication/identity_providers/configuring-keystone-identity-provider.adoc#configuring-keystone-identity-provider[Keystone]
+|Configure the `keystone` identity provider to integrate
+your {product-title} cluster with Keystone to enable shared authentication with
+an OpenStack Keystone v3 server configured to store users in an internal
+database.
+
+|xref:../authentication/identity_providers/configuring-ldap-identity-provider.adoc#configuring-ldap-identity-provider[LDAP]
+|Configure the `ldap` identity provider to validate user names and passwords
+against an LDAPv3 server, using simple bind authentication.
+
+|xref:../authentication/identity_providers/configuring-basic-authentication-identity-provider.adoc#configuring-basic-authentication-identity-provider[Basic authentication]
+|Configure a `basic-authentication` identity provider for users to log in to
+{product-title} with credentials validated against a remote identity provider.
+Basic authentication is a generic backend integration mechanism.
+
+|xref:../authentication/identity_providers/configuring-request-header-identity-provider.adoc#configuring-request-header-identity-provider[Request header]
+|Configure a `request-header` identity provider to identify users from request
+header values, such as `X-Remote-User`. It is typically used in combination with
+an authenticating proxy, which sets the request header value.
+
+|xref:../authentication/identity_providers/configuring-github-identity-provider.adoc#configuring-github-identity-provider[GitHub or GitHub Enterprise]
+|Configure a `github` identity provider to validate user names and passwords
+against GitHub or GitHub Enterprise's OAuth authentication server.
+
+|xref:../authentication/identity_providers/configuring-gitlab-identity-provider.adoc#configuring-gitlab-identity-provider[GitLab]
+|Configure a `gitlab` identity provider to use
+link:https://gitlab.com/[GitLab.com] or any other GitLab instance as an identity
+provider.
+
+|xref:../authentication/identity_providers/configuring-google-identity-provider.adoc#configuring-google-identity-provider[Google]
+|Configure a `google` identity provider using
+link:https://developers.google.com/identity/protocols/OpenIDConnect[Google's OpenID Connect integration].
+
+|xref:../authentication/identity_providers/configuring-oidc-identity-provider.adoc#configuring-oidc-identity-provider[OpenID Connect]
+|Configure an `oidc` identity provider to integrate with an OpenID Connect
+identity provider using an
+link:http://openid.net/specs/openid-connect-core-1_0.html#CodeFlowAuth[Authorization Code Flow].
+
+|===
+
+After you define an identity provider, you can
+xref:../authentication/using-rbac.adoc#authorization-overview_using-rbac[use
+RBAC to define and apply permissions].
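+
+For example, the flat file for the HTPasswd provider is typically generated
+with the Apache `htpasswd` utility and then stored as a secret. This is an
+illustrative sketch only; the file, user, password, and secret names are
+placeholders:
+
+[source,terminal]
+----
+$ htpasswd -c -B -b users.htpasswd user1 MyPassword!
+$ oc create secret generic htpass-secret \
+    --from-file=htpasswd=users.htpasswd -n openshift-config
+----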
+
+include::modules/identity-provider-parameters.adoc[leveloffset=+2]
+
+include::modules/identity-provider-default-CR.adoc[leveloffset=+2]
+
+[id="post-install-using-rbac-to-define-and-apply-permissions"]
+== Using RBAC to define and apply permissions
+
+Understand and apply role-based access control.
+
+include::modules/rbac-overview.adoc[leveloffset=+2]
+
+include::modules/rbac-projects-namespaces.adoc[leveloffset=+2]
+
+include::modules/rbac-default-projects.adoc[leveloffset=+2]
+
+include::modules/rbac-viewing-cluster-roles.adoc[leveloffset=+2]
+
+include::modules/rbac-viewing-local-roles.adoc[leveloffset=+2]
+
+include::modules/rbac-adding-roles.adoc[leveloffset=+2]
+
+include::modules/rbac-creating-local-role.adoc[leveloffset=+2]
+
+ifdef::openshift-enterprise,openshift-webscale,openshift-origin[]
+include::modules/rbac-creating-cluster-role.adoc[leveloffset=+2]
+endif::[]
+
+include::modules/rbac-local-role-binding-commands.adoc[leveloffset=+2]
+
+ifdef::openshift-enterprise,openshift-webscale,openshift-origin[]
+include::modules/rbac-cluster-role-binding-commands.adoc[leveloffset=+2]
+
+include::modules/rbac-creating-cluster-admin.adoc[leveloffset=+2]
+endif::[]
+
+include::modules/authentication-kubeadmin.adoc[leveloffset=+1]
+
+include::modules/authentication-remove-kubeadmin.adoc[leveloffset=+2]
+
+[id="post-install-image-configuration-resources"]
+== Image configuration resources
+
+Understand and configure image registry settings.
+
+include::modules/images-configuration-parameters.adoc[leveloffset=+2]
+
+include::modules/images-configuration-file.adoc[leveloffset=+2]
+
+include::modules/images-configuration-cas.adoc[leveloffset=+3]
+
+include::modules/images-configuration-insecure.adoc[leveloffset=+3]
+
+include::modules/images-configuration-registry-mirror.adoc[leveloffset=+3]
+
+include::modules/olm-installing-operators-from-operatorhub.adoc[leveloffset=+1]
+include::modules/olm-installing-from-operatorhub-using-web-console.adoc[leveloffset=+2]
+ifdef::openshift-enterprise,openshift-webscale,openshift-origin[]
+include::modules/olm-installing-from-operatorhub-using-cli.adoc[leveloffset=+2]
+
+.Additional resources
+* xref:../operators/understanding_olm/olm-understanding-operatorgroups.adoc#olm-operatorgroups-about_olm-understanding-operatorgroups[About OperatorGroups]
+endif::[]
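+
+As a quick reference for the RBAC modules earlier in this assembly, role
+bindings are typically managed with `oc adm policy`. The user, project, and
+role names below are placeholders:
+
+[source,terminal]
+----
+$ oc adm policy add-role-to-user view bob -n my-project
+$ oc adm policy add-cluster-role-to-user cluster-admin alice
+----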
diff --git a/post_installation_configuration/storage-configuration.adoc b/post_installation_configuration/storage-configuration.adoc
new file mode 100644
index 0000000000..8b47ee6ebe
--- /dev/null
+++ b/post_installation_configuration/storage-configuration.adoc
@@ -0,0 +1,97 @@
+:context: post-install-storage-configuration
+[id="post-install-storage-configuration"]
+= Post-installation storage configuration
+include::modules/common-attributes.adoc[]
+:gluster: GlusterFS
+:gluster-native: Containerized GlusterFS
+:gluster-external: External GlusterFS
+:gluster-install-link: https://docs.gluster.org/en/latest/Install-Guide/Overview/
+:gluster-admin-link: https://docs.gluster.org/en/latest/Administrator%20Guide/overview/
+:gluster-role-link: https://github.com/openshift/openshift-ansible/tree/master/roles/openshift_storage_glusterfs
+ifdef::openshift-enterprise,openshift-webscale[]
+:gluster: Red Hat Gluster Storage
+:gluster-native: converged mode
+:gluster-external: independent mode
+:gluster-install-link: https://access.redhat.com/documentation/en-us/red_hat_gluster_storage/3.3/html/installation_guide/
+:gluster-admin-link: https://access.redhat.com/documentation/en-us/red_hat_gluster_storage/3.3/html/administration_guide/
+:cns-link: https://access.redhat.com/documentation/en-us/red_hat_gluster_storage/3.3/html/container-native_storage_for_openshift_container_platform/
+endif::[]
+toc::[]
+
+After installing {product-title}, you can further expand and customize your
+cluster to your requirements, including storage configuration.
+
+[id="post-install-dynamic-provisioning"]
+== Dynamic provisioning
+
+include::modules/dynamic-provisioning-about.adoc[leveloffset=+2]
+
+include::modules/dynamic-provisioning-available-plugins.adoc[leveloffset=+2]
+
+include::modules/dynamic-provisioning-defining-storage-class.adoc[leveloffset=+2]
+
+include::modules/dynamic-provisioning-storage-class-definition.adoc[leveloffset=+3]
+
+include::modules/dynamic-provisioning-annotations.adoc[leveloffset=+3]
+
+include::modules/dynamic-provisioning-cinder-definition.adoc[leveloffset=+3]
+
+include::modules/dynamic-provisioning-aws-definition.adoc[leveloffset=+3]
+
+include::modules/dynamic-provisioning-azure-disk-definition.adoc[leveloffset=+3]
+
+include::modules/dynamic-provisioning-azure-file-definition.adoc[leveloffset=+3]
+
+include::modules/dynamic-provisioning-azure-file-considerations.adoc[leveloffset=+4]
+
+include::modules/dynamic-provisioning-gce-definition.adoc[leveloffset=+3]
+
+// include::modules/dynamic-provisioning-gluster-definition.adoc[leveloffset=+3]
+
+// include::modules/dynamic-provisioning-ceph-rbd-definition.adoc[leveloffset=+3]
+
+include::modules/dynamic-provisioning-vsphere-definition.adoc[leveloffset=+3]
+
+include::modules/dynamic-provisioning-change-default-class.adoc[leveloffset=+2]
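+
+To make the dynamic provisioning modules above concrete, a `StorageClass`
+object follows this general shape. The following is an illustrative AWS EBS
+sketch; the name and parameters are placeholders for your environment:
+
+[source,yaml]
+----
+kind: StorageClass
+apiVersion: storage.k8s.io/v1
+metadata:
+  name: gp2-example
+  annotations:
+    storageclass.kubernetes.io/is-default-class: "false"
+provisioner: kubernetes.io/aws-ebs
+parameters:
+  type: gp2
+  encrypted: "true"
+reclaimPolicy: Delete
+----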
+
+[id="post-install-optimizing-storage"]
+== Optimizing storage
+
+Optimizing storage helps to minimize storage use across all resources. By
+optimizing storage, administrators help ensure that existing storage resources
+are working in an efficient manner.
+
+include::modules/available-persistent-storage-options.adoc[leveloffset=+1]
+
+include::modules/recommended-configurable-storage-technology.adoc[leveloffset=+1]
+
+[id="post-install-deploy-OCS"]
+== Deploying Red Hat OpenShift Container Storage
+// This section is sourced from storage/persistent_storage/persistent-storage-ocs.adoc
+Red Hat OpenShift Container Storage is a provider of agnostic persistent storage for {product-title}, supporting file, block, and object storage, either in-house or in hybrid clouds. As a Red Hat storage solution, Red Hat OpenShift Container Storage is completely integrated with {product-title} for deployment, management, and monitoring.
+
+[options="header",cols="1,1"]
+|===
+
+|If you are looking for Red Hat OpenShift Container Storage information about...
+|See the following Red Hat OpenShift Container Storage documentation:
+
+|What’s new, known issues, notable bug fixes, and Technology Previews
+|link:https://access.redhat.com/documentation/en-us/red_hat_openshift_container_storage/4.4/html/4.4_release_notes/[Red Hat OpenShift Container Storage 4.4 Release Notes]
+
+|Supported workloads, layouts, hardware and software requirements, sizing and scaling recommendations
+|link:https://access.redhat.com/documentation/en-us/red_hat_openshift_container_storage/4.4/html/planning_your_deployment/index[Planning your Red Hat OpenShift Container Storage 4.4 deployment]
+
+|Deploying Red Hat OpenShift Container Storage 4.4 on an existing {product-title} cluster
+|link:https://access.redhat.com/documentation/en-us/red_hat_openshift_container_storage/4.4/html/deploying_openshift_container_storage/index[Deploying Red Hat OpenShift Container Storage 4.4]
+
+|Managing a Red Hat OpenShift Container Storage 4.4 cluster
+|link:https://access.redhat.com/documentation/en-us/red_hat_openshift_container_storage/4.4/html/managing_openshift_container_storage/index[Managing Red Hat OpenShift Container Storage 4.4]
+
+|Monitoring a Red Hat OpenShift Container Storage 4.4 cluster
+|link:https://access.redhat.com/documentation/en-us/red_hat_openshift_container_storage/4.4/html/monitoring_openshift_container_storage/index[Monitoring Red Hat OpenShift Container Storage 4.4]
+
+|Migrating your {product-title} cluster from version 3 to version 4
+|link:https://access.redhat.com/documentation/en-us/openshift_container_platform/4.4/html/migration/index[Migration]
+
+|===
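+
+Finally, as a companion to the default storage class module earlier in this
+assembly, the default class is typically switched by toggling an annotation.
+The class names here are placeholders:
+
+[source,terminal]
+----
+$ oc patch storageclass gp2 -p '{"metadata": {"annotations": {"storageclass.kubernetes.io/is-default-class": "false"}}}'
+$ oc patch storageclass standard -p '{"metadata": {"annotations": {"storageclass.kubernetes.io/is-default-class": "true"}}}'
+----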