From 6d537e58946b2f7edf183dd630c6c80bf6ffc542 Mon Sep 17 00:00:00 2001 From: Eric Ponvelle Date: Mon, 18 Jul 2022 15:08:51 -0400 Subject: [PATCH] OSDOCS-3826: Updated the compute section --- .../attributes-openshift-dedicated.adoc | 5 + modules/rosa-sdpolicy-account-management.adoc | 307 ------------- .../rosa-sdpolicy-am-aws-compute-types.adoc | 191 ++++++++ modules/rosa-sdpolicy-am-billing.adoc | 11 + ...rosa-sdpolicy-am-cluster-self-service.adoc | 19 + modules/rosa-sdpolicy-am-compute.adoc | 29 ++ modules/rosa-sdpolicy-am-limited-support.adoc | 19 + modules/rosa-sdpolicy-am-regions-az.adoc | 39 ++ modules/rosa-sdpolicy-am-sla.adoc | 8 + modules/rosa-sdpolicy-am-support.adoc | 12 + modules/rosa-sdpolicy-platform.adoc | 12 + modules/sdpolicy-account-management.adoc | 414 ------------------ .../sdpolicy-am-aws-compute-types-ccs.adoc | 191 ++++++++ ...sdpolicy-am-aws-compute-types-non-ccs.adoc | 32 ++ modules/sdpolicy-am-billing.adoc | 20 + modules/sdpolicy-am-cloud-providers.adoc | 12 + modules/sdpolicy-am-cluster-self-service.adoc | 11 + modules/sdpolicy-am-compute.adoc | 25 ++ modules/sdpolicy-am-gcp-compute-types.adoc | 32 ++ modules/sdpolicy-am-limited-support.adoc | 19 + ...dpolicy-am-regions-availability-zones.adoc | 65 +++ modules/sdpolicy-am-sla.adoc | 8 + modules/sdpolicy-am-support.adoc | 12 + modules/sdpolicy-platform.adoc | 12 + .../osd_policy/osd-service-definition.adoc | 18 +- .../rosa-service-definition.adoc | 18 +- 26 files changed, 818 insertions(+), 723 deletions(-) delete mode 100644 modules/rosa-sdpolicy-account-management.adoc create mode 100644 modules/rosa-sdpolicy-am-aws-compute-types.adoc create mode 100644 modules/rosa-sdpolicy-am-billing.adoc create mode 100644 modules/rosa-sdpolicy-am-cluster-self-service.adoc create mode 100644 modules/rosa-sdpolicy-am-compute.adoc create mode 100644 modules/rosa-sdpolicy-am-limited-support.adoc create mode 100644 modules/rosa-sdpolicy-am-regions-az.adoc create mode 100644 modules/rosa-sdpolicy-am-sla.adoc create mode 100644 modules/rosa-sdpolicy-am-support.adoc delete mode 100644 modules/sdpolicy-account-management.adoc create mode 100644 modules/sdpolicy-am-aws-compute-types-ccs.adoc create mode 100644 modules/sdpolicy-am-aws-compute-types-non-ccs.adoc create mode 100644 modules/sdpolicy-am-billing.adoc create mode 100644 modules/sdpolicy-am-cloud-providers.adoc create mode 100644 modules/sdpolicy-am-cluster-self-service.adoc create mode 100644 modules/sdpolicy-am-compute.adoc create mode 100644 modules/sdpolicy-am-gcp-compute-types.adoc create mode 100644 modules/sdpolicy-am-limited-support.adoc create mode 100644 modules/sdpolicy-am-regions-availability-zones.adoc create mode 100644 modules/sdpolicy-am-sla.adoc create mode 100644 modules/sdpolicy-am-support.adoc diff --git a/_attributes/attributes-openshift-dedicated.adoc b/_attributes/attributes-openshift-dedicated.adoc index 762f8d668e..9d871c17be 100644 --- a/_attributes/attributes-openshift-dedicated.adoc +++ b/_attributes/attributes-openshift-dedicated.adoc @@ -13,3 +13,8 @@ :AWS: Amazon Web Services (AWS) :GCP: Google Cloud Platform (GCP) :kebab: image:kebab.png[title="Options menu"] +:rhq-short: Red Hat Quay +:SMProductName: Red Hat OpenShift Service Mesh +:pipelines-title: Red Hat OpenShift Pipelines +:logging-sd: Red Hat OpenShift Logging +:ServerlessProductName: OpenShift Serverless \ No newline at end of file diff --git a/modules/rosa-sdpolicy-account-management.adoc b/modules/rosa-sdpolicy-account-management.adoc deleted file mode 100644 index 
66192dbadf..0000000000 --- a/modules/rosa-sdpolicy-account-management.adoc +++ /dev/null @@ -1,307 +0,0 @@ - -// Module included in the following assemblies: -// -// * assemblies/rosa-service-definition.adoc - -[id="rosa-sdpolicy-account-management_{context}"] -= Account management - - -This section provides information about the service definition for {product-title} account management. - -[id="rosa-sdpolicy-billing_{context}"] -== Billing - -{product-title} is billed through Amazon Web Services (AWS) based on the usage of AWS components used by the service, such as load balancers, storage, EC2 instances, other components, and Red Hat subscriptions for the OpenShift service. - -Any additional Red Hat software must be purchased separately. - -[id="rosa-sdpolicy-cluster-self-service_{context}"] -== Cluster self-service - -Customers can self-service their clusters, including, but not limited to: - -* Create a cluster -* Delete a cluster -* Add or remove an identity provider -* Add or remove a user from an elevated group -* Configure cluster privacy -* Add or remove machine pools and configure autoscaling -* Define upgrade policies - -These tasks can be self-serviced using the `rosa` CLI utility. - -[id="rosa-sdpolicy-compute_{context}"] -== Compute - -Single availability zone clusters require a minimum of 3 control planes, 2 infrastructure nodes, and 2 worker nodes deployed to a single availability zone. - -Multiple availability zone clusters require a minimum of 3 control planes. 3 infrastructure nodes, and 3 worker nodes. Additional nodes must be purchased in multiples of three to maintain proper node distribution. - -All {product-title} clusters support a maximum of 180 worker nodes. - -[NOTE] -==== -The `Default` machine pool node type and size cannot be changed after the cluster is created. -==== - -Control plane and infrastructure nodes are deployed and managed by Red Hat. Shutting down the underlying infrastructure through the cloud provider console is unsupported and can lead to data loss. There are at least 3 control plane nodes that handle etcd- and API-related workloads. There are at least 2 infrastructure nodes that handle metrics, routing, the web console, and other workloads. Control plane and infrastructure nodes are strictly for Red Hat workloads to operate the service, and customer workloads are not permitted to be deployed on these nodes. - -[NOTE] -==== -Approximately one vCPU core and 1 GiB of memory are reserved on each worker node and removed from allocatable resources. This reservation of resources is necessary to run processes required by the underlying platform. These processes include system daemons such as udev, kubelet, and container runtime among others. The reserved resources also account for kernel reservations. - -{OCP} core systems such as audit log aggregation, metrics collection, DNS, image registry, SDN, and others might consume additional allocatable resources to maintain the stability and maintainability of the cluster. The additional resources consumed might vary based on usage. - -For additional information, see the link:https://kubernetes.io/docs/tasks/administer-cluster/reserve-compute-resources/#system-reserved[Kubernetes documentation]. 
-==== - -[id="rosa-sdpolicy-aws-compute-types_{context}"] -== AWS compute types - -{product-title} offers the following worker node types and sizes: - -.General purpose compute types -[%collapsible] -==== -- m5.xlarge (4 vCPU, 16 GiB) -- m5.2xlarge (8 vCPU, 32 GiB) -- m5.4xlarge (16 vCPU, 64 GiB) -- m5.8xlarge (32 vCPU, 128 GiB) -- m5.12xlarge (48 vCPU, 192 GiB) -- m5.16xlarge (64 vCPU, 256 GiB) -- m5.24xlarge (96 vCPU, 384 GiB) -- m5d.xlarge (4 vCPU, 16 GiB) -- m5d.2xlarge (8 vCPU, 32 GiB) -- m5d.4xlarge (16 vCPU, 64 GiB) -- m5d.8xlarge (32 vCPU, 128 GiB) -- m5d.12xlarge (48 vCPU, 192 GiB) -- m5d.16xlarge (64 vCPU, 256 GiB) -- m5d.24xlarge (96 vCPU, 384 GiB) -- m5n.xlarge (4 vCPU, 16 GiB) -- m5n.2xlarge (8 vCPU, 32 GiB) -- m5n.4xlarge (16 vCPU, 64 GiB) -- m5n.8xlarge (32 vCPU, 128 GiB) -- m5n.12xlarge (48 vCPU, 192 GiB) -- m5n.16xlarge (64 vCPU, 256 GiB) -- m5n.24xlarge (96 vCPU, 384 GiB) -- m5dn.xlarge (4 vCPU, 16 GiB) -- m5dn.2xlarge (8 vCPU, 32 GiB) -- m5dn.4xlarge (16 vCPU, 64 GiB) -- m5dn.8xlarge (32 vCPU, 128 GiB) -- m5dn.12xlarge (48 vCPU, 192 GiB) -- m5dn.16xlarge (64 vCPU, 256 GiB) -- m5dn.24xlarge (96 vCPU, 384 GiB) -- m5zn.xlarge (4 vCPU, 16 GiB) -- m5zn.2xlarge (8 vCPU, 32 GiB) -- m5zn.3xlarge (12 vCPU, 48 GiB) -- m5zn.6xlarge (24 vCPU, 96 GiB) -- m5zn.12xlarge (48 vCPU, 192 GiB) -- m6i.xlarge (4 vCPU, 16 GiB) -- m6i.2xlarge (8 vCPU, 32 GiB) -- m6i.4xlarge (16 vCPU, 64 GiB) -- m6i.8xlarge (32 vCPU, 128 GiB) -- m6i.12xlarge (48 vCPU, 192 GiB) -- m6i.16xlarge (64 vCPU, 256 GiB) -- m6i.24xlarge (96 vCPU, 384 GiB) -- m6i.32xlarge (128 vCPU, 512 GiB) -==== - -.Burstable general purpose compute types -[%collapsible] -==== -- t3.xlarge (4 vCPU, 16 GiB) -- t3.2xlarge (8 vCPU, 32 GiB) -- t3a.xlarge (4 vCPU, 16 GiB) -- t3a.2xlarge (8 vCPU, 32 GiB) -==== - -.Memory-optimized compute types -[%collapsible] -==== -- r4.xlarge (4 vCPU, 30.5 GiB) -- r4.2xlarge (8 vCPU, 61 GiB) -- r4.4xlarge (16 vCPU, 122 GiB) -- r4.8xlarge (32 vCPU, 244 GiB) -- r4.16xlarge (64 vCPU, 488 GiB) -- r5.xlarge (4 vCPU, 32 GiB) -- r5.2xlarge (8 vCPU, 64 GiB) -- r5.4xlarge (16 vCPU, 128 GiB) -- r5.8xlarge (32 vCPU, 256 GiB) -- r5.12xlarge (48 vCPU, 384 GiB) -- r5.16xlarge (64 vCPU, 512 GiB) -- r5.24xlarge (96 vCPU, 768 GiB) -- r5a.xlarge (4 vCPU, 32 GiB) -- r5a.2xlarge (8 vCPU, 64 GiB) -- r5a.4xlarge (16 vCPU, 128 GiB) -- r5a.8xlarge (32 vCPU, 256 GiB) -- r5a.12xlarge (48 vCPU, 384 GiB) -- r5a.16xlarge (64 vCPU, 512 GiB) -- r5a.24xlarge (96 vCPU, 768 GiB) -- r5ad.xlarge (4 vCPU, 32 GiB) -- r5ad.2xlarge (8 vCPU, 64 GiB) -- r5ad.4xlarge (16 vCPU, 128 GiB) -- r5ad.8xlarge (32 vCPU, 256 GiB) -- r5ad.12xlarge (48 vCPU, 384 GiB) -- r5ad.16xlarge (64 vCPU, 512 GiB) -- r5ad.24xlarge (96 vCPU, 768 GiB) -- r5d.xlarge (4 vCPU, 32 GiB) -- r5d.2xlarge (8 vCPU, 64 GiB) -- r5d.4xlarge (16 vCPU, 128 GiB) -- r5d.8xlarge (32 vCPU, 256 GiB) -- r5d.12xlarge (48 vCPU, 384 GiB) -- r5d.16xlarge (64 vCPU, 512 GiB) -- r5d.24xlarge (96 vCPU, 768 GiB) -- r5n.xlarge (4 vCPU, 32 GiB) -- r5n.2xlarge (8 vCPU, 64 GiB) -- r5n.4xlarge (16 vCPU, 128 GiB) -- r5n.8xlarge (32 vCPU, 256 GiB) -- r5n.12xlarge (48 vCPU, 384 GiB) -- r5n.16xlarge (64 vCPU, 512 GiB) -- r5n.24xlarge (96 vCPU, 768 GiB) -- r5dn.xlarge (4 vCPU, 32 GiB) -- r5dn.2xlarge (8 vCPU, 64 GiB) -- r5dn.4xlarge (16 vCPU, 128 GiB) -- r5dn.8xlarge (32 vCPU, 256 GiB) -- r5dn.12xlarge (48 vCPU, 384 GiB) -- r5dn.16xlarge (64 vCPU, 512 GiB) -- r5dn.24xlarge (96 vCPU, 768 GiB) -- r6i.xlarge (4 vCPU, 32 GiB) -- r6i.2xlarge (8 vCPU, 64 GiB) -- r6i.4xlarge (16 vCPU, 128 GiB) -- r6i.8xlarge (32 vCPU, 
256 GiB) -- r6i.12xlarge (48 vCPU, 384 GiB) -- r6i.16xlarge (64 vCPU, 512 GiB) -- r6i.24xlarge (96 vCPU, 768 GiB) -- r6i.32xlarge (128 vCPU, 1,024 GiB) -- z1d.xlarge (4 vCPU, 32 GiB) -- z1d.2xlarge (8 vCPU, 64 GiB) -- z1d.3xlarge (12 vCPU, 96 GiB) -- z1d.6xlarge (24 vCPU, 192 GiB) -- z1d.12xlarge (48 vCPU, 384 GiB) -==== - -.Compute-optimized compute types -[%collapsible] -==== -- c5.xlarge (4 vCPU, 8 GiB) -- c5.2xlarge (8 vCPU, 16 GiB) -- c5.4xlarge (16 vCPU, 32 GiB) -- c5.9xlarge (36 vCPU, 72 GiB) -- c5.12xlarge (48 vCPU, 96 GiB) -- c5.18xlarge (72 vCPU, 144 GiB) -- c5.24xlarge (96 vCPU, 192 GiB) -- c5d.xlarge (4 vCPU, 8 GiB) -- c5d.2xlarge (8 vCPU, 16 GiB) -- c5d.4xlarge (16 vCPU, 32 GiB) -- c5d.9xlarge (36 vCPU, 72 GiB) -- c5d.12xlarge (48 vCPU, 96 GiB) -- c5d.18xlarge (72 vCPU, 144 GiB) -- c5d.24xlarge (96 vCPU, 192 GiB) -- c5a.xlarge (4 vCPU, 8 GiB) -- c5a.2xlarge (8 vCPU, 16 GiB) -- c5a.4xlarge (16 vCPU, 32 GiB) -- c5a.8xlarge (32 vCPU, 64 GiB) -- c5a.12xlarge (48 vCPU, 96 GiB) -- c5a.16xlarge (64 vCPU, 128 GiB) -- c5a.24xlarge (96 vCPU, 192 GiB) -- c5ad.xlarge (4 vCPU, 8 GiB) -- c5ad.2xlarge (8 vCPU, 16 GiB) -- c5ad.4xlarge (16 vCPU, 32 GiB) -- c5ad.8xlarge (32 vCPU, 64 GiB) -- c5ad.12xlarge (48 vCPU, 96 GiB) -- c5ad.16xlarge (64 vCPU, 128 GiB) -- c5ad.24xlarge (96 vCPU, 192 GiB) -- c5n.xlarge (4 vCPU, 10.5 GiB) -- c5n.2xlarge (8 vCPU, 21 GiB) -- c5n.4xlarge (16 vCPU, 42 GiB) -- c5n.9xlarge (36 vCPU, 96 GiB) -- c5n.18xlarge (72 vCPU, 192 GiB) -- c6i.xlarge (4 vCPU, 8 GiB) -- c6i.2xlarge (8 vCPU, 16 GiB) -- c6i.4xlarge (16 vCPU, 32 GiB) -- c6i.8xlarge (32 vCPU, 64 GiB) -- c6i.12xlarge (48 vCPU, 96 GiB) -- c6i.16xlarge (64 vCPU, 128 GiB) -- c6i.24xlarge (96 vCPU, 192 GiB) -- c6i.32xlarge (128 vCPU, 256 GiB) -==== - -.Storage-optimized compute types -[%collapsible] -==== -- i3.xlarge (4 vCPU, 30.5 GiB) -- i3.2xlarge (8 vCPU, 61 GiB) -- i3.4xlarge (16 vCPU, 122 GiB) -- i3.8xlarge (32 vCPU, 244 GiB) -- i3.16xlarge (64 vCPU, 488 GiB) -- i3en.xlarge (4 vCPU, 32 GiB) -- i3en.2xlarge (8 vCPU, 64 GiB) -- i3en.3xlarge (12 vCPU, 96 GiB) -- i3en.6xlarge (24 vCPU, 192 GiB) -- i3en.12xlarge (48 vCPU, 384 GiB) -- i3en.24xlarge (96 vCPU, 768 GiB) -==== - -[id="rosa-sdpolicy-regions-az_{context}"] -== Regions and availability zones -The following AWS regions are supported by Red Hat OpenShift 4 and are supported for {product-title}. Note: China and GovCloud (US) regions are not supported, regardless of their support on OpenShift 4. - -- af-south-1 (Cape Town, AWS opt-in required) -- ap-east-1 (Hong Kong, AWS opt-in required) -- ap-northeast-1 (Tokyo) -- ap-northeast-2 (Seoul) -- ap-northeast-3 (Osaka) -- ap-south-1 (Mumbai) -- ap-southeast-1 (Singapore) -- ap-southeast-2 (Sydney) -- ca-central-1 (Central Canada) -- eu-central-1 (Frankfurt) -- eu-north-1 (Stockholm) -- eu-south-1 (Milan, AWS opt-in required) -- eu-west-1 (Ireland) -- eu-west-2 (London) -- eu-west-3 (Paris) -- me-south-1 (Bahrain, AWS opt-in required) -- sa-east-1 (São Paulo) -- us-east-1 (N. Virginia) -- us-east-2 (Ohio) -- us-west-1 (N. California) -- us-west-2 (Oregon) - -Multiple availability zone clusters can only be deployed in regions with at least 3 availability zones. For more information, see the link:https://aws.amazon.com/about-aws/global-infrastructure/regions_az/[Regions and Availability Zones] section in the AWS documentation. 
- -Each new {product-title} cluster is installed within an installer-created or preexisting Virtual Private Cloud (VPC) in a single region, with the option to deploy into a single availability zone (Single-AZ) or across multiple availability zones (Multi-AZ). This provides cluster-level network and resource isolation, and enables cloud-provider VPC settings, such as VPN connections and VPC Peering. Persistent volumes (PVs) are backed by AWS Elastic Block Storage (EBS), and are specific to the availability zone in which they are provisioned. Persistent volume claims (PVCs) do not bind to a volume until the associated pod resource is assigned into a specific availability zone to prevent unschedulable pods. Availability zone-specific resources are only usable by resources in the same availability zone. - -[WARNING] -==== -The region and the choice of single or multiple availability zone cannot be changed after a cluster has been deployed. -==== - -[id="rosa-sdpolicy-sla_{context}"] -== Service Level Agreement (SLA) -Any SLAs for the service itself are defined in Appendix 4 of the link:https://www.redhat.com/licenses/Appendix_4_Red_Hat_Online_Services_20210503.pdf[Red Hat Enterprise Agreement Appendix 4 (Online Subscription Services)]. - - -[id="rosa-limited-support_{context}"] -== Limited support status - -When a cluster transitions to a _Limited Support_ status, Red Hat no longer proactively monitors the cluster, the SLA is no longer applicable, and credits requested against the SLA are denied. It does not mean that you no longer have product support. In some cases, the cluster can return to a fully-supported status if you remediate the violating factors. However, in other cases, you might have to delete and recreate the cluster. - -A cluster might transition to a Limited Support status for many reasons, including the following scenarios: - -If you do not upgrade a cluster to a supported version before the end-of-life date:: Red Hat does not make any runtime or SLA guarantees for versions after their end-of-life date. To receive continued support, upgrade the cluster to a supported version prior to the end-of-life date. If you do not upgrade the cluster prior to the end-of-life date, the cluster transitions to a Limited Support status until it is upgraded to a supported version. -+ -Red Hat provides commercially reasonable support to upgrade from an unsupported version to a supported version. However, if a supported upgrade path is no longer available, you might have to create a new cluster and migrate your workloads. - -If you remove or replace any native {product-title} components or any other component that is installed and managed by Red Hat:: If cluster administrator permissions were used, Red Hat is not responsible for any of your or your authorized users’ actions, including those that affect infrastructure services, service availability, or data loss. If Red Hat detects any such actions, the cluster might transition to a Limited Support status. Red Hat notifies you of the status change and you should either revert the action or create a support case to explore remediation steps that might require you to delete and recreate the cluster. - -If you have questions about a specific action that might cause a cluster to transition to a Limited Support status or need further assistance, open a support ticket. 
- -[id="rosa-sdpolicy-support_{context}"] -== Support -{product-title} includes Red Hat Premium Support, which can be accessed by using the link:https://access.redhat.com/support?extIdCarryOver=true&sc_cid=701f2000001Css5AAC[Red Hat Customer Portal]. - -See {product-title} link:https://access.redhat.com/support/offerings/openshift/sla?extIdCarryOver=true&sc_cid=701f2000001Css5AAC[SLAs] for support response times. - -AWS support is subject to a customer's existing support contract with AWS. diff --git a/modules/rosa-sdpolicy-am-aws-compute-types.adoc b/modules/rosa-sdpolicy-am-aws-compute-types.adoc new file mode 100644 index 0000000000..f426ef4982 --- /dev/null +++ b/modules/rosa-sdpolicy-am-aws-compute-types.adoc @@ -0,0 +1,191 @@ + +// Module included in the following assemblies: +// +// * assemblies/rosa-service-definition.adoc +:_content-type: CONCEPT +[id="rosa-sdpolicy-aws-compute-types_{context}"] += AWS compute types + +{product-title} offers the following worker node types and sizes: + +.General purpose compute types +[%collapsible] +==== +- m5.xlarge (4 vCPU, 16 GiB) +- m5.2xlarge (8 vCPU, 32 GiB) +- m5.4xlarge (16 vCPU, 64 GiB) +- m5.8xlarge (32 vCPU, 128 GiB) +- m5.12xlarge (48 vCPU, 192 GiB) +- m5.16xlarge (64 vCPU, 256 GiB) +- m5.24xlarge (96 vCPU, 384 GiB) +- m5d.xlarge (4 vCPU, 16 GiB) +- m5d.2xlarge (8 vCPU, 32 GiB) +- m5d.4xlarge (16 vCPU, 64 GiB) +- m5d.8xlarge (32 vCPU, 128 GiB) +- m5d.12xlarge (48 vCPU, 192 GiB) +- m5d.16xlarge (64 vCPU, 256 GiB) +- m5d.24xlarge (96 vCPU, 384 GiB) +- m5n.xlarge (4 vCPU, 16 GiB) +- m5n.2xlarge (8 vCPU, 32 GiB) +- m5n.4xlarge (16 vCPU, 64 GiB) +- m5n.8xlarge (32 vCPU, 128 GiB) +- m5n.12xlarge (48 vCPU, 192 GiB) +- m5n.16xlarge (64 vCPU, 256 GiB) +- m5n.24xlarge (96 vCPU, 384 GiB) +- m5dn.xlarge (4 vCPU, 16 GiB) +- m5dn.2xlarge (8 vCPU, 32 GiB) +- m5dn.4xlarge (16 vCPU, 64 GiB) +- m5dn.8xlarge (32 vCPU, 128 GiB) +- m5dn.12xlarge (48 vCPU, 192 GiB) +- m5dn.16xlarge (64 vCPU, 256 GiB) +- m5dn.24xlarge (96 vCPU, 384 GiB) +- m5zn.xlarge (4 vCPU, 16 GiB) +- m5zn.2xlarge (8 vCPU, 32 GiB) +- m5zn.3xlarge (12 vCPU, 48 GiB) +- m5zn.6xlarge (24 vCPU, 96 GiB) +- m5zn.12xlarge (48 vCPU, 192 GiB) +- m6i.xlarge (4 vCPU, 16 GiB) +- m6i.2xlarge (8 vCPU, 32 GiB) +- m6i.4xlarge (16 vCPU, 64 GiB) +- m6i.8xlarge (32 vCPU, 128 GiB) +- m6i.12xlarge (48 vCPU, 192 GiB) +- m6i.16xlarge (64 vCPU, 256 GiB) +- m6i.24xlarge (96 vCPU, 384 GiB) +- m6i.32xlarge (128 vCPU, 512 GiB) +==== + +.Burstable general purpose compute types +[%collapsible] +==== +- t3.xlarge (4 vCPU, 16 GiB) +- t3.2xlarge (8 vCPU, 32 GiB) +- t3a.xlarge (4 vCPU, 16 GiB) +- t3a.2xlarge (8 vCPU, 32 GiB) +==== + +.Memory-optimized compute types +[%collapsible] +==== +- r4.xlarge (4 vCPU, 30.5 GiB) +- r4.2xlarge (8 vCPU, 61 GiB) +- r4.4xlarge (16 vCPU, 122 GiB) +- r4.8xlarge (32 vCPU, 244 GiB) +- r4.16xlarge (64 vCPU, 488 GiB) +- r5.xlarge (4 vCPU, 32 GiB) +- r5.2xlarge (8 vCPU, 64 GiB) +- r5.4xlarge (16 vCPU, 128 GiB) +- r5.8xlarge (32 vCPU, 256 GiB) +- r5.12xlarge (48 vCPU, 384 GiB) +- r5.16xlarge (64 vCPU, 512 GiB) +- r5.24xlarge (96 vCPU, 768 GiB) +- r5a.xlarge (4 vCPU, 32 GiB) +- r5a.2xlarge (8 vCPU, 64 GiB) +- r5a.4xlarge (16 vCPU, 128 GiB) +- r5a.8xlarge (32 vCPU, 256 GiB) +- r5a.12xlarge (48 vCPU, 384 GiB) +- r5a.16xlarge (64 vCPU, 512 GiB) +- r5a.24xlarge (96 vCPU, 768 GiB) +- r5ad.xlarge (4 vCPU, 32 GiB) +- r5ad.2xlarge (8 vCPU, 64 GiB) +- r5ad.4xlarge (16 vCPU, 128 GiB) +- r5ad.8xlarge (32 vCPU, 256 GiB) +- r5ad.12xlarge (48 vCPU, 384 GiB) +- r5ad.16xlarge (64 vCPU, 512 GiB) +- r5ad.24xlarge 
(96 vCPU, 768 GiB) +- r5d.xlarge (4 vCPU, 32 GiB) +- r5d.2xlarge (8 vCPU, 64 GiB) +- r5d.4xlarge (16 vCPU, 128 GiB) +- r5d.8xlarge (32 vCPU, 256 GiB) +- r5d.12xlarge (48 vCPU, 384 GiB) +- r5d.16xlarge (64 vCPU, 512 GiB) +- r5d.24xlarge (96 vCPU, 768 GiB) +- r5n.xlarge (4 vCPU, 32 GiB) +- r5n.2xlarge (8 vCPU, 64 GiB) +- r5n.4xlarge (16 vCPU, 128 GiB) +- r5n.8xlarge (32 vCPU, 256 GiB) +- r5n.12xlarge (48 vCPU, 384 GiB) +- r5n.16xlarge (64 vCPU, 512 GiB) +- r5n.24xlarge (96 vCPU, 768 GiB) +- r5dn.xlarge (4 vCPU, 32 GiB) +- r5dn.2xlarge (8 vCPU, 64 GiB) +- r5dn.4xlarge (16 vCPU, 128 GiB) +- r5dn.8xlarge (32 vCPU, 256 GiB) +- r5dn.12xlarge (48 vCPU, 384 GiB) +- r5dn.16xlarge (64 vCPU, 512 GiB) +- r5dn.24xlarge (96 vCPU, 768 GiB) +- r6i.xlarge (4 vCPU, 32 GiB) +- r6i.2xlarge (8 vCPU, 64 GiB) +- r6i.4xlarge (16 vCPU, 128 GiB) +- r6i.8xlarge (32 vCPU, 256 GiB) +- r6i.12xlarge (48 vCPU, 384 GiB) +- r6i.16xlarge (64 vCPU, 512 GiB) +- r6i.24xlarge (96 vCPU, 768 GiB) +- r6i.32xlarge (128 vCPU, 1,024 GiB) +- z1d.xlarge (4 vCPU, 32 GiB) +- z1d.2xlarge (8 vCPU, 64 GiB) +- z1d.3xlarge (12 vCPU, 96 GiB) +- z1d.6xlarge (24 vCPU, 192 GiB) +- z1d.12xlarge (48 vCPU, 384 GiB) +==== + +.Compute-optimized compute types +[%collapsible] +==== +- c5.xlarge (4 vCPU, 8 GiB) +- c5.2xlarge (8 vCPU, 16 GiB) +- c5.4xlarge (16 vCPU, 32 GiB) +- c5.9xlarge (36 vCPU, 72 GiB) +- c5.12xlarge (48 vCPU, 96 GiB) +- c5.18xlarge (72 vCPU, 144 GiB) +- c5.24xlarge (96 vCPU, 192 GiB) +- c5d.xlarge (4 vCPU, 8 GiB) +- c5d.2xlarge (8 vCPU, 16 GiB) +- c5d.4xlarge (16 vCPU, 32 GiB) +- c5d.9xlarge (36 vCPU, 72 GiB) +- c5d.12xlarge (48 vCPU, 96 GiB) +- c5d.18xlarge (72 vCPU, 144 GiB) +- c5d.24xlarge (96 vCPU, 192 GiB) +- c5a.xlarge (4 vCPU, 8 GiB) +- c5a.2xlarge (8 vCPU, 16 GiB) +- c5a.4xlarge (16 vCPU, 32 GiB) +- c5a.8xlarge (32 vCPU, 64 GiB) +- c5a.12xlarge (48 vCPU, 96 GiB) +- c5a.16xlarge (64 vCPU, 128 GiB) +- c5a.24xlarge (96 vCPU, 192 GiB) +- c5ad.xlarge (4 vCPU, 8 GiB) +- c5ad.2xlarge (8 vCPU, 16 GiB) +- c5ad.4xlarge (16 vCPU, 32 GiB) +- c5ad.8xlarge (32 vCPU, 64 GiB) +- c5ad.12xlarge (48 vCPU, 96 GiB) +- c5ad.16xlarge (64 vCPU, 128 GiB) +- c5ad.24xlarge (96 vCPU, 192 GiB) +- c5n.xlarge (4 vCPU, 10.5 GiB) +- c5n.2xlarge (8 vCPU, 21 GiB) +- c5n.4xlarge (16 vCPU, 42 GiB) +- c5n.9xlarge (36 vCPU, 96 GiB) +- c5n.18xlarge (72 vCPU, 192 GiB) +- c6i.xlarge (4 vCPU, 8 GiB) +- c6i.2xlarge (8 vCPU, 16 GiB) +- c6i.4xlarge (16 vCPU, 32 GiB) +- c6i.8xlarge (32 vCPU, 64 GiB) +- c6i.12xlarge (48 vCPU, 96 GiB) +- c6i.16xlarge (64 vCPU, 128 GiB) +- c6i.24xlarge (96 vCPU, 192 GiB) +- c6i.32xlarge (128 vCPU, 256 GiB) +==== + +.Storage-optimized compute types +[%collapsible] +==== +- i3.xlarge (4 vCPU, 30.5 GiB) +- i3.2xlarge (8 vCPU, 61 GiB) +- i3.4xlarge (16 vCPU, 122 GiB) +- i3.8xlarge (32 vCPU, 244 GiB) +- i3.16xlarge (64 vCPU, 488 GiB) +- i3en.xlarge (4 vCPU, 32 GiB) +- i3en.2xlarge (8 vCPU, 64 GiB) +- i3en.3xlarge (12 vCPU, 96 GiB) +- i3en.6xlarge (24 vCPU, 192 GiB) +- i3en.12xlarge (48 vCPU, 384 GiB) +- i3en.24xlarge (96 vCPU, 768 GiB) +==== \ No newline at end of file diff --git a/modules/rosa-sdpolicy-am-billing.adoc b/modules/rosa-sdpolicy-am-billing.adoc new file mode 100644 index 0000000000..209c755c6d --- /dev/null +++ b/modules/rosa-sdpolicy-am-billing.adoc @@ -0,0 +1,11 @@ + +// Module included in the following assemblies: +// +// * assemblies/rosa-service-definition.adoc +:_content-type: CONCEPT +[id="rosa-sdpolicy-billing_{context}"] += Billing + +{product-title} is billed through Amazon Web Services (AWS) based on the usage of AWS 
components used by the service, such as load balancers, storage, EC2 instances, other components, and Red Hat subscriptions for the OpenShift service. + +Any additional Red Hat software must be purchased separately. \ No newline at end of file diff --git a/modules/rosa-sdpolicy-am-cluster-self-service.adoc b/modules/rosa-sdpolicy-am-cluster-self-service.adoc new file mode 100644 index 0000000000..1f43ec8363 --- /dev/null +++ b/modules/rosa-sdpolicy-am-cluster-self-service.adoc @@ -0,0 +1,19 @@ + +// Module included in the following assemblies: +// +// * assemblies/rosa-service-definition.adoc +:_content-type: CONCEPT +[id="rosa-sdpolicy-cluster-self-service_{context}"] += Cluster self-service + +Customers can self-service their clusters, including, but not limited to: + +* Create a cluster +* Delete a cluster +* Add or remove an identity provider +* Add or remove a user from an elevated group +* Configure cluster privacy +* Add or remove machine pools and configure autoscaling +* Define upgrade policies + +These tasks can be self-serviced using the `rosa` CLI utility. \ No newline at end of file diff --git a/modules/rosa-sdpolicy-am-compute.adoc b/modules/rosa-sdpolicy-am-compute.adoc new file mode 100644 index 0000000000..0a22cf4e42 --- /dev/null +++ b/modules/rosa-sdpolicy-am-compute.adoc @@ -0,0 +1,29 @@ + +// Module included in the following assemblies: +// +// * assemblies/rosa-service-definition.adoc +:_content-type: CONCEPT +[id="rosa-sdpolicy-compute_{context}"] += Compute + +Single availability zone clusters require a minimum of 3 control plane nodes, 2 infrastructure nodes, and 2 worker nodes deployed to a single availability zone. + +Multiple availability zone clusters require a minimum of 3 control plane nodes, 3 infrastructure nodes, and 3 worker nodes. Additional nodes must be purchased in multiples of three to maintain proper node distribution. + +All {product-title} clusters support a maximum of 180 worker nodes. + +[NOTE] +==== +The `Default` machine pool node type and size cannot be changed after the cluster is created. +==== + +Control plane and infrastructure nodes are deployed and managed by Red Hat. Shutting down the underlying infrastructure through the cloud provider console is unsupported and can lead to data loss. There are at least 3 control plane nodes that handle etcd- and API-related workloads. There are at least 2 infrastructure nodes that handle metrics, routing, the web console, and other workloads. You must not run any workloads on the control and infrastructure nodes. Any workloads you intend to run must be deployed on worker nodes. See the Red Hat Operator support section below for more information about Red Hat workloads that must be deployed on worker nodes. + +[NOTE] +==== +Approximately one vCPU core and 1 GiB of memory are reserved on each worker node and removed from allocatable resources. This reservation of resources is necessary to run processes required by the underlying platform. These processes include system daemons such as udev, kubelet, and container runtime, among others. The reserved resources also account for kernel reservations. + +{OCP} core systems such as audit log aggregation, metrics collection, DNS, image registry, SDN, and others might consume additional allocatable resources to maintain the stability and maintainability of the cluster. The additional resources consumed might vary based on usage.
+ +For additional information, see the link:https://kubernetes.io/docs/tasks/administer-cluster/reserve-compute-resources/#system-reserved[Kubernetes documentation]. +==== \ No newline at end of file diff --git a/modules/rosa-sdpolicy-am-limited-support.adoc b/modules/rosa-sdpolicy-am-limited-support.adoc new file mode 100644 index 0000000000..89c6b7389e --- /dev/null +++ b/modules/rosa-sdpolicy-am-limited-support.adoc @@ -0,0 +1,19 @@ + +// Module included in the following assemblies: +// +// * assemblies/rosa-service-definition.adoc +:_content-type: CONCEPT +[id="rosa-limited-support_{context}"] += Limited support status + +When a cluster transitions to a _Limited Support_ status, Red Hat no longer proactively monitors the cluster, the SLA is no longer applicable, and credits requested against the SLA are denied. It does not mean that you no longer have product support. In some cases, the cluster can return to a fully-supported status if you remediate the violating factors. However, in other cases, you might have to delete and recreate the cluster. + +A cluster might move to a Limited Support status for many reasons, including the following scenarios: + +If you do not upgrade a cluster to a supported version before the end-of-life date:: Red Hat does not make any runtime or SLA guarantees for versions after their end-of-life date. To receive continued support, upgrade the cluster to a supported version prior to the end-of-life date. If you do not upgrade the cluster prior to the end-of-life date, the cluster transitions to a Limited Support status until it is upgraded to a supported version. ++ +Red Hat provides commercially reasonable support to upgrade from an unsupported version to a supported version. However, if a supported upgrade path is no longer available, you might have to create a new cluster and migrate your workloads. + +If you remove or replace any native {product-title} components or any other component that is installed and managed by Red Hat:: If cluster administrator permissions were used, Red Hat is not responsible for any of your or your authorized users’ actions, including those that affect infrastructure services, service availability, or data loss. If Red Hat detects any such actions, the cluster might transition to a Limited Support status. Red Hat notifies you of the status change and you should either revert the action or create a support case to explore remediation steps that might require you to delete and recreate the cluster. + +If you have questions about a specific action that might cause a cluster to move to a Limited Support status or need further assistance, open a support ticket. \ No newline at end of file diff --git a/modules/rosa-sdpolicy-am-regions-az.adoc b/modules/rosa-sdpolicy-am-regions-az.adoc new file mode 100644 index 0000000000..185c124f7f --- /dev/null +++ b/modules/rosa-sdpolicy-am-regions-az.adoc @@ -0,0 +1,39 @@ + +// Module included in the following assemblies: +// +// * assemblies/rosa-service-definition.adoc +:_content-type: CONCEPT +[id="rosa-sdpolicy-regions-az_{context}"] += Regions and availability zones +The following AWS regions are supported by Red Hat OpenShift 4 and are supported for {product-title}. Note: China and GovCloud (US) regions are not supported, regardless of their support on OpenShift 4. 
+ +- af-south-1 (Cape Town, AWS opt-in required) +- ap-east-1 (Hong Kong, AWS opt-in required) +- ap-northeast-1 (Tokyo) +- ap-northeast-2 (Seoul) +- ap-northeast-3 (Osaka) +- ap-south-1 (Mumbai) +- ap-southeast-1 (Singapore) +- ap-southeast-2 (Sydney) +- ca-central-1 (Central Canada) +- eu-central-1 (Frankfurt) +- eu-north-1 (Stockholm) +- eu-south-1 (Milan, AWS opt-in required) +- eu-west-1 (Ireland) +- eu-west-2 (London) +- eu-west-3 (Paris) +- me-south-1 (Bahrain, AWS opt-in required) +- sa-east-1 (São Paulo) +- us-east-1 (N. Virginia) +- us-east-2 (Ohio) +- us-west-1 (N. California) +- us-west-2 (Oregon) + +Multiple availability zone clusters can only be deployed in regions with at least 3 availability zones. For more information, see the link:https://aws.amazon.com/about-aws/global-infrastructure/regions_az/[Regions and Availability Zones] section in the AWS documentation. + +Each new {product-title} cluster is installed within an installer-created or preexisting Virtual Private Cloud (VPC) in a single region, with the option to deploy into a single availability zone (Single-AZ) or across multiple availability zones (Multi-AZ). This provides cluster-level network and resource isolation, and enables cloud-provider VPC settings, such as VPN connections and VPC Peering. Persistent volumes (PVs) are backed by AWS Elastic Block Storage (EBS), and are specific to the availability zone in which they are provisioned. Persistent volume claims (PVCs) do not bind to a volume until the associated pod resource is assigned into a specific availability zone to prevent unschedulable pods. Availability zone-specific resources are only usable by resources in the same availability zone. + +[WARNING] +==== +The region and the choice of single or multiple availability zone cannot be changed after a cluster has been deployed. +==== \ No newline at end of file diff --git a/modules/rosa-sdpolicy-am-sla.adoc b/modules/rosa-sdpolicy-am-sla.adoc new file mode 100644 index 0000000000..67a15b72b5 --- /dev/null +++ b/modules/rosa-sdpolicy-am-sla.adoc @@ -0,0 +1,8 @@ + +// Module included in the following assemblies: +// +// * assemblies/rosa-service-definition.adoc +:_content-type: CONCEPT +[id="rosa-sdpolicy-sla_{context}"] += Service Level Agreement (SLA) +Any SLAs for the service itself are defined in Appendix 4 of the link:https://www.redhat.com/licenses/Appendix_4_Red_Hat_Online_Services_20210503.pdf[Red Hat Enterprise Agreement Appendix 4 (Online Subscription Services)]. \ No newline at end of file diff --git a/modules/rosa-sdpolicy-am-support.adoc b/modules/rosa-sdpolicy-am-support.adoc new file mode 100644 index 0000000000..403d1a529e --- /dev/null +++ b/modules/rosa-sdpolicy-am-support.adoc @@ -0,0 +1,12 @@ + +// Module included in the following assemblies: +// +// * assemblies/rosa-service-definition.adoc +:_content-type: CONCEPT +[id="rosa-sdpolicy-support_{context}"] += Support +{product-title} includes Red Hat Premium Support, which can be accessed by using the link:https://access.redhat.com/support?extIdCarryOver=true&sc_cid=701f2000001Css5AAC[Red Hat Customer Portal]. + +See {product-title} link:https://access.redhat.com/support/offerings/openshift/sla?extIdCarryOver=true&sc_cid=701f2000001Css5AAC[SLAs] for support response times. + +AWS support is subject to a customer's existing support contract with AWS. 
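The self-service tasks, machine pool settings, and region choices described in the ROSA modules above can also be driven from the `rosa` CLI. The following is a minimal sketch rather than an exhaustive reference: the cluster name, machine pool name, and instance type are placeholders, and the exact flags available can vary between `rosa` CLI versions.

[source,terminal]
----
# Create a Multi-AZ cluster in a supported region (names and region are placeholders).
rosa create cluster --cluster-name=my-cluster --region=us-east-2 --multi-az

# Add a machine pool that uses a supported worker instance type and autoscales.
rosa create machinepool --cluster=my-cluster --name=extra-workers \
  --instance-type=m5.xlarge --enable-autoscaling --min-replicas=3 --max-replicas=6

# Schedule a cluster upgrade in line with your defined upgrade policy.
rosa upgrade cluster --cluster=my-cluster
----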
diff --git a/modules/rosa-sdpolicy-platform.adoc b/modules/rosa-sdpolicy-platform.adoc index b1c54af443..9061d0b264 100644 --- a/modules/rosa-sdpolicy-platform.adoc +++ b/modules/rosa-sdpolicy-platform.adoc @@ -103,6 +103,18 @@ See the link:https://docs.openshift.com/rosa/rosa_policy/rosa-life-cycle.html[{p == Operating system {product-title} runs on OpenShift 4 and uses Red Hat CoreOS as the operating system for all control plane and worker nodes. +[id="rosa-sdpolicy-red-hat-operator_{context}"] +== Red Hat Operator support +Red Hat workloads typically refer to Red Hat-provided Operators made available through Operator Hub. Red Hat workloads are not managed by the Red Hat SRE team, and must be deployed on worker nodes. These Operators may require additional Red Hat subscriptions, and may incur additional cloud infrastructure costs. Examples of these Red Hat-provided Operators are: + +* {rhq-short} +* Red Hat Advanced Cluster Management +* Red Hat Advanced Cluster Security +* {SMProductName} +* {ServerlessProductName} +* {logging-sd} +* {pipelines-title} + [id="rosa-sdpolicy-kubernetes-operator_{context}"] == Kubernetes Operator support All Operators listed in the Operator Hub marketplace should be available for installation. These operators are considered customer workloads, and are not monitored by Red Hat SRE. diff --git a/modules/sdpolicy-account-management.adoc b/modules/sdpolicy-account-management.adoc deleted file mode 100644 index d5c7dafabe..0000000000 --- a/modules/sdpolicy-account-management.adoc +++ /dev/null @@ -1,414 +0,0 @@ - -// Module included in the following assemblies: -// -// * assemblies/osd-service-definition.adoc - -[id="sdpolicy-account-management_{context}"] -= Account management - -[id="billing_{context}"] -== Billing -Each {product-title} cluster requires a minimum annual base cluster purchase and there are two billing options available for each cluster: Standard and Customer Cloud Subscription (CCS). - -Standard {product-title} clusters are deployed in to their own cloud infrastructure accounts, each owned by Red Hat. Red Hat is responsible for this account, and cloud infrastructure costs are paid directly by Red Hat. The customer only pays the Red Hat subscription costs. - -In the CCS model, the customer pays the cloud infrastructure provider directly for cloud costs and the cloud infrastructure account is part of a customer’s Organization, with specific access granted to Red Hat. In this model, the customer pays Red Hat for the CCS subscription and pays the cloud provider for the cloud costs. It is the customer's responsibility to pre-purchase or provide Reserved Instance (RI) compute instances to ensure lower cloud infrastructure costs. - -Additional resources can be purchased for an OpenShift Dedicated Cluster, including: - -* Additional nodes (can be different types and sizes through the use of machine pools) -* Middleware (JBoss EAP, JBoss Fuse, and so on) - additional pricing based on specific middleware component -* Additional storage in increments of 500 GB (standard only; 100 GB included) -* Additional 12 TiB Network I/O (standard only; 12 TB included) -* Load Balancers for Services are available in bundles of 4; enables non-HTTP/SNI traffic or non-standard ports (standard only) - -[id="cluster-self-service_{context}"] -== Cluster self-service - -Customers can create, scale, and delete their clusters from {cluster-manage-url}, provided that they have pre-purchased the necessary subscriptions. 
- -Actions available in {cluster-manager-first} must not be directly performed from within the cluster as this might cause adverse affects, including having all actions automatically reverted. - -[id="cloud-providers_{context}"] -== Cloud providers - -{product-title} offers OpenShift Container Platform clusters as a managed service on the following cloud providers: - -* Amazon Web Services (AWS) -* Google Cloud Platform (GCP) - -[id="compute_{context}"] -== Compute - -Single availability zone clusters require a minimum of 2 worker nodes for Customer Cloud Subscription (CCS) clusters deployed to a single availability zone. A minimum of 4 worker nodes is required for standard clusters. These 4 worker nodes are included in the base subscription. - -Multiple availability zone clusters require a minimum of 3 worker nodes for Customer Cloud Subscription (CCS) clusters, 1 deployed to each of 3 availability zones. A minimum of 9 worker nodes are required for standard clusters. These 9 worker nodes are included in the base subscription, and additional nodes must be purchased in multiples of 3 to maintain proper node distribution. - -Worker nodes must all be the same type and size within a single {product-title} cluster. - -[NOTE] -==== -The default machine pool node type and size cannot be changed after the cluster has been created. -==== - -Control and infrastructure nodes are also provided by Red Hat. There are at least 3 control planenodes that handle etcd and API-related workloads. There are at least 2 infrastructure nodes that handle metrics, routing, the web console, and other workloads. Control and infrastructure nodes are strictly for Red Hat workloads to operate the service, and customer workloads are not permitted to be deployed on these nodes. - -[NOTE] -==== -Approximately 1 vCPU core and 1 GiB of memory are reserved on each worker node and removed from allocatable resources. This is necessary to run link:https://kubernetes.io/docs/tasks/administer-cluster/reserve-compute-resources/#system-reserved[processes required by the underlying platform]. This includes system daemons such as udev, kubelet, container runtime, and so on, and also accounts for kernel reservations. {OCP} core systems such as audit log aggregation, metrics collection, DNS, image registry, SDN, and so on might consume additional allocatable resources to maintain the stability and maintainability of the cluster. The additional resources consumed might vary based on usage. 
-==== - -[id="aws-compute-types-ccs_{context}"] -== AWS compute types for Customer Cloud Subscription clusters - -{product-title} offers the following worker node types and sizes on AWS: - -.General purpose -[%collapsible] -==== -- m5.xlarge (4 vCPU, 16 GiB) -- m5.2xlarge (8 vCPU, 32 GiB) -- m5.4xlarge (16 vCPU, 64 GiB) -- m5.8xlarge (32 vCPU, 128 GiB) -- m5.12xlarge (48 vCPU, 192 GiB) -- m5.16xlarge (64 vCPU, 256 GiB) -- m5.24xlarge (96 vCPU, 384 GiB) -- m5d.xlarge (4 vCPU, 16 GiB) -- m5d.2xlarge (8 vCPU, 32 GiB) -- m5d.4xlarge (16 vCPU, 64 GiB) -- m5d.8xlarge (32 vCPU, 128 GiB) -- m5d.12xlarge (48 vCPU, 192 GiB) -- m5d.16xlarge (64 vCPU, 256 GiB) -- m5d.24xlarge (96 vCPU, 384 GiB) -- m5n.xlarge (4 vCPU, 16 GiB) -- m5n.2xlarge (8 vCPU, 32 GiB) -- m5n.4xlarge (16 vCPU, 64 GiB) -- m5n.8xlarge (32 vCPU, 128 GiB) -- m5n.12xlarge (48 vCPU, 192 GiB) -- m5n.16xlarge (64 vCPU, 256 GiB) -- m5n.24xlarge (96 vCPU, 384 GiB) -- m5dn.xlarge (4 vCPU, 16 GiB) -- m5dn.2xlarge (8 vCPU, 32 GiB) -- m5dn.4xlarge (16 vCPU, 64 GiB) -- m5dn.8xlarge (32 vCPU, 128 GiB) -- m5dn.12xlarge (48 vCPU, 192 GiB) -- m5dn.16xlarge (64 vCPU, 256 GiB) -- m5dn.24xlarge (96 vCPU, 384 GiB) -- m5zn.xlarge (4 vCPU, 16 GiB) -- m5zn.2xlarge (8 vCPU, 32 GiB) -- m5zn.3xlarge (12 vCPU, 48 GiB) -- m5zn.6xlarge (24 vCPU, 96 GiB) -- m5zn.12xlarge (48 vCPU, 192 GiB) -- m6i.xlarge (4 vCPU, 16 GiB) -- m6i.2xlarge (8 vCPU, 32 GiB) -- m6i.4xlarge (16 vCPU, 64 GiB) -- m6i.8xlarge (32 vCPU, 128 GiB) -- m6i.12xlarge (48 vCPU, 192 GiB) -- m6i.16xlarge (64 vCPU, 256 GiB) -- m6i.24xlarge (96 vCPU, 384 GiB) -- m6i.32xlarge (128 vCPU, 512 GiB) -==== - -.Burstable general purpose compute types -[%collapsible] -==== -- t3.xlarge (4 vCPU, 16 GiB) -- t3.2xlarge (8 vCPU, 32 GiB) -- t3a.xlarge (4 vCPU, 16 GiB) -- t3a.2xlarge (8 vCPU, 32 GiB) -==== - -.Memory-optimized -[%collapsible] -==== -- r4.xlarge (4 vCPU, 30.5 GiB) -- r4.2xlarge (8 vCPU, 61 GiB) -- r4.4xlarge (16 vCPU, 122 GiB) -- r4.8xlarge (32 vCPU, 244 GiB) -- r4.16xlarge (64 vCPU, 488 GiB) -- r5.xlarge (4 vCPU, 32 GiB) -- r5.2xlarge (8 vCPU, 64 GiB) -- r5.4xlarge (16 vCPU, 128 GiB) -- r5.8xlarge (32 vCPU, 256 GiB) -- r5.12xlarge (48 vCPU, 384 GiB) -- r5.16xlarge (64 vCPU, 512 GiB) -- r5.24xlarge (96 vCPU, 768 GiB) -- r5a.xlarge (4 vCPU, 32 GiB) -- r5a.2xlarge (8 vCPU, 64 GiB) -- r5a.4xlarge (16 vCPU, 128 GiB) -- r5a.8xlarge (32 vCPU, 256 GiB) -- r5a.12xlarge (48 vCPU, 384 GiB) -- r5a.16xlarge (64 vCPU, 512 GiB) -- r5a.24xlarge (96 vCPU, 768 GiB) -- r5ad.xlarge (4 vCPU, 32 GiB) -- r5ad.2xlarge (8 vCPU, 64 GiB) -- r5ad.4xlarge (16 vCPU, 128 GiB) -- r5ad.8xlarge (32 vCPU, 256 GiB) -- r5ad.12xlarge (48 vCPU, 384 GiB) -- r5ad.16xlarge (64 vCPU, 512 GiB) -- r5ad.24xlarge (96 vCPU, 768 GiB) -- r5d.xlarge (4 vCPU, 32 GiB) -- r5d.2xlarge (8 vCPU, 64 GiB) -- r5d.4xlarge (16 vCPU, 128 GiB) -- r5d.8xlarge (32 vCPU, 256 GiB) -- r5d.12xlarge (48 vCPU, 384 GiB) -- r5d.16xlarge (64 vCPU, 512 GiB) -- r5d.24xlarge (96 vCPU, 768 GiB) -- r5n.xlarge (4 vCPU, 32 GiB) -- r5n.2xlarge (8 vCPU, 64 GiB) -- r5n.4xlarge (16 vCPU, 128 GiB) -- r5n.8xlarge (32 vCPU, 256 GiB) -- r5n.12xlarge (48 vCPU, 384 GiB) -- r5n.16xlarge (64 vCPU, 512 GiB) -- r5n.24xlarge (96 vCPU, 768 GiB) -- r5dn.xlarge (4 vCPU, 32 GiB) -- r5dn.2xlarge (8 vCPU, 64 GiB) -- r5dn.4xlarge (16 vCPU, 128 GiB) -- r5dn.8xlarge (32 vCPU, 256 GiB) -- r5dn.12xlarge (48 vCPU, 384 GiB) -- r5dn.16xlarge (64 vCPU, 512 GiB) -- r5dn.24xlarge (96 vCPU, 768 GiB) -- r6i.xlarge (4 vCPU, 32 GiB) -- r6i.2xlarge (8 vCPU, 64 GiB) -- r6i.4xlarge (16 vCPU, 128 GiB) -- r6i.8xlarge 
(32 vCPU, 256 GiB) -- r6i.12xlarge (48 vCPU, 384 GiB) -- r6i.16xlarge (64 vCPU, 512 GiB) -- r6i.24xlarge (96 vCPU, 768 GiB) -- r6i.32xlarge (128 vCPU, 1,024 GiB) -- z1d.xlarge (4 vCPU, 32 GiB) -- z1d.2xlarge (8 vCPU, 64 GiB) -- z1d.3xlarge (12 vCPU, 96 GiB) -- z1d.6xlarge (24 vCPU, 192 GiB) -- z1d.12xlarge (48 vCPU, 384 GiB) -==== - -.Compute-optimized -[%collapsible] -==== -- c5.xlarge (4 vCPU, 8 GiB) -- c5.2xlarge (8 vCPU, 16 GiB) -- c5.4xlarge (16 vCPU, 32 GiB) -- c5.9xlarge (36 vCPU, 72 GiB) -- c5.12xlarge (48 vCPU, 96 GiB) -- c5.18xlarge (72 vCPU, 144 GiB) -- c5.24xlarge (96 vCPU, 192 GiB) -- c5d.xlarge (4 vCPU, 8 GiB) -- c5d.2xlarge (8 vCPU, 16 GiB) -- c5d.4xlarge (16 vCPU, 32 GiB) -- c5d.9xlarge (36 vCPU, 72 GiB) -- c5d.12xlarge (48 vCPU, 96 GiB) -- c5d.18xlarge (72 vCPU, 144 GiB) -- c5d.24xlarge (96 vCPU, 192 GiB) -- c5a.xlarge (4 vCPU, 8 GiB) -- c5a.2xlarge (8 vCPU, 16 GiB) -- c5a.4xlarge (16 vCPU, 32 GiB) -- c5a.8xlarge (32 vCPU, 64 GiB) -- c5a.12xlarge (48 vCPU, 96 GiB) -- c5a.16xlarge (64 vCPU, 128 GiB) -- c5a.24xlarge (96 vCPU, 192 GiB) -- c5ad.xlarge (4 vCPU, 8 GiB) -- c5ad.2xlarge (8 vCPU, 16 GiB) -- c5ad.4xlarge (16 vCPU, 32 GiB) -- c5ad.8xlarge (32 vCPU, 64 GiB) -- c5ad.12xlarge (48 vCPU, 96 GiB) -- c5ad.16xlarge (64 vCPU, 128 GiB) -- c5ad.24xlarge (96 vCPU, 192 GiB) -- c5n.xlarge (4 vCPU, 10.5 GiB) -- c5n.2xlarge (8 vCPU, 21 GiB) -- c5n.4xlarge (16 vCPU, 42 GiB) -- c5n.9xlarge (36 vCPU, 96 GiB) -- c5n.18xlarge (72 vCPU, 192 GiB) -- c6i.xlarge (4 vCPU, 8 GiB) -- c6i.2xlarge (8 vCPU, 16 GiB) -- c6i.4xlarge (16 vCPU, 32 GiB) -- c6i.8xlarge (32 vCPU, 64 GiB) -- c6i.12xlarge (48 vCPU, 96 GiB) -- c6i.16xlarge (64 vCPU, 128 GiB) -- c6i.24xlarge (96 vCPU, 192 GiB) -- c6i.32xlarge (128 vCPU, 256 GiB) -==== - -.Storage-optimized compute types -[%collapsible] -==== -- i3.xlarge (4 vCPU, 30.5 GiB) -- i3.2xlarge (8 vCPU, 61 GiB) -- i3.4xlarge (16 vCPU, 122 GiB) -- i3.8xlarge (32 vCPU, 244 GiB) -- i3.16xlarge (64 vCPU, 488 GiB) -- i3en.xlarge (4 vCPU, 32 GiB) -- i3en.2xlarge (8 vCPU, 64 GiB) -- i3en.3xlarge (12 vCPU, 96 GiB) -- i3en.6xlarge (24 vCPU, 192 GiB) -- i3en.12xlarge (48 vCPU, 384 GiB) -- i3en.24xlarge (96 vCPU, 768 GiB) -==== - -[id="aws-compute-types-non-ccs_{context}"] -== AWS compute types for standard clusters - -{product-title} offers the following worker node types and sizes on AWS: - -.General purpose -[%collapsible] -==== -- m5.xlarge (4 vCPU, 16 GiB) -- m5.2xlarge (8 vCPU, 32 GiB) -- m5.4xlarge (16 vCPU, 64 GiB) -- m5.8xlarge (32 vCPU, 128 GiB) -- m5.12xlarge (48 vCPU, 192 GiB) -- m5.16xlarge (64 vCPU, 256 GiB) -- m5.24xlarge (96 vCPU, 384 GiB) -==== - -.Memory-optimized -[%collapsible] -==== -- r5.xlarge (4 vCPU, 32 GiB) -- r5.2xlarge (8 vCPU, 64 GiB) -- r5.4xlarge (16 vCPU, 128 GiB) -- r5.8xlarge (32 vCPU, 256 GiB) -- r5.12xlarge (48 vCPU, 384 GiB) -- r5.16xlarge (64 vCPU, 512 GiB) -- r5.24xlarge (96 vCPU, 768 GiB) -==== - -.Compute-optimized -[%collapsible] -==== -- c5.2xlarge (8 vCPU, 16 GiB) -- c5.4xlarge (16 vCPU, 32 GiB) -- c5.9xlarge (36 vCPU, 72 GiB) -- c5.12xlarge (48 vCPU, 96 GiB) -- c5.18xlarge (72 vCPU, 144 GiB) -- c5.24xlarge (96 vCPU, 192 GiB) -==== - -[id="gcp-compute-types_{context}"] -== Google Cloud compute types - -{product-title} offers the following worker node types and sizes on Google Cloud that are chosen to have a common CPU and memory capacity that are the same as other cloud instance types: - -.General purpose -[%collapsible] -==== -* custom-4-16384 (4 vCPU, 16 GiB) -* custom-8-32768 (8 vCPU, 32 GiB) -* custom-16-65536 (16 vCPU, 64 GiB) -* 
custom-32-131072 (32 vCPU, 128 GiB) -* custom-48-196608 (48 vCPU, 192 GiB) -* custom-64-262144 (64 vCPU, 256 GiB) -* custom-96-393216 (96 vCPU, 384 GiB) -==== - -.Memory-optimized -[%collapsible] -==== -* custom-4-32768-ext (4 vCPU, 32 GiB) -* custom-8-65536-ext (8 vCPU, 64 GiB) -* custom-16-131072-ext (16 vCPU, 128 GiB) -* custom-32-262144 (32 vCPU, 256 GiB) -* custom-48-393216 (48 vCPU, 384 GiB) -* custom-64-524288 (64 vCPU, 512 GiB) -* custom-96-786432 (96 vCPU, 768 GiB) -==== - -.Compute-optimized -[%collapsible] -==== -* custom-8-16384 (8 vCPU, 16 GiB) -* custom-16-32768 (16 vCPU, 32 GiB) -* custom-36-73728 (36 vCPU, 72 GiB) -* custom-48-98304 (48 vCPU, 96 GiB) -* custom-72-147456 (72 vCPU, 144 GiB) -* custom-96-196608 (96 vCPU, 192 GiB) -==== - -[id="regions-availability-zones_{context}"] -== Regions and availability zones -The following AWS regions are supported by {OCP} 4 and are supported for {product-title}: - -* af-south-1 (Cape Town, AWS opt-in required) -* ap-east-1 (Hong Kong, AWS opt-in required) -* ap-northeast-1 (Tokyo) -* ap-northeast-2 (Seoul) -* ap-northeast-3 (Osaka) -* ap-south-1 (Mumbai) -* ap-southeast-1 (Singapore) -* ap-southeast-2 (Sydney) -* ca-central-1 (Central Canada) -* eu-central-1 (Frankfurt) -* eu-north-1 (Stockholm) -* eu-south-1 (Milan, AWS opt-in required) -* eu-west-1 (Ireland) -* eu-west-2 (London) -* eu-west-3 (Paris) -* me-south-1 (Bahrain, AWS opt-in required) -* sa-east-1 (São Paulo) -* us-east-1 (N. Virginia) -* us-east-2 (Ohio) -* us-west-1 (N. California) -* us-west-2 (Oregon) - -The following Google Cloud regions are currently supported: - -* asia-east1, Changhua County, Taiwan -* asia-east2, Hong Kong -* asia-northeast1, Tokyo, Japan -* asia-northeast2, Osaka, Japan -* asia-northeast3, Seoul, Korea -* asia-south1, Mumbai, India -* asia-southeast1, Jurong West, Singapore -* asia-southeast2, Jakarta, Indonesia -* europe-north1, Hamina, Finland -* europe-west1, St. Ghislain, Belgium -* europe-west2, London, England, UK -* europe-west3, Frankfurt, Germany -* europe-west4, Eemshaven, Netherlands -* europe-west6, Zürich, Switzerland -* northamerica-northeast1, Montréal, Québec, Canada -* southamerica-east1, Osasco (São Paulo), Brazil -* us-central1, Council Bluffs, Iowa, USA -* us-east1, Moncks Corner, South Carolina, USA -* us-east4, Ashburn, Northern Virginia, USA -* us-west1, The Dalles, Oregon, USA -* us-west2, Los Angeles, California, USA -* us-west3, Salt Lake City, Utah, USA -* us-west4, Las Vegas, Nevada, USA - -Multi-AZ clusters can only be deployed in regions with at least 3 availability zones (see link:https://aws.amazon.com/about-aws/global-infrastructure/regions_az/[AWS] and link:https://cloud.google.com/compute/docs/regions-zones[Google Cloud]). - -Each new {product-title} cluster is installed within a dedicated Virtual Private Cloud (VPC) in a single Region, with the option to deploy into a single Availability Zone (Single-AZ) or across multiple Availability Zones (Multi-AZ). This provides cluster-level network and resource isolation, and enables cloud-provider VPC settings, such as VPN connections and VPC Peering. Persistent volumes are backed by cloud block storage and are specific to the availability zone in which they are provisioned. Persistent volumes do not bind to a volume until the associated pod resource is assigned into a specific availability zone in order to prevent unschedulable pods. Availability zone-specific resources are only usable by resources in the same availability zone. 
- -[WARNING] -==== -The region and the choice of single or multi availability zone cannot be changed once a cluster has been deployed. -==== - -[id="sla_{context}"] -== Service level agreement (SLA) -Any SLAs for the service itself are defined in Appendix 4 of the link:https://www.redhat.com/en/about/agreements[Red Hat Enterprise Agreement Appendix 4 (Online Subscription Services)]. - -[id="limited-support_{context}"] -== Limited support status - -When a cluster transitions to a _Limited Support_ status, Red Hat no longer proactively monitors the cluster, the SLA is no longer applicable, and credits requested against the SLA are denied. It does not mean that you no longer have product support. In some cases, the cluster can return to a fully-supported status if you remediate the violating factors. However, in other cases, you might have to delete and recreate the cluster. - -A cluster might transition to a Limited Support status for many reasons, including the following scenarios: - -If you do not upgrade a cluster to a supported version before the end-of-life date:: Red Hat does not make any runtime or SLA guarantees for versions after their end-of-life date. To receive continued support, upgrade the cluster to a supported version prior to the end-of-life date. If you do not upgrade the cluster prior to the end-of-life date, the cluster transitions to a Limited Support status until it is upgraded to a supported version. -+ -Red Hat provides commercially reasonable support to upgrade from an unsupported version to a supported version. However, if a supported upgrade path is no longer available, you might have to create a new cluster and migrate your workloads. - -If you remove or replace any native {product-title} components or any other component that is installed and managed by Red Hat:: If cluster administrator permissions were used, Red Hat is not responsible for any of your or your authorized users’ actions, including those that affect infrastructure services, service availability, or data loss. If Red Hat detects any such actions, the cluster might transition to a Limited Support status. Red Hat notifies you of the status change and you should either revert the action or create a support case to explore remediation steps that might require you to delete and recreate the cluster. - -If you have questions about a specific action that might cause a cluster to transition to a Limited Support status or need further assistance, open a support ticket. - -[id="support_{context}"] -== Support -{product-title} includes Red Hat Premium Support, which can be accessed by using the link:https://access.redhat.com/support?extIdCarryOver=true&sc_cid=701f2000001Css5AAC[Red Hat Customer Portal]. - -See the link:https://access.redhat.com/support/offerings/production/soc[Scope of Coverage Page] for link:https://access.redhat.com/support/offerings/production/scope_moredetail[more details] on what is covered with included support for {product-title}. - -See {product-title} link:https://access.redhat.com/support/offerings/openshift/sla?extIdCarryOver=true&sc_cid=701f2000001Css5AAC[SLAs] for support response times. 
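The compute sections above note that roughly one vCPU and 1 GiB of memory on each worker node are reserved for platform processes and removed from allocatable resources. A minimal way to observe this on a running cluster, assuming `oc` access and a placeholder worker node name, is to compare node capacity with allocatable resources:

[source,terminal]
----
# List the worker nodes by their standard role label.
oc get nodes -l node-role.kubernetes.io/worker

# Compare Capacity and Allocatable for one worker node; the difference reflects
# the reserved vCPU and memory described above.
oc describe node <worker-node-name> | grep -A 6 -E "Capacity|Allocatable"
----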
diff --git a/modules/sdpolicy-am-aws-compute-types-ccs.adoc b/modules/sdpolicy-am-aws-compute-types-ccs.adoc new file mode 100644 index 0000000000..ebbeb4cfdb --- /dev/null +++ b/modules/sdpolicy-am-aws-compute-types-ccs.adoc @@ -0,0 +1,191 @@ + +// Module included in the following assemblies: +// +// * assemblies/osd-service-definition.adoc +:_content-type: CONCEPT +[id="aws-compute-types-ccs_{context}"] += AWS compute types for Customer Cloud Subscription clusters + +{product-title} offers the following worker node types and sizes on AWS: + +.General purpose +[%collapsible] +==== +- m5.xlarge (4 vCPU, 16 GiB) +- m5.2xlarge (8 vCPU, 32 GiB) +- m5.4xlarge (16 vCPU, 64 GiB) +- m5.8xlarge (32 vCPU, 128 GiB) +- m5.12xlarge (48 vCPU, 192 GiB) +- m5.16xlarge (64 vCPU, 256 GiB) +- m5.24xlarge (96 vCPU, 384 GiB) +- m5d.xlarge (4 vCPU, 16 GiB) +- m5d.2xlarge (8 vCPU, 32 GiB) +- m5d.4xlarge (16 vCPU, 64 GiB) +- m5d.8xlarge (32 vCPU, 128 GiB) +- m5d.12xlarge (48 vCPU, 192 GiB) +- m5d.16xlarge (64 vCPU, 256 GiB) +- m5d.24xlarge (96 vCPU, 384 GiB) +- m5n.xlarge (4 vCPU, 16 GiB) +- m5n.2xlarge (8 vCPU, 32 GiB) +- m5n.4xlarge (16 vCPU, 64 GiB) +- m5n.8xlarge (32 vCPU, 128 GiB) +- m5n.12xlarge (48 vCPU, 192 GiB) +- m5n.16xlarge (64 vCPU, 256 GiB) +- m5n.24xlarge (96 vCPU, 384 GiB) +- m5dn.xlarge (4 vCPU, 16 GiB) +- m5dn.2xlarge (8 vCPU, 32 GiB) +- m5dn.4xlarge (16 vCPU, 64 GiB) +- m5dn.8xlarge (32 vCPU, 128 GiB) +- m5dn.12xlarge (48 vCPU, 192 GiB) +- m5dn.16xlarge (64 vCPU, 256 GiB) +- m5dn.24xlarge (96 vCPU, 384 GiB) +- m5zn.xlarge (4 vCPU, 16 GiB) +- m5zn.2xlarge (8 vCPU, 32 GiB) +- m5zn.3xlarge (12 vCPU, 48 GiB) +- m5zn.6xlarge (24 vCPU, 96 GiB) +- m5zn.12xlarge (48 vCPU, 192 GiB) +- m6i.xlarge (4 vCPU, 16 GiB) +- m6i.2xlarge (8 vCPU, 32 GiB) +- m6i.4xlarge (16 vCPU, 64 GiB) +- m6i.8xlarge (32 vCPU, 128 GiB) +- m6i.12xlarge (48 vCPU, 192 GiB) +- m6i.16xlarge (64 vCPU, 256 GiB) +- m6i.24xlarge (96 vCPU, 384 GiB) +- m6i.32xlarge (128 vCPU, 512 GiB) +==== + +.Burstable general purpose compute types +[%collapsible] +==== +- t3.xlarge (4 vCPU, 16 GiB) +- t3.2xlarge (8 vCPU, 32 GiB) +- t3a.xlarge (4 vCPU, 16 GiB) +- t3a.2xlarge (8 vCPU, 32 GiB) +==== + +.Memory-optimized +[%collapsible] +==== +- r4.xlarge (4 vCPU, 30.5 GiB) +- r4.2xlarge (8 vCPU, 61 GiB) +- r4.4xlarge (16 vCPU, 122 GiB) +- r4.8xlarge (32 vCPU, 244 GiB) +- r4.16xlarge (64 vCPU, 488 GiB) +- r5.xlarge (4 vCPU, 32 GiB) +- r5.2xlarge (8 vCPU, 64 GiB) +- r5.4xlarge (16 vCPU, 128 GiB) +- r5.8xlarge (32 vCPU, 256 GiB) +- r5.12xlarge (48 vCPU, 384 GiB) +- r5.16xlarge (64 vCPU, 512 GiB) +- r5.24xlarge (96 vCPU, 768 GiB) +- r5a.xlarge (4 vCPU, 32 GiB) +- r5a.2xlarge (8 vCPU, 64 GiB) +- r5a.4xlarge (16 vCPU, 128 GiB) +- r5a.8xlarge (32 vCPU, 256 GiB) +- r5a.12xlarge (48 vCPU, 384 GiB) +- r5a.16xlarge (64 vCPU, 512 GiB) +- r5a.24xlarge (96 vCPU, 768 GiB) +- r5ad.xlarge (4 vCPU, 32 GiB) +- r5ad.2xlarge (8 vCPU, 64 GiB) +- r5ad.4xlarge (16 vCPU, 128 GiB) +- r5ad.8xlarge (32 vCPU, 256 GiB) +- r5ad.12xlarge (48 vCPU, 384 GiB) +- r5ad.16xlarge (64 vCPU, 512 GiB) +- r5ad.24xlarge (96 vCPU, 768 GiB) +- r5d.xlarge (4 vCPU, 32 GiB) +- r5d.2xlarge (8 vCPU, 64 GiB) +- r5d.4xlarge (16 vCPU, 128 GiB) +- r5d.8xlarge (32 vCPU, 256 GiB) +- r5d.12xlarge (48 vCPU, 384 GiB) +- r5d.16xlarge (64 vCPU, 512 GiB) +- r5d.24xlarge (96 vCPU, 768 GiB) +- r5n.xlarge (4 vCPU, 32 GiB) +- r5n.2xlarge (8 vCPU, 64 GiB) +- r5n.4xlarge (16 vCPU, 128 GiB) +- r5n.8xlarge (32 vCPU, 256 GiB) +- r5n.12xlarge (48 vCPU, 384 GiB) +- r5n.16xlarge (64 vCPU, 512 GiB) +- r5n.24xlarge (96 vCPU, 768 
GiB) +- r5dn.xlarge (4 vCPU, 32 GiB) +- r5dn.2xlarge (8 vCPU, 64 GiB) +- r5dn.4xlarge (16 vCPU, 128 GiB) +- r5dn.8xlarge (32 vCPU, 256 GiB) +- r5dn.12xlarge (48 vCPU, 384 GiB) +- r5dn.16xlarge (64 vCPU, 512 GiB) +- r5dn.24xlarge (96 vCPU, 768 GiB) +- r6i.xlarge (4 vCPU, 32 GiB) +- r6i.2xlarge (8 vCPU, 64 GiB) +- r6i.4xlarge (16 vCPU, 128 GiB) +- r6i.8xlarge (32 vCPU, 256 GiB) +- r6i.12xlarge (48 vCPU, 384 GiB) +- r6i.16xlarge (64 vCPU, 512 GiB) +- r6i.24xlarge (96 vCPU, 768 GiB) +- r6i.32xlarge (128 vCPU, 1,024 GiB) +- z1d.xlarge (4 vCPU, 32 GiB) +- z1d.2xlarge (8 vCPU, 64 GiB) +- z1d.3xlarge (12 vCPU, 96 GiB) +- z1d.6xlarge (24 vCPU, 192 GiB) +- z1d.12xlarge (48 vCPU, 384 GiB) +==== + +.Compute-optimized +[%collapsible] +==== +- c5.xlarge (4 vCPU, 8 GiB) +- c5.2xlarge (8 vCPU, 16 GiB) +- c5.4xlarge (16 vCPU, 32 GiB) +- c5.9xlarge (36 vCPU, 72 GiB) +- c5.12xlarge (48 vCPU, 96 GiB) +- c5.18xlarge (72 vCPU, 144 GiB) +- c5.24xlarge (96 vCPU, 192 GiB) +- c5d.xlarge (4 vCPU, 8 GiB) +- c5d.2xlarge (8 vCPU, 16 GiB) +- c5d.4xlarge (16 vCPU, 32 GiB) +- c5d.9xlarge (36 vCPU, 72 GiB) +- c5d.12xlarge (48 vCPU, 96 GiB) +- c5d.18xlarge (72 vCPU, 144 GiB) +- c5d.24xlarge (96 vCPU, 192 GiB) +- c5a.xlarge (4 vCPU, 8 GiB) +- c5a.2xlarge (8 vCPU, 16 GiB) +- c5a.4xlarge (16 vCPU, 32 GiB) +- c5a.8xlarge (32 vCPU, 64 GiB) +- c5a.12xlarge (48 vCPU, 96 GiB) +- c5a.16xlarge (64 vCPU, 128 GiB) +- c5a.24xlarge (96 vCPU, 192 GiB) +- c5ad.xlarge (4 vCPU, 8 GiB) +- c5ad.2xlarge (8 vCPU, 16 GiB) +- c5ad.4xlarge (16 vCPU, 32 GiB) +- c5ad.8xlarge (32 vCPU, 64 GiB) +- c5ad.12xlarge (48 vCPU, 96 GiB) +- c5ad.16xlarge (64 vCPU, 128 GiB) +- c5ad.24xlarge (96 vCPU, 192 GiB) +- c5n.xlarge (4 vCPU, 10.5 GiB) +- c5n.2xlarge (8 vCPU, 21 GiB) +- c5n.4xlarge (16 vCPU, 42 GiB) +- c5n.9xlarge (36 vCPU, 96 GiB) +- c5n.18xlarge (72 vCPU, 192 GiB) +- c6i.xlarge (4 vCPU, 8 GiB) +- c6i.2xlarge (8 vCPU, 16 GiB) +- c6i.4xlarge (16 vCPU, 32 GiB) +- c6i.8xlarge (32 vCPU, 64 GiB) +- c6i.12xlarge (48 vCPU, 96 GiB) +- c6i.16xlarge (64 vCPU, 128 GiB) +- c6i.24xlarge (96 vCPU, 192 GiB) +- c6i.32xlarge (128 vCPU, 256 GiB) +==== + +.Storage-optimized compute types +[%collapsible] +==== +- i3.xlarge (4 vCPU, 30.5 GiB) +- i3.2xlarge (8 vCPU, 61 GiB) +- i3.4xlarge (16 vCPU, 122 GiB) +- i3.8xlarge (32 vCPU, 244 GiB) +- i3.16xlarge (64 vCPU, 488 GiB) +- i3en.xlarge (4 vCPU, 32 GiB) +- i3en.2xlarge (8 vCPU, 64 GiB) +- i3en.3xlarge (12 vCPU, 96 GiB) +- i3en.6xlarge (24 vCPU, 192 GiB) +- i3en.12xlarge (48 vCPU, 384 GiB) +- i3en.24xlarge (96 vCPU, 768 GiB) +==== \ No newline at end of file diff --git a/modules/sdpolicy-am-aws-compute-types-non-ccs.adoc b/modules/sdpolicy-am-aws-compute-types-non-ccs.adoc new file mode 100644 index 0000000000..ce9ce6285b --- /dev/null +++ b/modules/sdpolicy-am-aws-compute-types-non-ccs.adoc @@ -0,0 +1,32 @@ + +// Module included in the following assemblies: +// +// * assemblies/osd-service-definition.adoc +:_content-type: CONCEPT +[id="aws-compute-types-non-ccs_{context}"] += AWS compute types for standard clusters + +{product-title} offers the following worker node types and sizes on AWS: + +.General purpose +[%collapsible] +==== +- m5.xlarge (4 vCPU, 16 GiB) +- m5.2xlarge (8 vCPU, 32 GiB) +- m5.4xlarge (16 vCPU, 64 GiB) +==== + +.Memory-optimized +[%collapsible] +==== +- r5.xlarge (4 vCPU, 32 GiB) +- r5.2xlarge (8 vCPU, 64 GiB) +- r5.4xlarge (16 vCPU, 128 GiB) +==== + +.Compute-optimized +[%collapsible] +==== +- c5.2xlarge (8 vCPU, 16 GiB) +- c5.4xlarge (16 vCPU, 32 GiB) +==== \ No newline at end of file diff --git 
a/modules/sdpolicy-am-billing.adoc b/modules/sdpolicy-am-billing.adoc new file mode 100644 index 0000000000..3c6b5caa08 --- /dev/null +++ b/modules/sdpolicy-am-billing.adoc @@ -0,0 +1,20 @@ + +// Module included in the following assemblies: +// +// * assemblies/osd-service-definition.adoc +:_content-type: CONCEPT +[id="billing_{context}"] += Billing +Each {product-title} cluster requires a minimum annual base cluster purchase, and there are two billing options available for each cluster: Standard and Customer Cloud Subscription (CCS). + +Standard {product-title} clusters are deployed into their own cloud infrastructure accounts, each owned by Red Hat. Red Hat is responsible for this account, and cloud infrastructure costs are paid directly by Red Hat. The customer only pays the Red Hat subscription costs. + +In the CCS model, the customer pays the cloud infrastructure provider directly for cloud costs, and the cloud infrastructure account is part of a customer’s Organization, with specific access granted to Red Hat. In this model, the customer pays Red Hat for the CCS subscription and pays the cloud provider for the cloud costs. It is the customer's responsibility to pre-purchase or provide Reserved Instance (RI) compute instances to ensure lower cloud infrastructure costs. + +Additional resources can be purchased for an {product-title} cluster, including: + +* Additional nodes (can be different types and sizes through the use of machine pools) +* Middleware (JBoss EAP, JBoss Fuse, and so on) - additional pricing based on the specific middleware component +* Additional storage in increments of 500 GB (standard only; 100 GB included) +* Additional 12 TiB Network I/O (standard only; 12 TB included) +* Load Balancers for Services are available in bundles of 4; these enable non-HTTP/SNI traffic or non-standard ports (standard only) \ No newline at end of file diff --git a/modules/sdpolicy-am-cloud-providers.adoc b/modules/sdpolicy-am-cloud-providers.adoc new file mode 100644 index 0000000000..3f8a3369af --- /dev/null +++ b/modules/sdpolicy-am-cloud-providers.adoc @@ -0,0 +1,12 @@ + +// Module included in the following assemblies: +// +// * assemblies/osd-service-definition.adoc +:_content-type: CONCEPT +[id="cloud-providers_{context}"] += Cloud providers + +{product-title} offers OpenShift Container Platform clusters as a managed service on the following cloud providers: + +* Amazon Web Services (AWS) +* Google Cloud Platform (GCP) \ No newline at end of file diff --git a/modules/sdpolicy-am-cluster-self-service.adoc b/modules/sdpolicy-am-cluster-self-service.adoc new file mode 100644 index 0000000000..e3fbcc802b --- /dev/null +++ b/modules/sdpolicy-am-cluster-self-service.adoc @@ -0,0 +1,11 @@ + +// Module included in the following assemblies: +// +// * assemblies/osd-service-definition.adoc +:_content-type: CONCEPT +[id="cluster-self-service_{context}"] += Cluster self-service + +Customers can create, scale, and delete their clusters from {cluster-manager-url}, provided that they have pre-purchased the necessary subscriptions. + +Actions available in {cluster-manager-first} must not be performed directly from within the cluster because this might cause adverse effects, including having all actions automatically reverted.
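+
+The following sketch is for illustration only: it assumes the optional `ocm` command-line tool is installed and authenticated against {cluster-manager-first}. The cluster name `my-cluster` is a placeholder, the exact subcommands and flags depend on the installed version, and the web console remains the reference interface for these self-service actions.
+
+[source,terminal]
+----
+# List the clusters that are registered to your organization.
+$ ocm list clusters
+
+# Create a cluster with default settings. Additional flags are typically
+# required for CCS clusters and other non-default configurations.
+$ ocm create cluster my-cluster
+----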
\ No newline at end of file diff --git a/modules/sdpolicy-am-compute.adoc b/modules/sdpolicy-am-compute.adoc new file mode 100644 index 0000000000..d1891ca737 --- /dev/null +++ b/modules/sdpolicy-am-compute.adoc @@ -0,0 +1,25 @@ + +// Module included in the following assemblies: +// +// * assemblies/osd-service-definition.adoc +:_content-type: CONCEPT +[id="compute_{context}"] += Compute + +Single availability zone clusters require a minimum of 2 worker nodes for Customer Cloud Subscription (CCS) clusters deployed to a single availability zone. A minimum of 4 worker nodes is required for standard clusters. These 4 worker nodes are included in the base subscription. + +Multiple availability zone clusters require a minimum of 3 worker nodes for Customer Cloud Subscription (CCS) clusters, with 1 deployed to each of the 3 availability zones. A minimum of 9 worker nodes is required for standard clusters. These 9 worker nodes are included in the base subscription, and additional nodes must be purchased in multiples of 3 to maintain proper node distribution. + +Worker nodes must all be the same type and size within a single {product-title} cluster. + +[NOTE] +==== +The default machine pool node type and size cannot be changed after the cluster has been created. +==== + +Control plane and infrastructure nodes are also provided by Red Hat. There are at least 3 control plane nodes that handle etcd- and API-related workloads. There are at least 2 infrastructure nodes that handle metrics, routing, the web console, and other workloads. You must not run any workloads on the control plane and infrastructure nodes. Any workloads you intend to run must be deployed on worker nodes. See the Red Hat Operator support section below for more information about Red Hat workloads that must be deployed on worker nodes. + +[NOTE] +==== +Approximately 1 vCPU core and 1 GiB of memory are reserved on each worker node and removed from allocatable resources. This is necessary to run link:https://kubernetes.io/docs/tasks/administer-cluster/reserve-compute-resources/#system-reserved[processes required by the underlying platform]. This includes system daemons such as udev, kubelet, container runtime, and so on, and also accounts for kernel reservations. {OCP} core systems such as audit log aggregation, metrics collection, DNS, image registry, SDN, and so on might consume additional allocatable resources to maintain the stability and maintainability of the cluster. The additional resources consumed might vary based on usage.
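+
+As an illustration only, you can see the effect of this reservation by comparing the `Capacity` and `Allocatable` sections in the description of any worker node, where `<node_name>` is a placeholder for one of your worker node names:
+
+[source,terminal]
+----
+# Show node details, including total capacity and the allocatable
+# resources that remain after system reservations.
+$ oc describe node <node_name>
+----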
+==== \ No newline at end of file diff --git a/modules/sdpolicy-am-gcp-compute-types.adoc b/modules/sdpolicy-am-gcp-compute-types.adoc new file mode 100644 index 0000000000..e798bf396e --- /dev/null +++ b/modules/sdpolicy-am-gcp-compute-types.adoc @@ -0,0 +1,32 @@ + +// Module included in the following assemblies: +// +// * assemblies/osd-service-definition.adoc +:_content-type: CONCEPT +[id="gcp-compute-types_{context}"] += Google Cloud compute types + +{product-title} offers the following worker node types and sizes on Google Cloud, chosen to provide CPU and memory capacity in common with the instance types offered on other cloud providers: + +.General purpose +[%collapsible] +==== +* custom-4-16384 (4 vCPU, 16 GiB) +* custom-8-32768 (8 vCPU, 32 GiB) +* custom-16-65536 (16 vCPU, 64 GiB) +==== + +.Memory-optimized +[%collapsible] +==== +* custom-4-32768-ext (4 vCPU, 32 GiB) +* custom-8-65536-ext (8 vCPU, 64 GiB) +* custom-16-131072-ext (16 vCPU, 128 GiB) +==== + +.Compute-optimized +[%collapsible] +==== +* custom-8-16384 (8 vCPU, 16 GiB) +* custom-16-32768 (16 vCPU, 32 GiB) +==== \ No newline at end of file diff --git a/modules/sdpolicy-am-limited-support.adoc b/modules/sdpolicy-am-limited-support.adoc new file mode 100644 index 0000000000..0ac264ac5d --- /dev/null +++ b/modules/sdpolicy-am-limited-support.adoc @@ -0,0 +1,19 @@ + +// Module included in the following assemblies: +// +// * assemblies/osd-service-definition.adoc +:_content-type: CONCEPT +[id="limited-support_{context}"] += Limited support status + +When a cluster transitions to a _Limited Support_ status, Red Hat no longer proactively monitors the cluster, the SLA is no longer applicable, and credits requested against the SLA are denied. It does not mean that you no longer have product support. In some cases, the cluster can return to a fully supported status if you remediate the violating factors. However, in other cases, you might have to delete and recreate the cluster. + +A cluster might transition to a Limited Support status for many reasons, including the following scenarios: + +If you do not upgrade a cluster to a supported version before the end-of-life date:: Red Hat does not make any runtime or SLA guarantees for versions after their end-of-life date. To receive continued support, upgrade the cluster to a supported version prior to the end-of-life date. If you do not upgrade the cluster prior to the end-of-life date, the cluster transitions to a Limited Support status until it is upgraded to a supported version. ++ +Red Hat provides commercially reasonable support to upgrade from an unsupported version to a supported version. However, if a supported upgrade path is no longer available, you might have to create a new cluster and migrate your workloads. + +If you remove or replace any native {product-title} components or any other component that is installed and managed by Red Hat:: If cluster administrator permissions were used, Red Hat is not responsible for any of your or your authorized users’ actions, including those that affect infrastructure services, service availability, or data loss. If Red Hat detects any such actions, the cluster might transition to a Limited Support status. Red Hat notifies you of the status change, and you should either revert the action or create a support case to explore remediation steps that might require you to delete and recreate the cluster.
+ +If you have questions about a specific action that might cause a cluster to transition to a Limited Support status or need further assistance, open a support ticket. \ No newline at end of file diff --git a/modules/sdpolicy-am-regions-availability-zones.adoc b/modules/sdpolicy-am-regions-availability-zones.adoc new file mode 100644 index 0000000000..52a4a13b5c --- /dev/null +++ b/modules/sdpolicy-am-regions-availability-zones.adoc @@ -0,0 +1,65 @@ + +// Module included in the following assemblies: +// +// * assemblies/osd-service-definition.adoc +:_content-type: CONCEPT +[id="regions-availability-zones_{context}"] += Regions and availability zones +The following AWS regions are supported by {OCP} 4 and for {product-title}: + +* af-south-1 (Cape Town, AWS opt-in required) +* ap-east-1 (Hong Kong, AWS opt-in required) +* ap-northeast-1 (Tokyo) +* ap-northeast-2 (Seoul) +* ap-northeast-3 (Osaka) +* ap-south-1 (Mumbai) +* ap-southeast-1 (Singapore) +* ap-southeast-2 (Sydney) +* ca-central-1 (Central Canada) +* eu-central-1 (Frankfurt) +* eu-north-1 (Stockholm) +* eu-south-1 (Milan, AWS opt-in required) +* eu-west-1 (Ireland) +* eu-west-2 (London) +* eu-west-3 (Paris) +* me-south-1 (Bahrain, AWS opt-in required) +* sa-east-1 (São Paulo) +* us-east-1 (N. Virginia) +* us-east-2 (Ohio) +* us-west-1 (N. California) +* us-west-2 (Oregon) + +The following Google Cloud regions are currently supported: + +* asia-east1, Changhua County, Taiwan +* asia-east2, Hong Kong +* asia-northeast1, Tokyo, Japan +* asia-northeast2, Osaka, Japan +* asia-northeast3, Seoul, Korea +* asia-south1, Mumbai, India +* asia-southeast1, Jurong West, Singapore +* asia-southeast2, Jakarta, Indonesia +* europe-north1, Hamina, Finland +* europe-west1, St. Ghislain, Belgium +* europe-west2, London, England, UK +* europe-west3, Frankfurt, Germany +* europe-west4, Eemshaven, Netherlands +* europe-west6, Zürich, Switzerland +* northamerica-northeast1, Montréal, Québec, Canada +* southamerica-east1, Osasco (São Paulo), Brazil +* us-central1, Council Bluffs, Iowa, USA +* us-east1, Moncks Corner, South Carolina, USA +* us-east4, Ashburn, Northern Virginia, USA +* us-west1, The Dalles, Oregon, USA +* us-west2, Los Angeles, California, USA +* us-west3, Salt Lake City, Utah, USA +* us-west4, Las Vegas, Nevada, USA + +Multi-AZ clusters can only be deployed in regions with at least 3 availability zones (see link:https://aws.amazon.com/about-aws/global-infrastructure/regions_az/[AWS] and link:https://cloud.google.com/compute/docs/regions-zones[Google Cloud]). + +Each new {product-title} cluster is installed within a dedicated Virtual Private Cloud (VPC) in a single Region, with the option to deploy into a single Availability Zone (Single-AZ) or across multiple Availability Zones (Multi-AZ). This provides cluster-level network and resource isolation, and enables cloud-provider VPC settings, such as VPN connections and VPC Peering. Persistent volumes are backed by cloud block storage and are specific to the availability zone in which they are provisioned. Persistent volume claims do not bind to a persistent volume until the associated pod resource is assigned to a specific availability zone, which prevents pods from becoming unschedulable. Availability zone-specific resources are only usable by resources in the same availability zone. + +[WARNING] +==== +The region and the choice of single or multiple availability zone deployment cannot be changed after a cluster has been deployed.
+==== \ No newline at end of file diff --git a/modules/sdpolicy-am-sla.adoc b/modules/sdpolicy-am-sla.adoc new file mode 100644 index 0000000000..99cd6a8b8e --- /dev/null +++ b/modules/sdpolicy-am-sla.adoc @@ -0,0 +1,8 @@ + +// Module included in the following assemblies: +// +// * assemblies/osd-service-definition.adoc +:_content-type: CONCEPT +[id="sla_{context}"] += Service level agreement (SLA) +Any SLAs for the service itself are defined in link:https://www.redhat.com/en/about/agreements[Appendix 4 of the Red Hat Enterprise Agreement (Online Subscription Services)]. \ No newline at end of file diff --git a/modules/sdpolicy-am-support.adoc b/modules/sdpolicy-am-support.adoc new file mode 100644 index 0000000000..016f001262 --- /dev/null +++ b/modules/sdpolicy-am-support.adoc @@ -0,0 +1,12 @@ + +// Module included in the following assemblies: +// +// * assemblies/osd-service-definition.adoc +:_content-type: CONCEPT +[id="support_{context}"] += Support +{product-title} includes Red Hat Premium Support, which can be accessed by using the link:https://access.redhat.com/support?extIdCarryOver=true&sc_cid=701f2000001Css5AAC[Red Hat Customer Portal]. + +See the link:https://access.redhat.com/support/offerings/production/soc[Scope of Coverage page] for link:https://access.redhat.com/support/offerings/production/scope_moredetail[more details] on what is covered with included support for {product-title}. + +See {product-title} link:https://access.redhat.com/support/offerings/openshift/sla?extIdCarryOver=true&sc_cid=701f2000001Css5AAC[SLAs] for support response times. diff --git a/modules/sdpolicy-platform.adoc b/modules/sdpolicy-platform.adoc index 68ca2774ca..0780ea360e 100644 --- a/modules/sdpolicy-platform.adoc +++ b/modules/sdpolicy-platform.adoc @@ -90,6 +90,18 @@ Windows containers are not available on {product-title} at this time. == Operating system {product-title} runs on OpenShift 4 and uses Red Hat Enterprise Linux CoreOS as the operating system for all control plane and worker nodes. +[id="sdpolicy-red-hat-operator_{context}"] +== Red Hat Operator support +Red Hat workloads typically refer to Red Hat-provided Operators made available through OperatorHub. Red Hat workloads are not managed by the Red Hat SRE team and must be deployed on worker nodes. These Operators might require additional Red Hat subscriptions, and might incur additional cloud infrastructure costs. Examples of these Red Hat-provided Operators are: + +* {rhq-short} +* Red Hat Advanced Cluster Management +* Red Hat Advanced Cluster Security +* {SMProductName} +* {ServerlessProductName} +* {logging-sd} +* {pipelines-title} + [id="kubernetes-operator-support_{context}"] == Kubernetes Operator support All Operators listed in the OperatorHub marketplace should be available for installation. Operators installed from OperatorHub, including Red Hat Operators, are not SRE managed as part of the {product-title} service. Refer to the link:https://access.redhat.com/solutions/4807821[Red Hat Customer Portal] for more information on the supportability of a given Operator.
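+
+For reference only, a cluster administrator can list the Operators that OperatorHub currently exposes on a cluster by querying the package manifests; the exact entries vary with the cluster version and the configured catalogs:
+
+[source,terminal]
+----
+# List all Operator packages available from the configured catalog sources.
+$ oc get packagemanifests -n openshift-marketplace
+----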
diff --git a/osd_architecture/osd_policy/osd-service-definition.adoc b/osd_architecture/osd_policy/osd-service-definition.adoc index 7dd7f44b1f..e2679e1d01 100644 --- a/osd_architecture/osd_policy/osd-service-definition.adoc +++ b/osd_architecture/osd_policy/osd-service-definition.adoc @@ -6,7 +6,23 @@ include::_attributes/attributes-openshift-dedicated.adoc[] toc::[] -include::modules/sdpolicy-account-management.adoc[leveloffset=+1] +[id="sdpolicy-account-management_{context}"] +== Account management +include::modules/sdpolicy-am-billing.adoc[leveloffset=+2] +include::modules/sdpolicy-am-cluster-self-service.adoc[leveloffset=+2] +include::modules/sdpolicy-am-cloud-providers.adoc[leveloffset=+2] +include::modules/sdpolicy-am-compute.adoc[leveloffset=+2] + +.Additional resources +* xref:../../osd_architecture/osd_policy/osd-service-definition.adoc#sdpolicy-red-hat-operator_osd-service-definition[Red Hat Operator support] + +include::modules/sdpolicy-am-aws-compute-types-ccs.adoc[leveloffset=+2] +include::modules/sdpolicy-am-aws-compute-types-non-ccs.adoc[leveloffset=+2] +include::modules/sdpolicy-am-gcp-compute-types.adoc[leveloffset=+2] +include::modules/sdpolicy-am-regions-availability-zones.adoc[leveloffset=+2] +include::modules/sdpolicy-am-sla.adoc[leveloffset=+2] +include::modules/sdpolicy-am-limited-support.adoc[leveloffset=+2] +include::modules/sdpolicy-am-support.adoc[leveloffset=+2] include::modules/sdpolicy-logging.adoc[leveloffset=+1] include::modules/sdpolicy-monitoring.adoc[leveloffset=+1] include::modules/sdpolicy-networking.adoc[leveloffset=+1] diff --git a/rosa_architecture/rosa_policy_service_definition/rosa-service-definition.adoc b/rosa_architecture/rosa_policy_service_definition/rosa-service-definition.adoc index 95b25436a8..0c84f98590 100644 --- a/rosa_architecture/rosa_policy_service_definition/rosa-service-definition.adoc +++ b/rosa_architecture/rosa_policy_service_definition/rosa-service-definition.adoc @@ -8,7 +8,23 @@ toc::[] This documentation outlines the service definition for the {product-title} (ROSA) managed service. -include::modules/rosa-sdpolicy-account-management.adoc[leveloffset=+1] +[id="rosa-sdpolicy-account-management_{context}"] +== Account management + +This section provides information about the service definition for {product-title} account management. + +include::modules/rosa-sdpolicy-am-billing.adoc[leveloffset=+2] +include::modules/rosa-sdpolicy-am-cluster-self-service.adoc[leveloffset=+2] +include::modules/rosa-sdpolicy-am-compute.adoc[leveloffset=+2] + +.Additional resources
* xref:../../rosa_architecture/rosa_policy_service_definition/rosa-service-definition.adoc#rosa-sdpolicy-red-hat-operator_rosa-service-definition[Red Hat Operator support] + +include::modules/rosa-sdpolicy-am-aws-compute-types.adoc[leveloffset=+2] +include::modules/rosa-sdpolicy-am-regions-az.adoc[leveloffset=+2] +include::modules/rosa-sdpolicy-am-sla.adoc[leveloffset=+2] +include::modules/rosa-sdpolicy-am-limited-support.adoc[leveloffset=+2] +include::modules/rosa-sdpolicy-am-support.adoc[leveloffset=+2] include::modules/rosa-sdpolicy-logging.adoc[leveloffset=+1] include::modules/rosa-sdpolicy-monitoring.adoc[leveloffset=+1] include::modules/rosa-sdpolicy-networking.adoc[leveloffset=+1]