From 764086994827016f917da5dd32d6edd927335e76 Mon Sep 17 00:00:00 2001 From: Paul Needle Date: Tue, 23 Nov 2021 03:48:26 +0000 Subject: [PATCH] OSDOCS-2869 - Migrating unique files from dedicated-4 --- _topic_maps/_topic_map_osd.yml | 202 +++ _topic_maps/_topic_map_rosa.yml | 259 +++ adding_service_cluster/adding-service.adoc | 21 + .../available-services.adoc | 11 + adding_service_cluster/images | 1 + adding_service_cluster/modules | 1 + .../rosa-available-services.adoc | 14 + administering_a_cluster/osd-admin-roles.adoc | 12 + ...sd-config-custom-domains-applications.adoc | 8 + .../dedicated-aws-peering.adoc | 5 +- .../rosa-configuring-private-connections.adoc | 19 + .../rosa-private-cluster.adoc | 24 + .../config-identity-providers.adoc | 17 + identity_providers/images | 1 + identity_providers/modules | 1 + images/156_OpenShift_ROSA_Arch_0621_arch.svg | 1 + ...6_OpenShift_ROSA_Arch_0621_privatelink.svg | 322 ++++ images/osd-monitoring-architecture.svg | 1 + logging/rosa-install-logging.adoc | 30 + logging/rosa-viewing-logs.adoc | 9 + modules/access-cluster.adoc | 27 + modules/access-service.adoc | 34 + modules/add-user.adoc | 25 + modules/adding-service-existing.adoc | 40 + modules/attributes-openshift-dedicated.adoc | 12 + modules/aws-cloudwatch.adoc | 14 + modules/aws-direct-connect.adoc | 40 + modules/aws-limits.adoc | 85 + modules/aws-vpc.adoc | 38 + modules/aws-vpn.adoc | 45 + modules/ccs-aws-customer-procedure.adoc | 24 + modules/ccs-aws-customer-requirements.adoc | 72 + modules/ccs-aws-iam.adoc | 117 ++ modules/ccs-aws-provisioned.adoc | 77 + modules/ccs-aws-scp.adoc | 205 +++ modules/ccs-aws-understand.adoc | 13 + modules/ccs-gcp-customer-procedure.adoc | 107 ++ modules/ccs-gcp-customer-requirements.adoc | 67 + modules/ccs-gcp-iam.adoc | 105 ++ modules/ccs-gcp-understand.adoc | 13 + modules/codeready-workspaces.adoc | 17 + modules/config-aws-access.adoc | 74 + modules/config-github-idp.adoc | 68 + modules/config-gitlab-idp.adoc | 57 + modules/config-google-idp.adoc | 58 + modules/config-htpasswd-idp.adoc | 53 + modules/config-idp.adoc | 67 + modules/config-ldap-idp.adoc | 94 ++ modules/config-openid-idp.adoc | 105 ++ modules/container-benefits.adoc | 27 + modules/create-aws-cluster.adoc | 91 ++ modules/create-cluster.adoc | 42 + modules/create-gcp-cluster.adoc | 99 ++ modules/dedicated-aws-dc-existing.adoc | 8 +- modules/dedicated-aws-dc-hvif.adoc | 18 +- modules/dedicated-aws-dc-methods.adoc | 6 +- .../dedicated-aws-vpc-accepting-peering.adoc | 2 +- ...ed-aws-vpc-configuring-routing-tables.adoc | 4 +- .../dedicated-aws-vpc-initiating-peering.adoc | 7 +- modules/dedicated-aws-vpc-peering-terms.adoc | 6 +- modules/dedicated-aws-vpn-creating.adoc | 2 +- .../dedicated-aws-vpn-troubleshooting.adoc | 1 - modules/deleting-cluster.adoc | 21 + modules/deleting-service-cli.adoc | 20 + modules/deleting-service.adoc | 23 + modules/deploy-app.adoc | 60 + modules/enable-aws-access.adoc | 29 + modules/enable-private-cluster-existing.adoc | 40 + modules/enable-private-cluster-new.adoc | 39 + modules/enable-public-cluster.adoc | 25 + modules/gcp-limits.adoc | 64 + modules/kubernetes-about.adoc | 16 + modules/life-cycle-dates.adoc | 29 + modules/life-cycle-definitions.adoc | 41 + modules/life-cycle-install.adoc | 9 + modules/life-cycle-limited-support.adoc | 15 + modules/life-cycle-major-versions.adoc | 14 + modules/life-cycle-mandatory-upgrades.adoc | 15 + modules/life-cycle-minor-versions.adoc | 22 + modules/life-cycle-overview.adoc | 17 + modules/life-cycle-patch-versions.adoc | 17 + 
modules/life-cycle-supported-versions.adoc | 10 + .../managing-dedicated-administrators.adoc | 39 + modules/notification-subscribe.adoc | 22 + modules/ocm-disabling-autoscaling-nodes.adoc | 21 + modules/ocm-enabling-autoscaling-nodes.adoc | 21 + ...sd-applications-config-custom-domains.adoc | 107 ++ modules/osd-aws-privatelink-about.adoc | 6 + modules/osd-aws-privatelink-architecture.adoc | 43 + ...aws-privatelink-config-dns-forwarding.adoc | 30 + ...ws-privatelink-firewall-prerequisites.adoc | 291 ++++ ...sd-aws-privatelink-required-resources.adoc | 40 + modules/osd-intro.adoc | 58 + ...-tolerations-to-monitoring-components.adoc | 68 + ...-for-monitoring-user-defined-projects.adoc | 29 + ...ng-configurable-monitoring-components.adoc | 17 + ...uring-a-local-persistent-volume-claim.adoc | 95 ++ ...ring-configuring-the-monitoring-stack.adoc | 71 + ...monitoring-deploying-a-sample-service.adoc | 86 + ...ring-exploring-the-visualized-metrics.adoc | 47 + ...rape-samples-in-user-defined-projects.adoc | 20 + ...sd-monitoring-maintenance-and-support.adoc | 15 + ...tion-time-for-prometheus-metrics-data.adoc | 63 + ...itoring-components-to-different-nodes.adoc | 87 + ...-for-all-projects-as-an-administrator.adoc | 44 + ...-user-defined-projects-as-a-developer.adoc | 36 + modules/osd-monitoring-querying-metrics.adoc | 12 + ...ewing-monitoring-dashboards-developer.adoc | 26 + ...ample-limit-for-user-defined-projects.adoc | 51 + ...-log-levels-for-monitoring-components.adoc | 82 + ...-collection-for-user-defined-projects.adoc | 10 + ...specifying-how-a-service-is-monitored.adoc | 70 + ...osd-monitoring-support-considerations.adoc | 10 + ...ing-targets-for-user-defined-projects.adoc | 11 + ...osd-monitoring-troubleshooting-issues.adoc | 110 ++ .../osd-monitoring-understanding-metrics.adoc | 33 + ...ng-understanding-the-monitoring-stack.adoc | 16 + modules/osd-rhoam.adoc | 24 + .../osd-storage-pv-aws-config-account.adoc | 34 + modules/osd-storage-pv-aws-connect-pods.adoc | 37 + ...orage-pv-aws-create-sharedvolumes-cli.adoc | 38 + ...e-pv-aws-create-sharedvolumes-console.adoc | 37 + modules/osd-storage-pv-aws-install-efs.adoc | 13 + modules/osd-storage-pv-aws-uninstall-efs.adoc | 19 + modules/osd-vs-ocp.adoc | 39 + modules/policy-change-management.adoc | 71 + modules/policy-customer-responsibility.adoc | 41 + modules/policy-disaster-recovery.adoc | 16 + modules/policy-failure-points.adoc | 50 + .../policy-identity-access-management.adoc | 169 ++ modules/policy-incident.adoc | 91 ++ modules/policy-responsibilities.adoc | 55 + ...policy-security-regulation-compliance.adoc | 72 + modules/policy-shared-responsibility.adoc | 168 ++ modules/rosa-about.adoc | 11 + .../rosa-accessing-your-cluster-quick.adoc | 60 + modules/rosa-accessing-your-cluster.adoc | 144 ++ modules/rosa-adding-instance-types.adoc | 49 + modules/rosa-adding-node-labels.adoc | 55 + modules/rosa-architecture.adoc | 10 + modules/rosa-aws-iam.adoc | 40 + .../rosa-aws-privatelink-create-cluster.adoc | 59 + modules/rosa-aws-procedure.adoc | 18 + modules/rosa-aws-provisioned.adoc | 118 ++ modules/rosa-aws-requirements.adoc | 56 + modules/rosa-aws-scp.adoc | 208 +++ modules/rosa-aws-understand.adoc | 14 + ...osa-checking-account-version-info-cli.adoc | 78 + modules/rosa-common-commands.adoc | 83 + modules/rosa-configure.adoc | 271 ++++ modules/rosa-configuring-aws-account.adoc | 76 + modules/rosa-containers-concept.adoc | 18 + modules/rosa-create-cluster-admins.adoc | 62 + .../rosa-create-dedicated-cluster-admins.adoc | 43 + 
modules/rosa-create-objects.adoc | 500 ++++++ modules/rosa-creating-cluster.adoc | 96 ++ modules/rosa-delete-cluster-admins.adoc | 31 + modules/rosa-delete-dedicated-admins.adoc | 36 + modules/rosa-delete-objects.adoc | 276 ++++ ...sa-deleting-aws-resources-aws-console.adoc | 51 + modules/rosa-deleting-aws-resources-cli.adoc | 100 ++ modules/rosa-deleting-cluster.adoc | 37 + modules/rosa-disabling-autoscaling-nodes.adoc | 27 + modules/rosa-edit-objects.adoc | 228 +++ .../rosa-enable-private-cluster-existing.adoc | 33 + modules/rosa-enable-private-cluster-new.adoc | 33 + modules/rosa-enabling-autoscaling-nodes.adoc | 47 + modules/rosa-getting-support.adoc | 22 + modules/rosa-initialize.adoc | 94 ++ modules/rosa-install-logging-addon.adoc | 106 ++ modules/rosa-install-uninstall-addon.adoc | 112 ++ modules/rosa-installing.adoc | 208 +++ modules/rosa-kubernetes-concept.adoc | 28 + modules/rosa-list-objects.adoc | 569 +++++++ modules/rosa-logs.adoc | 120 ++ modules/rosa-openshift-concepts.adoc | 10 + modules/rosa-parent-commands.adoc | 65 + ...planning-cluster-maximums-environment.adoc | 58 + modules/rosa-planning-cluster-maximums.adoc | 54 + modules/rosa-planning-considerations.adoc | 42 + ...planning-environment-application-reqs.adoc | 169 ++ ...rosa-planning-environment-cluster-max.adoc | 42 + modules/rosa-policy-change-management.adoc | 67 + .../rosa-policy-customer-responsibility.adoc | 39 + modules/rosa-policy-disaster-recovery.adoc | 16 + modules/rosa-policy-failure-points.adoc | 50 + ...osa-policy-identity-access-management.adoc | 154 ++ modules/rosa-policy-incident.adoc | 90 ++ modules/rosa-policy-responsibilities.adoc | 55 + ...policy-security-regulation-compliance.adoc | 53 + .../rosa-policy-shared-responsibility.adoc | 167 ++ modules/rosa-quickstart-instructions.adoc | 29 + modules/rosa-required-aws-service-quotas.adoc | 88 + ...uirements-deploying-in-opt-in-regions.adoc | 16 + modules/rosa-scaling-worker-nodes.adoc | 49 + modules/rosa-sdpolicy-account-management.adoc | 134 ++ modules/rosa-sdpolicy-logging.adoc | 18 + modules/rosa-sdpolicy-monitoring.adoc | 21 + modules/rosa-sdpolicy-networking.adoc | 73 + modules/rosa-sdpolicy-platform.adoc | 99 ++ modules/rosa-sdpolicy-security.adoc | 60 + modules/rosa-sdpolicy-storage.adoc | 30 + ...etting-the-aws-security-token-version.adoc | 49 + modules/rosa-setting-up-cli.adoc | 26 + modules/rosa-sts-about-iam-resources.adoc | 14 + ...account-wide-role-and-policy-commands.adoc | 92 ++ ...a-sts-account-wide-roles-and-policies.adoc | 1422 +++++++++++++++++ modules/rosa-sts-aws-iam.adoc | 4 + modules/rosa-sts-aws-requirements.adoc | 42 + .../rosa-sts-creating-a-cluster-quickly.adoc | 98 ++ ...reating-a-cluster-with-customizations.adoc | 238 +++ .../rosa-sts-interactive-mode-reference.adoc | 70 + modules/rosa-sts-oidc-provider-command.adoc | 28 + modules/rosa-sts-oidc-provider.adoc | 8 + modules/rosa-sts-operator-role-commands.adoc | 72 + modules/rosa-sts-operator-roles.adoc | 38 + modules/rosa-sts-setting-up-environment.adoc | 206 +++ modules/rosa-sts-support-considerations.adoc | 15 + modules/rosa-troubleshooting-deployment.adoc | 63 + .../rosa-troubleshooting-expired-token.adoc | 22 + modules/rosa-troubleshooting-installing.adoc | 70 + modules/rosa-understanding.adoc | 31 + modules/rosa-upgrade-cluster-cli.adoc | 107 ++ modules/rosa-upgrading-automatic-ocm.adoc | 20 + modules/rosa-upgrading-cli-tutorial.adoc | 60 + modules/rosa-upgrading-manual-ocm.adoc | 54 + .../rosa-upgrading-preparing-4-7-to-4-8.adoc | 42 + 
modules/rosa-using-bash-script.adoc | 46 + modules/rosa-using-sts.adoc | 10 + modules/rosa-view-cloudwatch-logs.adoc | 21 + modules/scaling-cluster.adoc | 29 + modules/sdpolicy-account-management.adoc | 207 +++ modules/sdpolicy-logging.adoc | 18 + modules/sdpolicy-monitoring.adoc | 17 + modules/sdpolicy-networking.adoc | 82 + modules/sdpolicy-platform.adoc | 95 ++ modules/sdpolicy-security.adoc | 60 + modules/sdpolicy-storage.adoc | 28 + modules/understanding-admin-roles.adoc | 20 + modules/understanding-clusters.adoc | 18 + modules/understanding-idp.adoc | 82 + modules/upgrade-auto.adoc | 31 + modules/upgrade-manual.adoc | 45 + modules/upgrade.adoc | 55 + modules/viewing-notifications.adoc | 27 + ...-monitoring-for-user-defined-projects.adoc | 22 + .../osd-configuring-the-monitoring-stack.adoc | 77 + monitoring/osd-managing-alerts.adoc | 12 + monitoring/osd-managing-metrics.adoc | 45 + .../osd-reviewing-monitoring-dashboards.adoc | 31 + ...osd-troubleshooting-monitoring-issues.adoc | 10 + ...sd-understanding-the-monitoring-stack.adoc | 31 + nodes/nodes-about-autoscaling-nodes.adoc | 78 + nodes/nodes-machinepools-about.adoc | 40 + nodes/rosa-managing-worker-nodes.adoc | 20 + osd_architecture/images | 1 + osd_architecture/modules | 1 + osd_architecture/osd-architecture.adoc | 14 + osd_architecture/osd-understanding.adoc | 10 + osd_cluster_create/creating-your-cluster.adoc | 15 + osd_cluster_create/images | 1 + osd_cluster_create/modules | 1 + osd_notifications/images | 1 + osd_notifications/modules | 1 + osd_notifications/notifications.adoc | 15 + osd_planning/aws-ccs.adoc | 17 + osd_planning/gcp-ccs.adoc | 17 + osd_planning/images | 1 + osd_planning/modules | 1 + osd_policy/images | 1 + osd_policy/modules | 1 + osd_policy/osd-life-cycle.adoc | 22 + osd_policy/osd-service-definition.adoc | 14 + osd_policy/policy-process-security.adoc | 13 + osd_policy/policy-responsibility-matrix.adoc | 13 + .../policy-understand-availability.adoc | 11 + .../aws-private-connections.adoc | 12 + osd_private_connections/images | 1 + osd_private_connections/modules | 1 + osd_private_connections/private-cluster.adoc | 18 + osd_quickstart/images | 1 + osd_quickstart/modules | 1 + osd_quickstart/osd-quickstart.adoc | 19 + rosa_architecture/images | 1 + rosa_architecture/modules | 1 + .../rosa-architecture-models.adoc | 15 + .../rosa-basic-architecture-concepts.adoc | 11 + rosa_architecture/rosa-understanding.adoc | 10 + rosa_cli/images | 1 + rosa_cli/modules | 1 + rosa_cli/rosa-checking-acct-version-cli.adoc | 8 + rosa_cli/rosa-checking-logs-cli.adoc | 8 + rosa_cli/rosa-get-started-cli.adoc | 13 + rosa_cli/rosa-manage-objects-cli.adoc | 17 + rosa_getting_started/images | 1 + rosa_getting_started/modules | 1 + .../rosa-accessing-cluster.adoc | 23 + rosa_getting_started/rosa-aws-prereqs.adoc | 27 + ...rosa-aws-privatelink-creating-cluster.adoc | 22 + .../rosa-config-aws-account.adoc | 22 + .../rosa-config-identity-providers.adoc | 22 + .../rosa-creating-cluster.adoc | 22 + .../rosa-deleting-access-cluster.adoc | 12 + .../rosa-deleting-cluster.adoc | 10 + .../rosa-getting-started-workflow.adoc | 22 + .../rosa-installing-rosa.adoc | 22 + rosa_getting_started/rosa-quickstart.adoc | 12 + .../rosa-required-aws-service-quotas.adoc | 17 + rosa_getting_started_sts/images | 1 + rosa_getting_started_sts/modules | 1 + .../rosa-sts-accessing-cluster.adoc | 19 + .../rosa-sts-aws-prereqs.adoc | 25 + .../rosa-sts-config-identity-providers.adoc | 22 + .../rosa-sts-deleting-access-cluster.adoc | 12 + 
.../rosa-sts-deleting-cluster.adoc | 12 + .../rosa-sts-getting-started-workflow.adoc | 26 + .../rosa-sts-required-aws-service-quotas.adoc | 13 + .../rosa-sts-setting-up-environment.adoc | 21 + .../rosa_creating_a_cluster_with_sts/images | 1 + .../rosa_creating_a_cluster_with_sts/modules | 1 + .../rosa-sts-about-iam-resources.adoc | 25 + .../rosa-sts-creating-a-cluster-quickly.adoc | 23 + ...reating-a-cluster-with-customizations.adoc | 25 + .../rosa-sts-interactive-mode-reference.adoc | 16 + rosa_planning/images | 1 + rosa_planning/modules | 1 + rosa_planning/rosa-limits-scalability.adoc | 11 + rosa_planning/rosa-planning-environment.adoc | 10 + rosa_policy/images | 1 + rosa_policy/modules | 1 + rosa_policy/rosa-life-cycle.adoc | 27 + rosa_policy/rosa-policy-process-security.adoc | 33 + .../rosa-policy-responsibility-matrix.adoc | 12 + .../rosa-policy-understand-availability.adoc | 10 + rosa_policy/rosa-service-definition.adoc | 22 + rosa_support/images | 1 + rosa_support/modules | 1 + rosa_support/rosa-getting-support.adoc | 9 + .../rosa-troubleshooting-deployments.adoc | 7 + .../rosa-troubleshooting-expired-tokens.adoc | 7 + .../rosa-troubleshooting-installations.adoc | 8 + .../osd-persistent-storage-aws.adoc | 39 + support/osd-managed-resources.adoc | 53 + upgrading/images | 1 + upgrading/modules | 1 + upgrading/osd-upgrades.adoc | 19 + upgrading/rosa-upgrading-sts.adoc | 29 + upgrading/rosa-upgrading.adoc | 28 + 348 files changed, 17527 insertions(+), 28 deletions(-) create mode 100644 _topic_maps/_topic_map_osd.yml create mode 100644 _topic_maps/_topic_map_rosa.yml create mode 100644 adding_service_cluster/adding-service.adoc create mode 100644 adding_service_cluster/available-services.adoc create mode 120000 adding_service_cluster/images create mode 120000 adding_service_cluster/modules create mode 100644 adding_service_cluster/rosa-available-services.adoc create mode 100644 administering_a_cluster/osd-admin-roles.adoc create mode 100644 applications/deployments/osd-config-custom-domains-applications.adoc create mode 100644 cloud_infrastructure_access/rosa-configuring-private-connections.adoc create mode 100644 cloud_infrastructure_access/rosa-private-cluster.adoc create mode 100644 identity_providers/config-identity-providers.adoc create mode 120000 identity_providers/images create mode 120000 identity_providers/modules create mode 100644 images/156_OpenShift_ROSA_Arch_0621_arch.svg create mode 100644 images/156_OpenShift_ROSA_Arch_0621_privatelink.svg create mode 100644 images/osd-monitoring-architecture.svg create mode 100644 logging/rosa-install-logging.adoc create mode 100644 logging/rosa-viewing-logs.adoc create mode 100644 modules/access-cluster.adoc create mode 100644 modules/access-service.adoc create mode 100644 modules/add-user.adoc create mode 100644 modules/adding-service-existing.adoc create mode 100644 modules/attributes-openshift-dedicated.adoc create mode 100644 modules/aws-cloudwatch.adoc create mode 100644 modules/aws-direct-connect.adoc create mode 100644 modules/aws-limits.adoc create mode 100644 modules/aws-vpc.adoc create mode 100644 modules/aws-vpn.adoc create mode 100644 modules/ccs-aws-customer-procedure.adoc create mode 100644 modules/ccs-aws-customer-requirements.adoc create mode 100644 modules/ccs-aws-iam.adoc create mode 100644 modules/ccs-aws-provisioned.adoc create mode 100644 modules/ccs-aws-scp.adoc create mode 100644 modules/ccs-aws-understand.adoc create mode 100644 modules/ccs-gcp-customer-procedure.adoc create mode 100644 
modules/ccs-gcp-customer-requirements.adoc create mode 100644 modules/ccs-gcp-iam.adoc create mode 100644 modules/ccs-gcp-understand.adoc create mode 100644 modules/codeready-workspaces.adoc create mode 100644 modules/config-aws-access.adoc create mode 100644 modules/config-github-idp.adoc create mode 100644 modules/config-gitlab-idp.adoc create mode 100644 modules/config-google-idp.adoc create mode 100644 modules/config-htpasswd-idp.adoc create mode 100644 modules/config-idp.adoc create mode 100644 modules/config-ldap-idp.adoc create mode 100644 modules/config-openid-idp.adoc create mode 100644 modules/container-benefits.adoc create mode 100644 modules/create-aws-cluster.adoc create mode 100644 modules/create-cluster.adoc create mode 100644 modules/create-gcp-cluster.adoc create mode 100644 modules/deleting-cluster.adoc create mode 100644 modules/deleting-service-cli.adoc create mode 100644 modules/deleting-service.adoc create mode 100644 modules/deploy-app.adoc create mode 100644 modules/enable-aws-access.adoc create mode 100644 modules/enable-private-cluster-existing.adoc create mode 100644 modules/enable-private-cluster-new.adoc create mode 100644 modules/enable-public-cluster.adoc create mode 100644 modules/gcp-limits.adoc create mode 100644 modules/kubernetes-about.adoc create mode 100644 modules/life-cycle-dates.adoc create mode 100644 modules/life-cycle-definitions.adoc create mode 100644 modules/life-cycle-install.adoc create mode 100644 modules/life-cycle-limited-support.adoc create mode 100644 modules/life-cycle-major-versions.adoc create mode 100644 modules/life-cycle-mandatory-upgrades.adoc create mode 100644 modules/life-cycle-minor-versions.adoc create mode 100644 modules/life-cycle-overview.adoc create mode 100644 modules/life-cycle-patch-versions.adoc create mode 100644 modules/life-cycle-supported-versions.adoc create mode 100644 modules/managing-dedicated-administrators.adoc create mode 100644 modules/notification-subscribe.adoc create mode 100644 modules/ocm-disabling-autoscaling-nodes.adoc create mode 100644 modules/ocm-enabling-autoscaling-nodes.adoc create mode 100644 modules/osd-applications-config-custom-domains.adoc create mode 100644 modules/osd-aws-privatelink-about.adoc create mode 100644 modules/osd-aws-privatelink-architecture.adoc create mode 100644 modules/osd-aws-privatelink-config-dns-forwarding.adoc create mode 100644 modules/osd-aws-privatelink-firewall-prerequisites.adoc create mode 100644 modules/osd-aws-privatelink-required-resources.adoc create mode 100644 modules/osd-intro.adoc create mode 100644 modules/osd-monitoring-assigning-tolerations-to-monitoring-components.adoc create mode 100644 modules/osd-monitoring-components-for-monitoring-user-defined-projects.adoc create mode 100644 modules/osd-monitoring-configurable-monitoring-components.adoc create mode 100644 modules/osd-monitoring-configuring-a-local-persistent-volume-claim.adoc create mode 100644 modules/osd-monitoring-configuring-the-monitoring-stack.adoc create mode 100644 modules/osd-monitoring-deploying-a-sample-service.adoc create mode 100644 modules/osd-monitoring-exploring-the-visualized-metrics.adoc create mode 100644 modules/osd-monitoring-limiting-scrape-samples-in-user-defined-projects.adoc create mode 100644 modules/osd-monitoring-maintenance-and-support.adoc create mode 100644 modules/osd-monitoring-modifying-retention-time-for-prometheus-metrics-data.adoc create mode 100644 modules/osd-monitoring-moving-monitoring-components-to-different-nodes.adoc create mode 100644 
modules/osd-monitoring-querying-metrics-for-all-projects-as-an-administrator.adoc create mode 100644 modules/osd-monitoring-querying-metrics-for-user-defined-projects-as-a-developer.adoc create mode 100644 modules/osd-monitoring-querying-metrics.adoc create mode 100644 modules/osd-monitoring-reviewing-monitoring-dashboards-developer.adoc create mode 100644 modules/osd-monitoring-setting-a-scrape-sample-limit-for-user-defined-projects.adoc create mode 100644 modules/osd-monitoring-setting-log-levels-for-monitoring-components.adoc create mode 100644 modules/osd-monitoring-setting-up-metrics-collection-for-user-defined-projects.adoc create mode 100644 modules/osd-monitoring-specifying-how-a-service-is-monitored.adoc create mode 100644 modules/osd-monitoring-support-considerations.adoc create mode 100644 modules/osd-monitoring-targets-for-user-defined-projects.adoc create mode 100644 modules/osd-monitoring-troubleshooting-issues.adoc create mode 100644 modules/osd-monitoring-understanding-metrics.adoc create mode 100644 modules/osd-monitoring-understanding-the-monitoring-stack.adoc create mode 100644 modules/osd-rhoam.adoc create mode 100644 modules/osd-storage-pv-aws-config-account.adoc create mode 100644 modules/osd-storage-pv-aws-connect-pods.adoc create mode 100644 modules/osd-storage-pv-aws-create-sharedvolumes-cli.adoc create mode 100644 modules/osd-storage-pv-aws-create-sharedvolumes-console.adoc create mode 100644 modules/osd-storage-pv-aws-install-efs.adoc create mode 100644 modules/osd-storage-pv-aws-uninstall-efs.adoc create mode 100644 modules/osd-vs-ocp.adoc create mode 100644 modules/policy-change-management.adoc create mode 100644 modules/policy-customer-responsibility.adoc create mode 100644 modules/policy-disaster-recovery.adoc create mode 100644 modules/policy-failure-points.adoc create mode 100644 modules/policy-identity-access-management.adoc create mode 100644 modules/policy-incident.adoc create mode 100644 modules/policy-responsibilities.adoc create mode 100644 modules/policy-security-regulation-compliance.adoc create mode 100644 modules/policy-shared-responsibility.adoc create mode 100644 modules/rosa-about.adoc create mode 100644 modules/rosa-accessing-your-cluster-quick.adoc create mode 100644 modules/rosa-accessing-your-cluster.adoc create mode 100644 modules/rosa-adding-instance-types.adoc create mode 100644 modules/rosa-adding-node-labels.adoc create mode 100644 modules/rosa-architecture.adoc create mode 100644 modules/rosa-aws-iam.adoc create mode 100644 modules/rosa-aws-privatelink-create-cluster.adoc create mode 100644 modules/rosa-aws-procedure.adoc create mode 100644 modules/rosa-aws-provisioned.adoc create mode 100644 modules/rosa-aws-requirements.adoc create mode 100644 modules/rosa-aws-scp.adoc create mode 100644 modules/rosa-aws-understand.adoc create mode 100644 modules/rosa-checking-account-version-info-cli.adoc create mode 100644 modules/rosa-common-commands.adoc create mode 100644 modules/rosa-configure.adoc create mode 100644 modules/rosa-configuring-aws-account.adoc create mode 100644 modules/rosa-containers-concept.adoc create mode 100644 modules/rosa-create-cluster-admins.adoc create mode 100644 modules/rosa-create-dedicated-cluster-admins.adoc create mode 100644 modules/rosa-create-objects.adoc create mode 100644 modules/rosa-creating-cluster.adoc create mode 100644 modules/rosa-delete-cluster-admins.adoc create mode 100644 modules/rosa-delete-dedicated-admins.adoc create mode 100644 modules/rosa-delete-objects.adoc create mode 100644 
modules/rosa-deleting-aws-resources-aws-console.adoc create mode 100644 modules/rosa-deleting-aws-resources-cli.adoc create mode 100644 modules/rosa-deleting-cluster.adoc create mode 100644 modules/rosa-disabling-autoscaling-nodes.adoc create mode 100644 modules/rosa-edit-objects.adoc create mode 100644 modules/rosa-enable-private-cluster-existing.adoc create mode 100644 modules/rosa-enable-private-cluster-new.adoc create mode 100644 modules/rosa-enabling-autoscaling-nodes.adoc create mode 100644 modules/rosa-getting-support.adoc create mode 100644 modules/rosa-initialize.adoc create mode 100644 modules/rosa-install-logging-addon.adoc create mode 100644 modules/rosa-install-uninstall-addon.adoc create mode 100644 modules/rosa-installing.adoc create mode 100644 modules/rosa-kubernetes-concept.adoc create mode 100644 modules/rosa-list-objects.adoc create mode 100644 modules/rosa-logs.adoc create mode 100644 modules/rosa-openshift-concepts.adoc create mode 100644 modules/rosa-parent-commands.adoc create mode 100644 modules/rosa-planning-cluster-maximums-environment.adoc create mode 100644 modules/rosa-planning-cluster-maximums.adoc create mode 100644 modules/rosa-planning-considerations.adoc create mode 100644 modules/rosa-planning-environment-application-reqs.adoc create mode 100644 modules/rosa-planning-environment-cluster-max.adoc create mode 100644 modules/rosa-policy-change-management.adoc create mode 100644 modules/rosa-policy-customer-responsibility.adoc create mode 100644 modules/rosa-policy-disaster-recovery.adoc create mode 100644 modules/rosa-policy-failure-points.adoc create mode 100644 modules/rosa-policy-identity-access-management.adoc create mode 100644 modules/rosa-policy-incident.adoc create mode 100644 modules/rosa-policy-responsibilities.adoc create mode 100644 modules/rosa-policy-security-regulation-compliance.adoc create mode 100644 modules/rosa-policy-shared-responsibility.adoc create mode 100644 modules/rosa-quickstart-instructions.adoc create mode 100644 modules/rosa-required-aws-service-quotas.adoc create mode 100644 modules/rosa-requirements-deploying-in-opt-in-regions.adoc create mode 100644 modules/rosa-scaling-worker-nodes.adoc create mode 100644 modules/rosa-sdpolicy-account-management.adoc create mode 100644 modules/rosa-sdpolicy-logging.adoc create mode 100644 modules/rosa-sdpolicy-monitoring.adoc create mode 100644 modules/rosa-sdpolicy-networking.adoc create mode 100644 modules/rosa-sdpolicy-platform.adoc create mode 100644 modules/rosa-sdpolicy-security.adoc create mode 100644 modules/rosa-sdpolicy-storage.adoc create mode 100644 modules/rosa-setting-the-aws-security-token-version.adoc create mode 100644 modules/rosa-setting-up-cli.adoc create mode 100644 modules/rosa-sts-about-iam-resources.adoc create mode 100644 modules/rosa-sts-account-wide-role-and-policy-commands.adoc create mode 100644 modules/rosa-sts-account-wide-roles-and-policies.adoc create mode 100644 modules/rosa-sts-aws-iam.adoc create mode 100644 modules/rosa-sts-aws-requirements.adoc create mode 100644 modules/rosa-sts-creating-a-cluster-quickly.adoc create mode 100644 modules/rosa-sts-creating-a-cluster-with-customizations.adoc create mode 100644 modules/rosa-sts-interactive-mode-reference.adoc create mode 100644 modules/rosa-sts-oidc-provider-command.adoc create mode 100644 modules/rosa-sts-oidc-provider.adoc create mode 100644 modules/rosa-sts-operator-role-commands.adoc create mode 100644 modules/rosa-sts-operator-roles.adoc create mode 100644 modules/rosa-sts-setting-up-environment.adoc 
create mode 100644 modules/rosa-sts-support-considerations.adoc create mode 100644 modules/rosa-troubleshooting-deployment.adoc create mode 100644 modules/rosa-troubleshooting-expired-token.adoc create mode 100644 modules/rosa-troubleshooting-installing.adoc create mode 100644 modules/rosa-understanding.adoc create mode 100644 modules/rosa-upgrade-cluster-cli.adoc create mode 100644 modules/rosa-upgrading-automatic-ocm.adoc create mode 100644 modules/rosa-upgrading-cli-tutorial.adoc create mode 100644 modules/rosa-upgrading-manual-ocm.adoc create mode 100644 modules/rosa-upgrading-preparing-4-7-to-4-8.adoc create mode 100644 modules/rosa-using-bash-script.adoc create mode 100644 modules/rosa-using-sts.adoc create mode 100644 modules/rosa-view-cloudwatch-logs.adoc create mode 100644 modules/scaling-cluster.adoc create mode 100644 modules/sdpolicy-account-management.adoc create mode 100644 modules/sdpolicy-logging.adoc create mode 100644 modules/sdpolicy-monitoring.adoc create mode 100644 modules/sdpolicy-networking.adoc create mode 100644 modules/sdpolicy-platform.adoc create mode 100644 modules/sdpolicy-security.adoc create mode 100644 modules/sdpolicy-storage.adoc create mode 100644 modules/understanding-admin-roles.adoc create mode 100644 modules/understanding-clusters.adoc create mode 100644 modules/understanding-idp.adoc create mode 100644 modules/upgrade-auto.adoc create mode 100644 modules/upgrade-manual.adoc create mode 100644 modules/upgrade.adoc create mode 100644 modules/viewing-notifications.adoc create mode 100644 monitoring/osd-accessing-monitoring-for-user-defined-projects.adoc create mode 100644 monitoring/osd-configuring-the-monitoring-stack.adoc create mode 100644 monitoring/osd-managing-alerts.adoc create mode 100644 monitoring/osd-managing-metrics.adoc create mode 100644 monitoring/osd-reviewing-monitoring-dashboards.adoc create mode 100644 monitoring/osd-troubleshooting-monitoring-issues.adoc create mode 100644 monitoring/osd-understanding-the-monitoring-stack.adoc create mode 100644 nodes/nodes-about-autoscaling-nodes.adoc create mode 100644 nodes/nodes-machinepools-about.adoc create mode 100644 nodes/rosa-managing-worker-nodes.adoc create mode 120000 osd_architecture/images create mode 120000 osd_architecture/modules create mode 100644 osd_architecture/osd-architecture.adoc create mode 100644 osd_architecture/osd-understanding.adoc create mode 100644 osd_cluster_create/creating-your-cluster.adoc create mode 120000 osd_cluster_create/images create mode 120000 osd_cluster_create/modules create mode 120000 osd_notifications/images create mode 120000 osd_notifications/modules create mode 100644 osd_notifications/notifications.adoc create mode 100644 osd_planning/aws-ccs.adoc create mode 100644 osd_planning/gcp-ccs.adoc create mode 120000 osd_planning/images create mode 120000 osd_planning/modules create mode 120000 osd_policy/images create mode 120000 osd_policy/modules create mode 100644 osd_policy/osd-life-cycle.adoc create mode 100644 osd_policy/osd-service-definition.adoc create mode 100644 osd_policy/policy-process-security.adoc create mode 100644 osd_policy/policy-responsibility-matrix.adoc create mode 100644 osd_policy/policy-understand-availability.adoc create mode 100644 osd_private_connections/aws-private-connections.adoc create mode 120000 osd_private_connections/images create mode 120000 osd_private_connections/modules create mode 100644 osd_private_connections/private-cluster.adoc create mode 120000 osd_quickstart/images create mode 120000 
osd_quickstart/modules create mode 100644 osd_quickstart/osd-quickstart.adoc create mode 120000 rosa_architecture/images create mode 120000 rosa_architecture/modules create mode 100644 rosa_architecture/rosa-architecture-models.adoc create mode 100644 rosa_architecture/rosa-basic-architecture-concepts.adoc create mode 100644 rosa_architecture/rosa-understanding.adoc create mode 120000 rosa_cli/images create mode 120000 rosa_cli/modules create mode 100644 rosa_cli/rosa-checking-acct-version-cli.adoc create mode 100644 rosa_cli/rosa-checking-logs-cli.adoc create mode 100644 rosa_cli/rosa-get-started-cli.adoc create mode 100644 rosa_cli/rosa-manage-objects-cli.adoc create mode 120000 rosa_getting_started/images create mode 120000 rosa_getting_started/modules create mode 100644 rosa_getting_started/rosa-accessing-cluster.adoc create mode 100644 rosa_getting_started/rosa-aws-prereqs.adoc create mode 100644 rosa_getting_started/rosa-aws-privatelink-creating-cluster.adoc create mode 100644 rosa_getting_started/rosa-config-aws-account.adoc create mode 100644 rosa_getting_started/rosa-config-identity-providers.adoc create mode 100644 rosa_getting_started/rosa-creating-cluster.adoc create mode 100644 rosa_getting_started/rosa-deleting-access-cluster.adoc create mode 100644 rosa_getting_started/rosa-deleting-cluster.adoc create mode 100644 rosa_getting_started/rosa-getting-started-workflow.adoc create mode 100644 rosa_getting_started/rosa-installing-rosa.adoc create mode 100644 rosa_getting_started/rosa-quickstart.adoc create mode 100644 rosa_getting_started/rosa-required-aws-service-quotas.adoc create mode 120000 rosa_getting_started_sts/images create mode 120000 rosa_getting_started_sts/modules create mode 100644 rosa_getting_started_sts/rosa-sts-accessing-cluster.adoc create mode 100644 rosa_getting_started_sts/rosa-sts-aws-prereqs.adoc create mode 100644 rosa_getting_started_sts/rosa-sts-config-identity-providers.adoc create mode 100644 rosa_getting_started_sts/rosa-sts-deleting-access-cluster.adoc create mode 100644 rosa_getting_started_sts/rosa-sts-deleting-cluster.adoc create mode 100644 rosa_getting_started_sts/rosa-sts-getting-started-workflow.adoc create mode 100644 rosa_getting_started_sts/rosa-sts-required-aws-service-quotas.adoc create mode 100644 rosa_getting_started_sts/rosa-sts-setting-up-environment.adoc create mode 120000 rosa_getting_started_sts/rosa_creating_a_cluster_with_sts/images create mode 120000 rosa_getting_started_sts/rosa_creating_a_cluster_with_sts/modules create mode 100644 rosa_getting_started_sts/rosa_creating_a_cluster_with_sts/rosa-sts-about-iam-resources.adoc create mode 100644 rosa_getting_started_sts/rosa_creating_a_cluster_with_sts/rosa-sts-creating-a-cluster-quickly.adoc create mode 100644 rosa_getting_started_sts/rosa_creating_a_cluster_with_sts/rosa-sts-creating-a-cluster-with-customizations.adoc create mode 100644 rosa_getting_started_sts/rosa_creating_a_cluster_with_sts/rosa-sts-interactive-mode-reference.adoc create mode 120000 rosa_planning/images create mode 120000 rosa_planning/modules create mode 100644 rosa_planning/rosa-limits-scalability.adoc create mode 100644 rosa_planning/rosa-planning-environment.adoc create mode 120000 rosa_policy/images create mode 120000 rosa_policy/modules create mode 100644 rosa_policy/rosa-life-cycle.adoc create mode 100644 rosa_policy/rosa-policy-process-security.adoc create mode 100644 rosa_policy/rosa-policy-responsibility-matrix.adoc create mode 100644 rosa_policy/rosa-policy-understand-availability.adoc create mode 
100644 rosa_policy/rosa-service-definition.adoc create mode 120000 rosa_support/images create mode 120000 rosa_support/modules create mode 100644 rosa_support/rosa-getting-support.adoc create mode 100644 rosa_support/rosa-troubleshooting-deployments.adoc create mode 100644 rosa_support/rosa-troubleshooting-expired-tokens.adoc create mode 100644 rosa_support/rosa-troubleshooting-installations.adoc create mode 100644 storage/persistent_storage/osd-persistent-storage-aws.adoc create mode 100644 support/osd-managed-resources.adoc create mode 120000 upgrading/images create mode 120000 upgrading/modules create mode 100644 upgrading/osd-upgrades.adoc create mode 100644 upgrading/rosa-upgrading-sts.adoc create mode 100644 upgrading/rosa-upgrading.adoc diff --git a/_topic_maps/_topic_map_osd.yml b/_topic_maps/_topic_map_osd.yml new file mode 100644 index 0000000000..4c23d12ea3 --- /dev/null +++ b/_topic_maps/_topic_map_osd.yml @@ -0,0 +1,202 @@ +# This configuration file dictates the organization of the topic groups and +# topics on the main page of the doc site for this branch. Each record +# consists of the following: +# +# --- <= Record delimiter +# Name: Origin of the Species <= Display name of topic group +# Dir: origin_of_the_species <= Directory name of topic group +# Topics: +# - Name: The Majestic Marmoset <= Topic name +# File: the_majestic_marmoset <= Topic file under group dir +/- +# - Name: The Curious Crocodile <= Topic 2 name +# File: the_curious_crocodile <= Topic 2 file +# - Name: The Numerous Nematodes <= Sub-topic group name +# Dir: the_numerous_nematodes <= Sub-topic group dir +# Topics: +# - Name: The Wily Worm <= Sub-topic name +# File: the_wily_worm <= Sub-topic file under / +# - Name: The Acrobatic Ascarid <= Sub-topic 2 name +# File: the_acrobatic_ascarid <= Sub-topic 2 file under / +# +# The ordering of the records in this document determines the ordering of the +# topic groups and topics on the main page. 
+ +--- +Name: About +Dir: welcome +Distros: openshift-dedicated +Topics: +- Name: Welcome + File: index +--- +Name: Architecture +Dir: osd_architecture +Distros: openshift-dedicated +Topics: +- Name: Introduction to OpenShift Dedicated + File: osd-understanding +- Name: Architecture concepts + File: osd-architecture +--- +Name: Planning +Dir: osd_planning +Distros: openshift-dedicated +Topics: +- Name: Customer Cloud Subscriptions on AWS + File: aws-ccs +- Name: Customer Cloud Subscriptions on GCP + File: gcp-ccs +--- +Name: Getting started +Dir: osd_quickstart +Distros: openshift-dedicated +Topics: +- Name: Quickstart for OpenShift Dedicated + File: osd-quickstart +--- +Name: Creating a cluster +Dir: osd_cluster_create +Distros: openshift-dedicated +Topics: +- Name: Creating your cluster + File: creating-your-cluster +--- +Name: Configuring identity providers +Dir: identity_providers +Distros: openshift-dedicated +Topics: +- Name: Configuring your identity providers + File: config-identity-providers +--- +Name: Administering your cluster +Dir: administering_a_cluster +Distros: openshift-dedicated +Topics: +- Name: Managing administration roles and users + File: osd-admin-roles +--- +Name: Upgrading +Dir: upgrading +Topics: +- Name: Upgrading OpenShift Dedicated + File: osd-upgrades + Distros: openshift-dedicated +--- +Name: Cluster notifications +Dir: osd_notifications +Distros: openshift-dedicated +Topics: +- Name: Notifications for OpenShift Dedicated clusters + File: notifications +--- +Name: Add-on services +Dir: adding_service_cluster +Distros: openshift-dedicated +Topics: +- Name: Adding services to a cluster + File: adding-service +- Name: Available services + File: available-services + Distros: openshift-dedicated +--- +Name: Configuring private connections +Dir: osd_private_connections +Distros: openshift-dedicated +Topics: +- Name: Configuring private connections for AWS + File: aws-private-connections +- Name: Configuring a private cluster + File: private-cluster +--- +Name: Networking +Dir: networking +Distros: openshift-dedicated +Topics: +- Name: OpenShift SDN default CNI network provider + Dir: openshift_sdn + Topics: + - Name: Enabling multicast for a project + File: enabling-multicast +--- +Name: Nodes +Dir: nodes +Distros: openshift-dedicated +Topics: +- Name: About machine pools + File: nodes-machinepools-about +- Name: About autoscaling nodes on a cluster + File: nodes-about-autoscaling-nodes +--- +Name: Monitoring user-defined projects +Dir: monitoring +Distros: openshift-dedicated +Topics: +- Name: Understanding the monitoring stack + File: osd-understanding-the-monitoring-stack +- Name: Configuring the monitoring stack + File: osd-configuring-the-monitoring-stack +- Name: Accessing monitoring for user-defined projects + File: osd-accessing-monitoring-for-user-defined-projects +- Name: Managing metrics + File: osd-managing-metrics +- Name: Managing alerts + File: osd-managing-alerts +- Name: Reviewing monitoring dashboards + File: osd-reviewing-monitoring-dashboards +- Name: Troubleshooting monitoring issues + File: osd-troubleshooting-monitoring-issues +--- +Name: Applications +Dir: applications +Distros: openshift-dedicated +Topics: +- Name: Deployments + Dir: deployments + Distros: openshift-dedicated + Topics: + - Name: Configuring custom domains for applications + File: osd-config-custom-domains-applications +--- +Name: Policies and service definition +Dir: osd_policy +Distros: openshift-dedicated +Topics: +- Name: OpenShift Dedicated service definition + File: 
osd-service-definition +- Name: Responsibility assignment matrix + File: policy-responsibility-matrix +- Name: Understanding process and security for OpenShift Dedicated + File: policy-process-security +- Name: About availability for OpenShift Dedicated + File: policy-understand-availability +- Name: Update life cycle + File: osd-life-cycle +--- +Name: Support +Dir: support +Distros: openshift-dedicated +Topics: +- Name: Getting support + File: getting-support + Distros: openshift-dedicated +- Name: Remote health monitoring with connected clusters + Dir: remote_health_monitoring + Distros: openshift-dedicated + Topics: + - Name: About remote health monitoring + File: about-remote-health-monitoring + - Name: Showing data collected by remote health monitoring + File: showing-data-collected-by-remote-health-monitoring + - Name: Opting out of remote health reporting + File: opting-out-of-remote-health-reporting + - Name: Using Insights to identify issues with your cluster + File: using-insights-to-identify-issues-with-your-cluster +- Name: Gathering data about your cluster + File: gathering-cluster-data + Distros: openshift-dedicated +- Name: Summarizing cluster specifications + File: summarizing-cluster-specifications + Distros: openshift-dedicated +- Name: OpenShift Dedicated managed resources + File: osd-managed-resources + Distros: openshift-dedicated diff --git a/_topic_maps/_topic_map_rosa.yml b/_topic_maps/_topic_map_rosa.yml new file mode 100644 index 0000000000..a34d0eaff0 --- /dev/null +++ b/_topic_maps/_topic_map_rosa.yml @@ -0,0 +1,259 @@ +# This configuration file dictates the organization of the topic groups and +# topics on the main page of the doc site for this branch. Each record +# consists of the following: +# +# --- <= Record delimiter +# Name: Origin of the Species <= Display name of topic group +# Dir: origin_of_the_species <= Directory name of topic group +# Topics: +# - Name: The Majestic Marmoset <= Topic name +# File: the_majestic_marmoset <= Topic file under group dir +/- +# - Name: The Curious Crocodile <= Topic 2 name +# File: the_curious_crocodile <= Topic 2 file +# - Name: The Numerous Nematodes <= Sub-topic group name +# Dir: the_numerous_nematodes <= Sub-topic group dir +# Topics: +# - Name: The Wily Worm <= Sub-topic name +# File: the_wily_worm <= Sub-topic file under / +# - Name: The Acrobatic Ascarid <= Sub-topic 2 name +# File: the_acrobatic_ascarid <= Sub-topic 2 file under / +# +# The ordering of the records in this document determines the ordering of the +# topic groups and topics on the main page. 
+ +--- +Name: About +Dir: welcome +Distros: openshift-rosa +Topics: +- Name: Welcome + File: index +--- +Name: ROSA architecture +Dir: rosa_architecture +Distros: openshift-rosa +Topics: +- Name: Introduction to ROSA + File: rosa-understanding +- Name: Architecture models + File: rosa-architecture-models +- Name: Architecture concepts + File: rosa-basic-architecture-concepts +--- +Name: Planning, limits, and scalability +Dir: rosa_planning +Distros: openshift-rosa +Topics: +- Name: Limits and scalability + File: rosa-limits-scalability +- Name: Planning your environment + File: rosa-planning-environment +--- +Name: Setting up accounts and clusters +Dir: rosa_getting_started +Distros: openshift-rosa +Topics: +- Name: Getting started workflow + File: rosa-getting-started-workflow +- Name: AWS prerequisites for ROSA + File: rosa-aws-prereqs +- Name: Required AWS service quotas + File: rosa-required-aws-service-quotas +- Name: Configuring your AWS account + File: rosa-config-aws-account +- Name: Installing ROSA + File: rosa-installing-rosa +- Name: Creating a ROSA cluster + File: rosa-creating-cluster +- Name: Creating an AWS PrivateLink cluster on ROSA + File: rosa-aws-privatelink-creating-cluster +- Name: Accessing a ROSA cluster + File: rosa-accessing-cluster +- Name: Configuring identity providers using the OCM console + File: rosa-config-identity-providers +- Name: Deleting access to a ROSA cluster + File: rosa-deleting-access-cluster +- Name: Deleting a ROSA cluster + File: rosa-deleting-cluster +- Name: Command quick reference for creating clusters and users + File: rosa-quickstart +--- +Name: Setting up accounts and clusters using AWS security token service (STS) +Dir: rosa_getting_started_sts +Distros: openshift-rosa +Topics: +- Name: Getting started using STS workflow + File: rosa-sts-getting-started-workflow +- Name: AWS prerequisites for ROSA with STS + File: rosa-sts-aws-prereqs +- Name: Required AWS service quotas + File: rosa-sts-required-aws-service-quotas +- Name: Setting up the environment with STS + File: rosa-sts-setting-up-environment +- Name: Creating a ROSA cluster with STS + Dir: rosa_creating_a_cluster_with_sts + Topics: + - Name: Creating a ROSA cluster with STS quickly + File: rosa-sts-creating-a-cluster-quickly + - Name: Creating a ROSA cluster with STS using customizations + File: rosa-sts-creating-a-cluster-with-customizations + - Name: About IAM resources for ROSA with STS + File: rosa-sts-about-iam-resources + - Name: Interactive cluster creation mode reference + File: rosa-sts-interactive-mode-reference +- Name: Accessing a ROSA cluster + File: rosa-sts-accessing-cluster +- Name: Configuring identity providers using the OCM console + File: rosa-sts-config-identity-providers +- Name: Deleting access to a ROSA cluster + File: rosa-sts-deleting-access-cluster +- Name: Deleting a ROSA cluster + File: rosa-sts-deleting-cluster +--- +Name: Upgrading +Dir: upgrading +Topics: +- Name: Upgrading ROSA + File: rosa-upgrading + Distros: openshift-rosa +- Name: Upgrading ROSA with STS + File: rosa-upgrading-sts + Distros: openshift-rosa +--- +Name: Add-on services +Dir: adding_service_cluster +Distros: openshift-rosa +Topics: +- Name: Adding services to a cluster + File: adding-service +- Name: Available services + File: rosa-available-services + Distros: openshift-rosa +--- +Name: Networking +Dir: networking +Distros: openshift-rosa +Topics: +- Name: OpenShift SDN default CNI network provider + Dir: openshift_sdn + Topics: + - Name: Enabling multicast for a project + File: 
enabling-multicast +--- +Name: Nodes +Dir: nodes +Distros: openshift-rosa +Topics: +- Name: About machine pools + File: nodes-machinepools-about +- Name: Managing worker nodes + File: rosa-managing-worker-nodes + Distros: openshift-rosa +- Name: About autoscaling nodes on a cluster + File: nodes-about-autoscaling-nodes +--- +Name: Monitoring user-defined projects +Dir: monitoring +Distros: openshift-rosa +Topics: +- Name: Understanding the monitoring stack + File: osd-understanding-the-monitoring-stack +- Name: Configuring the monitoring stack + File: osd-configuring-the-monitoring-stack +- Name: Accessing monitoring for user-defined projects + File: osd-accessing-monitoring-for-user-defined-projects +- Name: Managing metrics + File: osd-managing-metrics +- Name: Managing alerts + File: osd-managing-alerts +- Name: Reviewing monitoring dashboards + File: osd-reviewing-monitoring-dashboards +- Name: Troubleshooting monitoring issues + File: osd-troubleshooting-monitoring-issues +--- +Name: Storage +Dir: storage +Distros: openshift-rosa +Topics: +- Name: Persistent storage + Dir: persistent_storage + Distros: openshift-rosa + Topics: + - Name: Persistent storage using AWS EFS + File: osd-persistent-storage-aws +--- +Name: Applications +Dir: applications +Distros: openshift-rosa +Topics: +- Name: Deployments + Dir: deployments + Distros: openshift-rosa + Topics: + - Name: Configuring custom domains for applications + File: osd-config-custom-domains-applications +--- +Name: Logging +Dir: logging +Distros: openshift-rosa +Topics: +- Name: Installing the cluster logging service + File: rosa-install-logging +- Name: Viewing cluster logs + File: rosa-viewing-logs +--- +Name: rosa CLI +Dir: rosa_cli +Distros: openshift-rosa +Topics: +- Name: Getting started with the rosa CLI + File: rosa-get-started-cli +- Name: Managing objects with the rosa CLI + File: rosa-manage-objects-cli +- Name: Checking account and version information with the rosa CLI + File: rosa-checking-acct-version-cli +- Name: Checking logs with the rosa cli + File: rosa-checking-logs-cli +--- +Name: Configuring private connections +Dir: cloud_infrastructure_access +Distros: openshift-rosa +Topics: +- Name: Configuring private connections + File: rosa-configuring-private-connections +- Name: Configuring AWS VPC peering + File: dedicated-aws-peering +- Name: Configuring AWS VPN + File: dedicated-aws-vpn +- Name: Configuring AWS Direct Connect + File: dedicated-aws-dc +- Name: Configuring a private cluster + File: rosa-private-cluster +--- +Name: Policies and service definition +Dir: rosa_policy +Distros: openshift-rosa +Topics: +- Name: ROSA service definition + File: rosa-service-definition +- Name: Responsibility assignment matrix + File: rosa-policy-responsibility-matrix +- Name: Understanding process and security for ROSA + File: rosa-policy-process-security +- Name: About availability for ROSA + File: rosa-policy-understand-availability +- Name: ROSA update life cycle + File: rosa-life-cycle +--- +Name: Support +Dir: rosa_support +Distros: openshift-rosa +Topics: +- Name: Support for ROSA + File: rosa-getting-support +- Name: Troubleshooting expired offline access tokens + File: rosa-troubleshooting-expired-tokens +- Name: Troubleshooting installations + File: rosa-troubleshooting-installations +- Name: Troubleshooting cluster deployments + File: rosa-troubleshooting-deployments diff --git a/adding_service_cluster/adding-service.adoc b/adding_service_cluster/adding-service.adoc new file mode 100644 index 0000000000..adcc72bca5 
--- /dev/null
+++ b/adding_service_cluster/adding-service.adoc
@@ -0,0 +1,21 @@
+include::modules/attributes-openshift-dedicated.adoc[]
+[id="adding-service"]
+= Adding services to a cluster using the OCM console
+:context: adding-service
+
+toc::[]
+
+ifdef::openshift-rosa[]
+== Prerequisites
+* For the Amazon CloudWatch service, you must first install the `cluster-logging-operator` using the `rosa` CLI.
+endif::[]
+
+include::modules/adding-service-existing.adoc[leveloffset=+1]
+include::modules/access-service.adoc[leveloffset=+1]
+include::modules/deleting-service.adoc[leveloffset=+1]
+include::modules/deleting-service-cli.adoc[leveloffset=+1]
+
+ifdef::openshift-rosa[]
+== Additional resources
+* For information about the `cluster-logging-operator` and the Amazon CloudWatch log forwarding service, see xref:../logging/rosa-install-logging.adoc#rosa-install-logging[Install the logging add-on service].
+endif::[]
diff --git a/adding_service_cluster/available-services.adoc b/adding_service_cluster/available-services.adoc
new file mode 100644
index 0000000000..1272471ce2
--- /dev/null
+++ b/adding_service_cluster/available-services.adoc
@@ -0,0 +1,11 @@
+include::modules/attributes-openshift-dedicated.adoc[]
+[id="available-services"]
+= Add-on services available for {product-title}
+:context: available-services
+
+toc::[]
+
+You can add services to your existing {product-title} cluster using the xref:../adding_service_cluster/adding-service.adoc#adding-service[OpenShift Cluster Manager console].
+
+include::modules/codeready-workspaces.adoc[leveloffset=+1]
+include::modules/osd-rhoam.adoc[leveloffset=+1]
diff --git a/adding_service_cluster/images b/adding_service_cluster/images
new file mode 120000
index 0000000000..5e67573196
--- /dev/null
+++ b/adding_service_cluster/images
@@ -0,0 +1 @@
+../images
\ No newline at end of file
diff --git a/adding_service_cluster/modules b/adding_service_cluster/modules
new file mode 120000
index 0000000000..464b823aca
--- /dev/null
+++ b/adding_service_cluster/modules
@@ -0,0 +1 @@
+../modules
\ No newline at end of file
diff --git a/adding_service_cluster/rosa-available-services.adoc b/adding_service_cluster/rosa-available-services.adoc
new file mode 100644
index 0000000000..acaa4edf24
--- /dev/null
+++ b/adding_service_cluster/rosa-available-services.adoc
@@ -0,0 +1,14 @@
+include::modules/attributes-openshift-dedicated.adoc[]
+[id="rosa-available-services"]
+= Add-on services available for {product-title}
+:context: rosa-available-services
+
+
+You can add services to your existing {product-title} (ROSA) cluster using the xref:../adding_service_cluster/adding-service.adoc#adding-service[OpenShift Cluster Manager console].
+
+These services can also be installed xref:../rosa_cli/rosa-manage-objects-cli.adoc#rosa-managing-objects-cli[using the `rosa` CLI].
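+
+As a minimal sketch of the CLI path, the following command installs an add-on service on a cluster. The cluster name and the add-on ID `cluster-logging-operator` shown here are placeholders; confirm the add-on IDs available for your cluster and your `rosa` CLI version before running the command:
+
+[source,terminal]
+----
+$ rosa install addon --cluster=<cluster_name> cluster-logging-operator
+----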
+ + +include::modules/aws-cloudwatch.adoc[leveloffset=+1] +include::modules/codeready-workspaces.adoc[leveloffset=+1] +include::modules/osd-rhoam.adoc[leveloffset=+1] diff --git a/administering_a_cluster/osd-admin-roles.adoc b/administering_a_cluster/osd-admin-roles.adoc new file mode 100644 index 0000000000..b9bef29a65 --- /dev/null +++ b/administering_a_cluster/osd-admin-roles.adoc @@ -0,0 +1,12 @@ +[id="osd-admin-roles"] += Managing administration roles and users +include::modules/attributes-openshift-dedicated.adoc[] +:context: osd-admin-roles + +toc::[] + +// TODO: needs intro + +include::modules/understanding-admin-roles.adoc[leveloffset=+1] + +include::modules/managing-dedicated-administrators.adoc[leveloffset=+1] diff --git a/applications/deployments/osd-config-custom-domains-applications.adoc b/applications/deployments/osd-config-custom-domains-applications.adoc new file mode 100644 index 0000000000..7e2cc7fde3 --- /dev/null +++ b/applications/deployments/osd-config-custom-domains-applications.adoc @@ -0,0 +1,8 @@ +[id="osd-config-custom-domains-applications"] += Configuring custom domains for applications +include::modules/attributes-openshift-dedicated.adoc[] +:context: osd-config-custom-domains-applications + +toc::[] + +include::modules/osd-applications-config-custom-domains.adoc[leveloffset=+1] diff --git a/cloud_infrastructure_access/dedicated-aws-peering.adoc b/cloud_infrastructure_access/dedicated-aws-peering.adoc index 4210a1654e..95f8c43eec 100644 --- a/cloud_infrastructure_access/dedicated-aws-peering.adoc +++ b/cloud_infrastructure_access/dedicated-aws-peering.adoc @@ -14,9 +14,12 @@ guide. include::modules/dedicated-aws-vpc-peering-terms.adoc[leveloffset=+1] include::modules/dedicated-aws-vpc-initiating-peering.adoc[leveloffset=+1] + +ifdef::openshift-dedicated[] .Additional resources -* xref:../cloud_infrastructure_access/dedicated-aws-access.adoc#dedicated-aws-ocm-iam-role[Logging into the Web Console for the OSD AWS Account] +* xref:../cloud_infrastructure_access/dedicated-aws-access.adoc#dedicated-aws-ocm-iam-role[Logging into the Web Console for the AWS Account] +endif::[] include::modules/dedicated-aws-vpc-accepting-peering.adoc[leveloffset=+1] include::modules/dedicated-aws-vpc-configuring-routing-tables.adoc[leveloffset=+1] diff --git a/cloud_infrastructure_access/rosa-configuring-private-connections.adoc b/cloud_infrastructure_access/rosa-configuring-private-connections.adoc new file mode 100644 index 0000000000..120333cae9 --- /dev/null +++ b/cloud_infrastructure_access/rosa-configuring-private-connections.adoc @@ -0,0 +1,19 @@ +[id="rosa-configuring-private-connections"] += Configuring private connections +include::modules/attributes-openshift-dedicated.adoc[] +:context: rosa-configuring-private-connections + +toc::[] + +Private cluster access can be implemented to suit the needs of your {product-title} (ROSA) environment. + +.Procedure +. Access your ROSA AWS account and use one or more of the following methods to establish a private connection to your cluster: + +- xref:../cloud_infrastructure_access/dedicated-aws-peering.adoc#dedicated-aws-peering[Configuring AWS VPC peering]: Enable VPC peering to route network traffic between two private IP addresses. + +- xref:../cloud_infrastructure_access/dedicated-aws-vpn.adoc#dedicated-aws-vpn[Configuring AWS VPN]: Establish a Virtual Private Network to securely connect your private network to your Amazon Virtual Private Cloud. 
+ +- xref:../cloud_infrastructure_access/dedicated-aws-dc.adoc#dedicated-aws-dc[Configuring AWS Direct Connect]: Configure AWS Direct Connect to establish a dedicated network connection between your private network and an AWS Direct Connect location. + +. xref:../cloud_infrastructure_access/rosa-private-cluster.adoc#rosa-private-cluster[Configure a private cluster on ROSA]. diff --git a/cloud_infrastructure_access/rosa-private-cluster.adoc b/cloud_infrastructure_access/rosa-private-cluster.adoc new file mode 100644 index 0000000000..4091729e3f --- /dev/null +++ b/cloud_infrastructure_access/rosa-private-cluster.adoc @@ -0,0 +1,24 @@ +[id="rosa-private-cluster"] += Configuring a private cluster +include::modules/attributes-openshift-dedicated.adoc[] +:context: rosa-private-cluster + +toc::[] + +A {product-title} cluster can be made private so that internal applications can be hosted inside a corporate network. In addition, private clusters can be configured to have only internal API endpoints for increased security. + +// {product-title} administrators can choose between public and private cluster configuration from within the *OpenShift Cluster Manager* (OCM). + +Privacy settings can be configured during cluster creation or after a cluster is established. +//// +[NOTE] +==== +Red Hat Service Reliability Engineers (SREs) can access a public or private cluster through the `cloud-ingress-operator` and existing ElasticSearch Load Balancer or Amazon S3 framework. SREs can access clusters through a secure endpoint to perform maintenance and service tasks. +==== +//// +include::modules/rosa-enable-private-cluster-new.adoc[leveloffset=+1] +include::modules/rosa-enable-private-cluster-existing.adoc[leveloffset=+1] + +== Additional resources + +* xref:../rosa_getting_started/rosa-aws-privatelink-creating-cluster.adoc#rosa-aws-privatelink-creating-cluster[Creating an AWS PrivateLink cluster on ROSA] diff --git a/identity_providers/config-identity-providers.adoc b/identity_providers/config-identity-providers.adoc new file mode 100644 index 0000000000..d447337db6 --- /dev/null +++ b/identity_providers/config-identity-providers.adoc @@ -0,0 +1,17 @@ +include::modules/attributes-openshift-dedicated.adoc[] +[id="config-identity-providers"] += Configuring identity providers +:context: config-identity-providers + +toc::[] + +After your {product-title} cluster is created, you must configure identity providers to determine how users log in to access the cluster. 
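+
+After an identity provider is configured, users can log in from the command line as well as from the web console. The following is a minimal sketch for identity providers that accept direct user name and password validation, such as htpasswd or LDAP; it assumes the `oc` CLI is installed and that the API URL and credentials are placeholders for values from your own cluster and identity provider:
+
+[source,terminal]
+----
+$ oc login <api_url> --username=<username> --password=<password>
+----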
+
+include::modules/understanding-idp.adoc[leveloffset=+1]
+include::modules/config-github-idp.adoc[leveloffset=+1]
+include::modules/config-gitlab-idp.adoc[leveloffset=+1]
+include::modules/config-google-idp.adoc[leveloffset=+1]
+include::modules/config-ldap-idp.adoc[leveloffset=+1]
+include::modules/config-openid-idp.adoc[leveloffset=+1]
+include::modules/config-htpasswd-idp.adoc[leveloffset=+1]
+include::modules/access-cluster.adoc[leveloffset=+1]
diff --git a/identity_providers/images b/identity_providers/images
new file mode 120000
index 0000000000..5e67573196
--- /dev/null
+++ b/identity_providers/images
@@ -0,0 +1 @@
+../images
\ No newline at end of file
diff --git a/identity_providers/modules b/identity_providers/modules
new file mode 120000
index 0000000000..464b823aca
--- /dev/null
+++ b/identity_providers/modules
@@ -0,0 +1 @@
+../modules
\ No newline at end of file
diff --git a/images/156_OpenShift_ROSA_Arch_0621_arch.svg b/images/156_OpenShift_ROSA_Arch_0621_arch.svg
new file mode 100644
index 0000000000..bcbd1c0634
--- /dev/null
+++ b/images/156_OpenShift_ROSA_Arch_0621_arch.svg
@@ -0,0 +1 @@
+[SVG diagram (156_OpenShift_0621): ROSA architecture showing control plane nodes (x3: apiserver, etcd, controller), infra nodes (x2, x3: registry, router, monitoring), and worker nodes (xN) across availability zones (x1, x3) in an AWS VPC, with Route53 DNS, internal and external/internal API NLBs, Red Hat console and API ELBs, an external/internal application ELB, and Red Hat management and developer access over public and private networks]
\ No newline at end of file
diff --git a/images/156_OpenShift_ROSA_Arch_0621_privatelink.svg b/images/156_OpenShift_ROSA_Arch_0621_privatelink.svg
new file mode 100644
index 0000000000..cf32299f8c
--- /dev/null
+++ b/images/156_OpenShift_ROSA_Arch_0621_privatelink.svg
@@ -0,0 +1,322 @@
+[SVG diagram (156_OpenShift_0621): ROSA PrivateLink architecture showing Red Hat management access over AWS PrivateLink and developer access over a private network to control plane nodes (x3: apiserver, etcd, controller), infra nodes (x2, x3: registry, router, monitoring), and worker nodes (xN) across availability zones (x1, x3) in an AWS VPC, with Route53 DNS, internal and external/internal API NLBs, Red Hat console and API ELBs, and an external/internal application ELB]
diff --git a/images/osd-monitoring-architecture.svg b/images/osd-monitoring-architecture.svg
new file mode 100644
index 0000000000..9a648fdc37
--- /dev/null
+++ b/images/osd-monitoring-architecture.svg
@@ -0,0 +1 @@
+[SVG diagram (118_OpenShift_0920): OpenShift monitoring architecture showing the Cluster Version Operator and Cluster Monitoring Operator deploying the platform components installed by default (Prometheus Operator, Prometheus, Alertmanager, Thanos Querier, Telemeter Client, Grafana, NE, PA, KSM, OSM) and the components for monitoring user-defined projects (Prometheus Operator, Prometheus, Thanos Ruler), with deploy, alert, and query flows between OpenShift projects and user-defined projects]
\ No newline at end of file
diff --git a/logging/rosa-install-logging.adoc b/logging/rosa-install-logging.adoc
new file mode 100644
index 0000000000..24d68a41ab
--- /dev/null
+++ b/logging/rosa-install-logging.adoc
@@ -0,0 +1,30 @@
+include::modules/attributes-openshift-dedicated.adoc[]
+[id="rosa-install-logging"] += Installing logging add-on services +:context: rosa-install-logging +toc::[] + +This section describes how to install the logging add-on and Amazon Web Services (AWS) CloudWatch log forwarding add-on services on {product-title} (ROSA). + +The AWS CloudWatch log forwarding service on ROSA has the following approximate log throughput rates. Message rates greater than these can result in dropped log messages. + +.Approximate log throughput rates +[cols="30,70"] +|=== +|Message size (bytes) |Maximum expected rate (messages/second/node) + +|512 +|1,000 + +|1,024 +|650 + +|2,048 +|450 +|=== + +include::modules/rosa-install-logging-addon.adoc[leveloffset=+1] + +[id="additional-resources_adding-service"] +== Additional resources +* xref:../adding_service_cluster/adding-service.adoc#adding-service[Adding services to your cluster] diff --git a/logging/rosa-viewing-logs.adoc b/logging/rosa-viewing-logs.adoc new file mode 100644 index 0000000000..5c09a3152c --- /dev/null +++ b/logging/rosa-viewing-logs.adoc @@ -0,0 +1,9 @@ +include::modules/attributes-openshift-dedicated.adoc[] +[id="rosa-viewing-logs"] += Viewing cluster logs +:context: rosa-viewing-logs +toc::[] + +View forwarded cluster logs in the AWS console. + +include::modules/rosa-view-cloudwatch-logs.adoc[leveloffset=+1] diff --git a/modules/access-cluster.adoc b/modules/access-cluster.adoc new file mode 100644 index 0000000000..16cce695dc --- /dev/null +++ b/modules/access-cluster.adoc @@ -0,0 +1,27 @@ +// Module included in the following assemblies: +// +// * assemblies/accessing-cluster.adoc +// * assemblies/quickstart-osd.adoc + +[id="access-cluster_{context}"] += Accessing your cluster + + +After you have configured your identity providers, users can access the cluster from the {OCM}. + +.Prerequisites + +* You have created a cluster. +* Identity providers have been configured for your cluster. + +.Procedure + +. From {cloud-redhat-com}, click on the cluster you want to access. + +. Click *Open Console*. + +. Click on your identity provider and provide your credentials to log into the cluster. + +.Verification + +* After you have accessed the cluster, you are directed to the console for your {product-title} cluster. diff --git a/modules/access-service.adoc b/modules/access-service.adoc new file mode 100644 index 0000000000..ca8bd2f50d --- /dev/null +++ b/modules/access-service.adoc @@ -0,0 +1,34 @@ +// Module included in the following assemblies: +// +// * assemblies/adding-service.adoc + +[id="access-service_{context}"] + += Accessing installed services on your cluster + +// Should this be the OCM instead of "OpenShift console"? +After you successfully install a service on your cluster, you can access the service through the OpenShift console. + + +.Prerequisites + +* You have successfully installed a service on your {product-title} cluster. + + +.Procedure + +. Navigate to the *Clusters* page in link:https://cloud.redhat.com/openshift/[OpenShift Cluster Manager (OCM)]. + +. Select the cluster with an installed service you want to access. + +. Navigate to the *Add-ons* tab, and locate the installed service that you want to access. + +. Click *View on console* from the service option to open the OpenShift console. + +. Enter your credentials to log in to the OpenShift console. + +. Click the *Red Hat Applications* menu by clicking the three-by-three matrix icon in the upper right corner of the main screen. + +. Select the service you want to open from the drop-down menu. 
A new browser tab opens and you are required to authenticate through Red Hat Single Sign-On. + +You have now accessed your service and can begin using it. diff --git a/modules/add-user.adoc b/modules/add-user.adoc new file mode 100644 index 0000000000..47b1f5c59b --- /dev/null +++ b/modules/add-user.adoc @@ -0,0 +1,25 @@ +// Module included in the following assemblies: +// +// * assemblies/quickstart-osd.adoc + +[id="add-user_{context}"] += Adding a user + + +Administrator roles are managed using a `dedicated-admins` group on the cluster. You can add and remove users from OpenShift Cluster Manager (OCM). + +.Procedure + +. Navigate to the *Clusters* page and select the cluster you want to add users to. + +. Click the *Access control* tab. + +. Under the *Cluster administrative users* heading, click *Add User*. + +. Enter the user ID you want to add. + +. Click *Add user*. + +.Verification + +* You now see the user listed under the *Cluster administrative users* heading. diff --git a/modules/adding-service-existing.adoc b/modules/adding-service-existing.adoc new file mode 100644 index 0000000000..d98fd6b1a9 --- /dev/null +++ b/modules/adding-service-existing.adoc @@ -0,0 +1,40 @@ +// Module included in the following assemblies: +// +// * assemblies/adding-service.adoc + +[id="adding-service-existing_{context}"] + += Adding a service to a cluster + + +You can add a service to an existing {product-title} cluster through the OpenShift Cluster Manager (OCM). + + +.Prerequisites + +* You have created and provisioned a cluster for {product-title}. +* Your cluster meets all of the prerequisites for the service that you want to add on to your cluster. +* For paid add-on services, note the following considerations: +** If the organization has sufficient quota, and if the service is compatible with the cluster, the service appears in OCM. +** If the organization has never had quota, or if the cluster is not compatible, then the service does not display. +** If the organization had quota in the past, but the quota is currently `0`, the service is still visible but disabled in OCM until you get more quota. + +// TODO: Could this just be one of the above prereqs instead of its own NOTE? +[NOTE] +==== +To add a service to a cluster, you must be the cluster owner. +==== + +.Procedure + +. Navigate to the *Clusters* page in link:https://cloud.redhat.com/openshift/[OCM]. + +. Select the cluster you want to add a service to. + +. Click the *Add-ons* tab. + +. Click the service option you want to add, click *Install*. An installing icon appears, indicating that the service has begun installing. ++ +A green check mark appears in the service option when the installation is complete. You might have to refresh your browser to see the installation status. + +. When the service is *Installed*, click *View in console* to access the service. 
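+
+If you have cluster administrator access from the command line, you can also verify that an add-on that delivers an Operator has finished deploying. The following is a sketch only; it assumes that the `oc` CLI is logged in to the cluster, that the add-on installs through Operator Lifecycle Manager, and that `<addon_name>` is a placeholder for the name of the installed service:
+
+[source,terminal]
+----
+$ oc get csv --all-namespaces | grep -i <addon_name>
+----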
diff --git a/modules/attributes-openshift-dedicated.adoc b/modules/attributes-openshift-dedicated.adoc new file mode 100644 index 0000000000..5f45e798a4 --- /dev/null +++ b/modules/attributes-openshift-dedicated.adoc @@ -0,0 +1,12 @@ +// common attributes +:product-short-name: OpenShift Dedicated +:toc: +:toc-title: +:experimental: +:imagesdir: images +:OCP: OpenShift Container Platform +:OCM: OpenShift Cluster Manager (OCM) +:cloud-redhat-com: link:https://cloud.redhat.com/openshift[OpenShift Cluster Manager (OCM)] +:AWS: Amazon Web Services (AWS) +:GCP: Google Cloud Platform (GCP) +:kebab: image:kebab.png[title="Options menu"] diff --git a/modules/aws-cloudwatch.adoc b/modules/aws-cloudwatch.adoc new file mode 100644 index 0000000000..d311c17fd3 --- /dev/null +++ b/modules/aws-cloudwatch.adoc @@ -0,0 +1,14 @@ +// Module included in the following assemblies: +// +// * adding_service_cluster/rosa-available-services.adoc + +[id="aws-cloudwatch_{context}"] + += Amazon CloudWatch + +Amazon CloudWatch forwards logs from {product-title} (ROSA) to the AWS console for viewing. You must first install the ROSA `cluster-logging-operator` using the `rosa` CLI before installing the Amazon CloudWatch service through the OpenShift Cluster Manager (OCM) console. + +.Additional resources + +* See xref:../logging/rosa-install-logging.adoc#rosa-install-logging[Install the logging add-on service] for information about the Amazon CloudWatch log forwarding service. +* See link:https://aws.amazon.com/cloudwatch/[Amazon CloudWatch product information] for general details about Amazon Cloudwatch. diff --git a/modules/aws-direct-connect.adoc b/modules/aws-direct-connect.adoc new file mode 100644 index 0000000000..2891c3dfd6 --- /dev/null +++ b/modules/aws-direct-connect.adoc @@ -0,0 +1,40 @@ +// Module included in the following assemblies: +// +// * assemblies/aws-private-connections.adoc + +[id="aws-direct-connect_{context}"] += Configuring AWS Direct Connect + + +{AWS} Direct Connect requires a hosted Virtual Interface (VIF) connected to a Direct Connect Gateway (DXGateway), which is in turn associated to a Virtual Gateway (VGW) or a Transit Gateway in order to access a remote Virtual Private Cloud (VPC) in the same or another account. + +If you do not have an existing DXGateway, the typical process involves creating the hosted VIF, with the DXGateway and VGW being created in your AWS account. + +If you have an existing DXGateway connected to one or more existing VGWs, the process involves your AWS account sending an Association Proposal to the DXGateway owner. The DXGateway owner must ensure that the proposed CIDR will not conflict with any other VGWs they have associated. + +.Prerequisites + +* Confirm the CIDR range of the {product-title} VPC will not conflict with any other VGWs you have associated. +* Gather the following information: +** The Direct Connect Gateway ID. +** The AWS Account ID associated with the virtual interface. +** The BGP ASN assigned for the DXGateway. Optional: the Amazon default ASN may also be used. + +.Procedure + +. link:https://docs.aws.amazon.com/directconnect/latest/UserGuide/create-vif.html[Create a VIF] or link:https://docs.aws.amazon.com/directconnect/latest/UserGuide/viewvifdetails.html[view your existing VIFs] to determine the type of direct connection you need to create. + +. Create your gateway. +.. 
If the Direct Connect VIF type is *Private*, link:https://docs.aws.amazon.com/directconnect/latest/UserGuide/virtualgateways.html#create-virtual-private-gateway[create a virtual private gateway]. +.. If the Direct Connect VIF is *Public*, link:https://docs.aws.amazon.com/directconnect/latest/UserGuide/direct-connect-gateways-intro.html#create-direct-connect-gateway[create a Direct Connect gateway]. + +. If you have an existing gateway you want to use, link:https://docs.aws.amazon.com/directconnect/latest/UserGuide/multi-account-associate-vgw.html[create an association proposal] and send the proposal to the DXGateway owner for approval. ++ +[WARNING] +==== +When connecting to an existing DXGateway, you are responsible for the link:https://aws.amazon.com/directconnect/pricing/[costs]. +==== + +.Additional resources + +* For more information and troubleshooting help, see the link:https://docs.aws.amazon.com/directconnect/latest/UserGuide/Welcome.html[AWS Direct Connect] guide. diff --git a/modules/aws-limits.adoc b/modules/aws-limits.adoc new file mode 100644 index 0000000000..9431afa210 --- /dev/null +++ b/modules/aws-limits.adoc @@ -0,0 +1,85 @@ +// Module included in the following assemblies: +// +// * assemblies/config-aws-account.adoc + +[id="aws-limits_{context}"] += AWS account limits + + +The {product-title} cluster uses a number of Amazon Web Services (AWS) components, and the default link:https://docs.aws.amazon.com/general/latest/gr/aws_service_limits.html[service limits] affect your ability to install {product-title} clusters. If you use certain cluster configurations, deploy your cluster in certain AWS regions, or run multiple clusters from your account, you might need to request additional resources for your AWS account. + +The following table summarizes the AWS components whose limits can impact your ability to install and run {product-title} clusters. + + +[cols="3a,3a,3a,8a",options="header"] +|=== +|Component |Number of clusters available by default| Default AWS limit |Description + +|Instance Limits +|Varies +|Varies +|By default, each cluster creates the following instances: + +* One bootstrap machine, which is removed after installation +* Three master nodes +* Three worker nodes + +These instance type counts are within a new account's default limit. To deploy more worker nodes, enable autoscaling, deploy large workloads, or use a different instance type, review your account limits to ensure that your cluster can deploy the machines that you need. + +In most regions, the bootstrap and worker machines uses an `m4.large` machines and the master machines use `m4.xlarge` instances. In some regions, including all regions that do not support these instance types, `m5.large` and `m5.xlarge` instances are used instead. + +|Elastic IPs (EIPs) +|0 to 1 +|5 EIPs per account +|To provision the cluster in a highly available configuration, the installation program creates a public and private subnet for each link:https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-regions-availability-zones.html[availability zone within a region]. Each private subnet requires a link:https://docs.aws.amazon.com/vpc/latest/userguide/vpc-nat-gateway.html[NAT Gateway], and each NAT gateway requires a separate +link:https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/elastic-ipaddresses-eip.html[elastic IP]. Review the link:https://aws.amazon.com/about-aws/global-infrastructure/[AWS region map] to determine how many availability zones are in each region. 
To take advantage of the default high availability, install the cluster in a region with at least three availability zones. To install a cluster in a region with more than five availability zones, you must increase the EIP limit. + +// TODO: The above elastic IP link is redirected. Find new link. Is it https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/elastic-ip-addresses-eip.html ? + +[IMPORTANT] +==== +To use the `us-east-1` region, you must increase the EIP limit for your account. +==== + +|Virtual Private Clouds (VPCs) +|5 +|5 VPCs per region +|Each cluster creates its own VPC. + +|Elastic Load Balancing (ELB/NLB) +|3 +|20 per region +|By default, each cluster creates internal and external network load balancers for the master API server and a single classic elastic load balancer for the router. Deploying more Kubernetes LoadBalancer Service objects will create additional link:https://aws.amazon.com/elasticloadbalancing/[load balancers]. + + +|NAT Gateways +|5 +|5 per availability zone +|The cluster deploys one NAT gateway in each availability zone. + +|Elastic Network Interfaces (ENIs) +|At least 12 +|350 per region +|The default installation creates 21 ENIs and an ENI for each availability zone in your region. For example, the `us-east-1` region contains six availability zones, so a cluster that is deployed in that zone uses 27 ENIs. Review the link:https://aws.amazon.com/about-aws/global-infrastructure/[AWS region map] to determine how many availability zones are in each region. + +Additional ENIs are created for additional machines and elastic load balancers that are created by cluster usage and deployed workloads. + +|VPC Gateway +|20 +|20 per account +|Each cluster creates a single VPC Gateway for S3 access. + + +|S3 buckets +|99 +|100 buckets per account +|Because the installation process creates a temporary bucket and the registry component in each cluster creates a bucket, you can create only 99 {product-title} clusters per AWS account. + +|Security Groups +|250 +|2,500 per account +|Each cluster creates 10 distinct security groups. + | Fail, optionally surfacing response body to the user +|=== + +// TODO: what is this random text/cell on line 82^? diff --git a/modules/aws-vpc.adoc b/modules/aws-vpc.adoc new file mode 100644 index 0000000000..aaa42c5585 --- /dev/null +++ b/modules/aws-vpc.adoc @@ -0,0 +1,38 @@ +// Module included in the following assemblies: +// +// * assemblies/aws-private-connections.adoc + +[id="aws-vpc_{context}"] += Configuring AWS VPC peering + + +A Virtual Private Cloud (VPC) peering connection is a networking connection between two VPCs that enables you to route traffic between them using private IPv4 addresses or IPv6 addresses. You can configure an {AWS} VPC containing an {product-title} cluster to peer with another AWS VPC network. + +[WARNING] +==== +Private clusters cannot be fully deleted by the {OCM} if the VPC the cluster is installed in is peered. + +AWS supports inter-region VPC peering between all commercial regions link:https://aws.amazon.com/vpc/faqs/#Peering_Connections[excluding China]. +==== + +.Prerequisites + +* Gather the following information about the Customer VPC that is required to initiate the peering request: +** Customer AWS account number +** Customer VPC ID +** Customer VPC Region +** Customer VPC CIDR +* Check the CIDR block used by the {product-title} Cluster VPC. 
If it overlaps or matches the CIDR block for the Customer VPC, then peering between these two VPCs is not possible; see the Amazon VPC link:https://docs.aws.amazon.com/vpc/latest/peering/invalid-peering-configurations.html[Unsupported VPC peering configurations] documentation for details. If the CIDR blocks do not overlap, you can proceed with the procedure. + +.Procedure + +. link:https://docs.aws.amazon.com/vpc/latest/peering/create-vpc-peering-connection.html#create-vpc-peering-connection-local[Initiate the VPC peering request]. + +. link:https://docs.aws.amazon.com/vpc/latest/peering/create-vpc-peering-connection.html#accept-vpc-peering-connection[Accept the VPC peering request]. + +. link:https://docs.aws.amazon.com/vpc/latest/peering/vpc-peering-routing.html[Update your Route tables for the VPC peering connection]. + + +.Additional resources + +* For more information and troubleshooting help, see the link:https://docs.aws.amazon.com/vpc/latest/peering/what-is-vpc-peering.html[AWS VPC] guide. diff --git a/modules/aws-vpn.adoc b/modules/aws-vpn.adoc new file mode 100644 index 0000000000..4b6e450d38 --- /dev/null +++ b/modules/aws-vpn.adoc @@ -0,0 +1,45 @@ +// Module included in the following assemblies: +// +// * assemblies/aws-private-connections.adoc + +[id="aws-vpn_{context}"] += Configuring an AWS VPN + + +You can configure an {AWS} {product-title} cluster to use a customer’s on-site hardware Virtual Private Network (VPN) device. By default, instances that you launch into an AWS Virtual Private Cloud (VPC) cannot communicate with your own (remote) network. You can enable access to your remote network from your VPC by creating an AWS Site-to-Site VPN connection, and configuring routing to pass traffic through the connection. + +[NOTE] +==== +AWS VPN does not currently provide a managed option to apply NAT to VPN traffic. See the link:https://aws.amazon.com/premiumsupport/knowledge-center/configure-nat-for-vpn-traffic/[AWS Knowledge Center] for more details. + +Routing all traffic, for example `0.0.0.0/0`, through a private connection is not supported. This requires deleting the internet gateway, which disables SRE management traffic. +==== + +.Prerequisites + +* Hardware VPN gateway device model and software version, for example Cisco ASA running version 8.3. See the link:https://docs.aws.amazon.com/vpc/latest/adminguide/Introduction.html#DevicesTested[AWS documentation] to confirm whether your gateway device is supported by AWS. +* Public, static IP address for the VPN gateway device. +* BGP or static routing: if BGP, the ASN is required. If static routing, you must +configure at least one static route. +* Optional: IP and port/protocol of a reachable service to test the VPN connection. + +.Procedure + +. link:https://docs.aws.amazon.com/vpn/latest/s2svpn/SetUpVPNConnections.html#vpn-create-cgw[Create a customer gateway] to configure the VPN connection. + +. If you do not already have a Virtual Private Gateway attached to the intended VPC, link:https://docs.aws.amazon.com/vpn/latest/s2svpn/SetUpVPNConnections.html#vpn-create-target-gateway[create and attach] a Virtual Private Gateway. + +. link:https://docs.aws.amazon.com/vpn/latest/s2svpn/SetUpVPNConnections.html#vpn-configure-route-tables[Configure routing and enable VPN route propagation]. + +. link:https://docs.aws.amazon.com/vpn/latest/s2svpn/SetUpVPNConnections.html#vpn-configure-security-groups[Update your security group]. + +. 
link:https://docs.aws.amazon.com/vpn/latest/s2svpn/SetUpVPNConnections.html#vpn-create-vpn-connection[Establish the Site-to-Site VPN connection]. ++ +[NOTE] +==== +Note the VPC subnet information, which you must add to your configuration as the remote network. +==== + +.Additional resources + +* For more information and troubleshooting help, see the link:https://docs.aws.amazon.com/vpn/latest/s2svpn/VPC_VPN.html[AWS VPN] guide. diff --git a/modules/ccs-aws-customer-procedure.adoc b/modules/ccs-aws-customer-procedure.adoc new file mode 100644 index 0000000000..16784e0028 --- /dev/null +++ b/modules/ccs-aws-customer-procedure.adoc @@ -0,0 +1,24 @@ +// Module included in the following assemblies: +// +// * assemblies/aws-ccs.adoc + +[id="ccs-aws-customer-procedure_{context}"] += Required customer procedure +// TODO: Better procedure heading that tells you what this is doing + +The Customer Cloud Subscription (CCS) model allows Red Hat to deploy and manage {product-title} into a customer’s Amazon Web Services (AWS) account. Red Hat requires several prerequisites in order to provide these services. + +.Procedure + +. If the customer is using AWS Organizations, you must either use an AWS account within your organization or link:https://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_create.html#orgs_manage_accounts_create-new[create a new one]. + +. To ensure that Red Hat can perform necessary actions, you must either create a Service Control Policy (SCP) or ensure that none is applied to the AWS account. + +. link:https://docs.aws.amazon.com/organizations/latest/userguide/orgs_introduction.html[Attach] the SCP to the AWS account. + +. Within the AWS account, you must link:https://docs.aws.amazon.com/IAM/latest/UserGuide/id_users_create.html[create] an `osdCcsAdmin` IAM user with the following requirements: +** This user needs at least *Programmatic access* enabled. +** This user must have the `AdministratorAccess` policy attached to it. + +. Provide the IAM user credentials to Red Hat. +** You must provide the *access key ID* and *secret access key* in the {cloud-redhat-com}. diff --git a/modules/ccs-aws-customer-requirements.adoc b/modules/ccs-aws-customer-requirements.adoc new file mode 100644 index 0000000000..3fe3e4fc2c --- /dev/null +++ b/modules/ccs-aws-customer-requirements.adoc @@ -0,0 +1,72 @@ +// Module included in the following assemblies: +// +// * assemblies/assembly.adoc + +[id="ccs-aws-customer-requirements_{context}"] += Customer requirements + + +{product-title} clusters using a Customer Cloud Subscription (CCS) model on Amazon Web Services (AWS) must meet several prerequisites before they can be deployed. + +[id="ccs-requirements-account_{context}"] +== Account + +* The customer ensures that link:https://docs.aws.amazon.com/general/latest/gr/aws_service_limits.html[AWS limits] are sufficient to support {product-title} provisioned within the customer-provided AWS account. + +* The customer-provided AWS account should be in the customer's AWS Organization with the applicable Service Control Policy (SCP) applied. ++ +[NOTE] +==== +It is not a requirement that the customer-provided account be within an AWS Organization or for the SCP to be applied, however Red Hat must be able to perform all the actions listed in the SCP without restriction. +==== + +* The customer-provided AWS account must not be transferable to Red Hat. + +* The customer may not impose AWS usage restrictions on Red Hat activities. 
Imposing restrictions severely hinders Red Hat's ability to respond to incidents. + +* Red Hat deploys monitoring into AWS to alert Red Hat when a highly privileged account, such as a root account, logs into the customer-provided AWS account. + +* The customer can deploy native AWS services within the same customer-provided AWS account. ++ +[NOTE] +==== +Customers are encouraged, but not mandated, to deploy resources in a Virtual Private Cloud (VPC) separate from the VPC hosting {product-title} and other Red Hat supported services. +==== + +[id="ccs-requirements-access_{context}"] +== Access requirements + +* To appropriately manage the {product-title} service, Red Hat must have the `AdministratorAccess` policy applied to the administrator role at all times. ++ +[NOTE] +==== +This policy only provides Red Hat with permissions and capabilities to change resources in the customer-provided AWS account. +==== + +* Red Hat must have AWS console access to the customer-provided AWS account. This access is protected and managed by Red Hat. + +* The customer must not utilize the AWS account to elevate their permissions within the {product-title} cluster. + +* Actions available in the {cloud-redhat-com} must not be directly performed in the customer-provided AWS account. + +[id="ccs-requirements-support_{context}"] +== Support requirements + +* Red Hat recommends that the customer have at least link:https://aws.amazon.com/premiumsupport/plans/[Business Support] from AWS. + +* Red Hat has authority from the customer to request AWS support on their behalf. + +* Red Hat has authority from the customer to request AWS resource limit increases on the customer-provided account. + +* Red Hat manages the restrictions, limitations, expectations, and defaults for all {product-title} clusters in the same manner, unless otherwise specified in this requirements section. + +[id="ccs-requirements-security_{context}"] +== Security requirements + +* The customer-provided IAM credentials must be unique to the customer-provided AWS account and must not be stored anywhere in the customer-provided AWS account. + +* Volume snapshots will remain within the customer-provided AWS account and customer-specified region. + +* Red Hat must have ingress access to EC2 hosts and the API server through white-listed Red Hat machines. + +* Red Hat must have egress allowed to forward system and audit logs to a Red Hat managed central logging stack. diff --git a/modules/ccs-aws-iam.adoc b/modules/ccs-aws-iam.adoc new file mode 100644 index 0000000000..b64eb69d51 --- /dev/null +++ b/modules/ccs-aws-iam.adoc @@ -0,0 +1,117 @@ +// Module included in the following assemblies: +// +// * assemblies/aws-ccs.adoc + +[id="ccs-aws-iam_{context}"] += Red Hat managed IAM references for AWS + +Red Hat is responsible for creating and managing the following Amazon Web Services (AWS) resources: IAM policies, IAM users, and IAM roles. + +[id="aws-policy-iam-policies_{context}"] +== IAM policies + +[NOTE] +==== +IAM policies are subject to modification as the capabilities of {product-title} change. +==== + +* The `AdministratorAccess` policy is used by the administration role. This policy provides Red Hat the access necessary to administer the {product-title} cluster in the customer-provided AWS account. 
++ +[source,json] +---- +{ + "Version": "2012-10-17", + "Statement": [ + { + "Action": "*", + "Resource": "*", + "Effect": "Allow" + } + ] +} +---- + +* The `CustomerAdministatorAccess` role provides the customer access to administer a subset of services within the AWS account. At this time, the following are allowed: + +** VPC Peering +** VPN Setup +** Direct Connect (only available if granted through the Service Control Policy) ++ +[source,json] +---- +{ + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Action": [ + "ec2:AttachVpnGateway", + "ec2:DescribeVpnConnections", + "ec2:AcceptVpcPeeringConnection", + "ec2:DeleteVpcPeeringConnection", + "ec2:DescribeVpcPeeringConnections", + "ec2:CreateVpnConnectionRoute", + "ec2:RejectVpcPeeringConnection", + "ec2:DetachVpnGateway", + "ec2:DeleteVpnConnectionRoute", + "ec2:DeleteVpnGateway", + "ec2:DescribeVpcs", + "ec2:CreateVpnGateway", + "ec2:ModifyVpcPeeringConnectionOptions", + "ec2:DeleteVpnConnection", + "ec2:CreateVpcPeeringConnection", + "ec2:DescribeVpnGateways", + "ec2:CreateVpnConnection", + "ec2:DescribeRouteTables", + "ec2:CreateTags", + "ec2:CreateRoute", + "directconnect:*" + ], + "Resource": "*" + } + ] +} +---- + + +* If enabled, the `BillingReadOnlyAccess` role provides read-only access to view billing and usage information for the account. ++ +Billing and usage access is only granted if the root account in the AWS Organization has it enabled. This is an optional step the customer must perform to enable read-only billing and usage access and does not impact the creation of this profile and the role that uses it. If this role is not enabled, users will not see billing and usage information. See this tutorial on link:https://docs.aws.amazon.com/IAM/latest/UserGuide/tutorial_billing.html#tutorial-billing-step1[how to enable access to billing data]. ++ +[source,json] +---- +{ + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Action": [ + "aws-portal:ViewAccount", + "aws-portal:ViewBilling" + ], + "Resource": "*" + } + ] +} +---- + +[id="aws-policy-iam-users_{context}"] +== IAM users + +The `osdManagedAdmin` user is created immediately after taking control of the customer-provided AWS account. This is the user that will perform the {product-title} cluster installation. + +[id="aws-policy-iam-roles_{context}"] +== IAM roles + +* The `network-mgmt` role provides customer-federated administrative access to the AWS account through a separate AWS account. It also has the same access as a read-only role. The following policies are attached to the role: + +** AmazonEC2ReadOnlyAccess +** CustomerAdministratorAccess + +* The `read-only` role provides customer-federated read-only access to the AWS account through a separate AWS account. The following policies are attached to the role: + +** AWSAccountUsageReportAccess +** AmazonEC2ReadOnlyAccess +** AmazonS3ReadOnlyAccess +** IAMReadOnlyAccess +** BillingReadOnlyAccess diff --git a/modules/ccs-aws-provisioned.adoc b/modules/ccs-aws-provisioned.adoc new file mode 100644 index 0000000000..ec2f83bdcb --- /dev/null +++ b/modules/ccs-aws-provisioned.adoc @@ -0,0 +1,77 @@ +// Module included in the following assemblies: +// +// * assemblies/aws-ccs.adoc + +[id="ccs-aws-provisioned_{context}"] += Provisioned AWS Infrastructure + + +This is an overview of the provisioned Amazon Web Services (AWS) components on a deployed {product-title} cluster. 
For a more detailed listing of all provisioned AWS components, see the link:https://access.redhat.com/documentation/en-us/openshift_container_platform/[{OCP} documentation]. + +[id="aws-policy-ec2_{context}"] +== AWS Elastic Computing (EC2) instances + +AWS EC2 instances are required to deploy the control plane and data plane functions of {product-title} in the AWS public cloud. Instance types might vary for control plane and infrastructure nodes depending on worker node count. + +* Single availability zone +** 3 m5.2xlarge minimum (control plane nodes) +** 2 r5.xlarge minimum (infrastructure nodes) +** 2 m5.xlarge minimum but highly variable (worker nodes) + +* Multiple availability zones +** 3 m5.2xlarge minimum (control plane nodes) +** 3 r5.xlarge minimum (infrastructure nodes) +** 3 m5.xlarge minimum but highly variable (worker nodes) + +[id="aws-policy-ebs-storage_{context}"] +== AWS Elastic Block Store (EBS) storage + +Amazon EBS block storage is used for both local node storage and persistent volume storage. + +Volume requirements for each EC2 instance: + +- Control plane volumes +* Size: 350 GB +* Type: io1 +* Input/output operations per second: 1000 + +- Infrastructure volumes +* Size: 300 GB +* Type: gp2 +* Input/output operations per second: 100 + +- Worker volumes +* Size: 300 GB +* Type: gp2 +* Input/output operations per second: 100 + +[id="aws-policy-elastic-load-balancers_{context}"] +== Elastic load balancers + +Up to two Network Elastic Load Balancers (ELBs) for API and up to two Classic ELBs for application router. For more information, see the link:https://aws.amazon.com/elasticloadbalancing/features/#Details_for_Elastic_Load_Balancing_Products[ELB documentation for AWS]. + +[id="aws-policy-s3-storage_{context}"] +== S3 storage +The image registry and Elastic Block Store (EBS) volume snapshots are backed by AWS S3 storage. Pruning of resources is performed regularly to optimize S3 usage and cluster performance. + +[NOTE] +==== +Two buckets are required with a typical size of 2 TB each. +==== + +[id="aws-policy-vpc_{context}"] +== VPC +Customers should expect to see one VPC per cluster. Additionally, the VPC needs the following configurations: + +* *Subnets*: Two subnets for a cluster with a single availability zone, or six subnets for a cluster with multiple availability zones. + +* *Router tables*: One router table per private subnet, and one additional table per cluster. + +* *Internet gateways*: One Internet Gateway per cluster. + +* *NAT gateways*: One NAT Gateway per public subnet. + +[id="aws-policy-security-groups_{context}"] +== Security groups + +AWS security groups provide security at the protocol and port-access level; they are associated with EC2 instances and Elastic Load Balancing. Each security group contains a set of rules that filter traffic coming in and out of an EC2 instance. You must ensure the ports required for the link:https://docs.openshift.com/container-platform/4.7/installing/installing_aws/installing-aws-user-infra.html#installation-aws-user-infra-other-infrastructure_installing-aws-user-infra[{OCP} installation] are open on your network and configured to allow access between hosts. 
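+
+Because these resources are created in the customer-provided AWS account, you can inspect them with the AWS CLI. The following is a sketch only; it assumes that the AWS CLI is configured for the account and region that hosts the cluster, and that `<infra_id>` is a placeholder for the cluster infrastructure ID used to tag cluster-owned resources:
+
+[source,terminal]
+----
+$ aws ec2 describe-instances \
+    --filters "Name=tag-key,Values=kubernetes.io/cluster/<infra_id>" \
+    --query "Reservations[].Instances[].{ID:InstanceId,Type:InstanceType,State:State.Name}" \
+    --output table
+----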
diff --git a/modules/ccs-aws-scp.adoc b/modules/ccs-aws-scp.adoc new file mode 100644 index 0000000000..c59dd77d48 --- /dev/null +++ b/modules/ccs-aws-scp.adoc @@ -0,0 +1,205 @@ +// Module included in the following assemblies: +// +// * assemblies/aws-ccs.adoc + +[id="ccs-aws-scp_{context}"] += Minimum required Service Control Policy (SCP) + + +Service Control Policy (SCP) management is the responsibility of the customer. These policies are maintained in the AWS Organization and control what services are available within the attached AWS accounts. + +[cols="2a,2a,2a,2a",options="header"] + +|=== +| Required/optional +| Service +| Actions +| Effect + +.15+| Required +|Amazon EC2 | All |Allow +|Amazon EC2 Auto Scaling | All |Allow +|Amazon S3| All |Allow +|Identity And Access Management | All |Allow +|Elastic Load Balancing | All |Allow +|Elastic Load Balancing V2| All |Allow +|Amazon CloudWatch | All |Allow +|Amazon CloudWatch Events | All |Allow +|Amazon CloudWatch Logs | All |Allow +|AWS Support | All |Allow +|AWS Key Management Service | All |Allow +|AWS Security Token Service | All |Allow +|AWS Resource Tagging | All |Allow +|AWS Route53 DNS | All |Allow +|AWS Service Quotas | ListServices + +GetRequestedServiceQuotaChange + +GetServiceQuota + +RequestServiceQuotaIncrease + +ListServiceQuotas +| Allow + + +.3+|Optional + +| AWS Billing +| ViewAccount + +Viewbilling + +ViewUsage +| Allow + +|AWS Cost and Usage Report +|All +|Allow + +|AWS Cost Explorer Services +|All +|Allow + + +|=== + +// TODO: Need some sort of intro into whatever this is +[source,json] +---- +{ + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Action": [ + "ec2:*" + ], + "Resource": [ + "*" + ] + }, + { + "Effect": "Allow", + "Action": [ + "autoscaling:*" + ], + "Resource": [ + "*" + ] + }, + { + "Effect": "Allow", + "Action": [ + "s3:*" + ], + "Resource": [ + "*" + ] + }, + { + "Effect": "Allow", + "Action": [ + "iam:*" + ], + "Resource": [ + "*" + ] + }, + { + "Effect": "Allow", + "Action": [ + "elasticloadbalancing:*" + ], + "Resource": [ + "*" + ] + }, + { + "Effect": "Allow", + "Action": [ + "cloudwatch:*" + ], + "Resource": [ + "*" + ] + }, + { + "Effect": "Allow", + "Action": [ + "events:*" + ], + "Resource": [ + "*" + ] + }, + { + "Effect": "Allow", + "Action": [ + "logs:*" + ], + "Resource": [ + "*" + ] + }, + { + "Effect": "Allow", + "Action": [ + "support:*" + ], + "Resource": [ + "*" + ] + }, + { + "Effect": "Allow", + "Action": [ + "kms:*" + ], + "Resource": [ + "*" + ] + }, + { + "Effect": "Allow", + "Action": [ + "sts:*" + ], + "Resource": [ + "*" + ] + }, + { + "Effect": "Allow", + "Action": [ + "tag:*" + ], + "Resource": [ + "*" + ] + }, + { + "Effect": "Allow", + "Action": [ + "route53:*" + ], + "Resource": [ + "*" + ] + }, + { + "Effect": "Allow", + "Action": [ + "servicequotas:ListServices", + "servicequotas:GetRequestedServiceQuotaChange", + "servicequotas:GetServiceQuota", + "servicequotas:RequestServiceQuotaIncrease", + "servicequotas:ListServiceQuotas" + ], + "Resource": [ + "*" + ] + } + ] +} +---- diff --git a/modules/ccs-aws-understand.adoc b/modules/ccs-aws-understand.adoc new file mode 100644 index 0000000000..273fc439dd --- /dev/null +++ b/modules/ccs-aws-understand.adoc @@ -0,0 +1,13 @@ +// Module included in the following assemblies: +// +// * assemblies/aws-ccs.adoc + +[id="ccs-aws-understand_{context}"] += Understanding Customer Cloud Subscriptions on AWS + + +To deploy {product-title} into your existing Amazon Web Services (AWS) account using the Customer Cloud 
Subscription (CCS) model, Red Hat requires several prerequisites be met. + +Red Hat recommends the usage of an AWS Organization to manage multiple AWS accounts. The AWS Organization, managed by the customer, hosts multiple AWS accounts. There is a root account in the organization that all accounts will refer to in the account hierarchy. + +It is recommended for the {product-title} cluster using a CCS model to be hosted in an AWS account within an AWS Organizational Unit. A Service Control Policy (SCP) is created and applied to the AWS Organizational Unit that manages what services the AWS sub-accounts are permitted to access. The SCP applies only to available permissions within a single AWS account for all AWS sub-accounts within the Organizational Unit. It is also possible to apply a SCP to a single AWS account. All other accounts in the customer’s AWS Organization are managed in whatever manner the customer requires. Red Hat Site Reliability Engineers (SRE) will not have any control over SCPs within the AWS Organization. diff --git a/modules/ccs-gcp-customer-procedure.adoc b/modules/ccs-gcp-customer-procedure.adoc new file mode 100644 index 0000000000..62bc98b36b --- /dev/null +++ b/modules/ccs-gcp-customer-procedure.adoc @@ -0,0 +1,107 @@ +[id="ccs-gcp-customer-procedure_{context}"] + += Required customer procedure +// TODO: Same as other module - Better procedure heading that tells you what this is doing + + +The Customer Cloud Subscription (CCS) model allows Red Hat to deploy and manage {product-title} into a customer’s Google Cloud Platform (GCP) project. Red Hat requires several prerequisites in order to provide these services. + +[WARNING] +==== +To use {product-title} in your GCP project, the GCP organizational policy constraint, `constraints/iam.allowedPolicyMemberDomains`, cannot be in place. +==== + +.Procedure + +. link:https://cloud.google.com/resource-manager/docs/creating-managing-projects[Create a Google Cloud project] to host the {product-title} cluster. ++ +[NOTE] +==== +The project name must be 10 characters or less. +==== + +. 
link:https://cloud.google.com/service-usage/docs/enable-disable#enabling[Enable] the following required APIs in the project that hosts your {product-title} cluster: ++ +.Required API services +[cols="2a,3a",options="header"] +|=== +|API service |Console service name + + +|link:https://console.cloud.google.com/apis/library/deploymentmanager.googleapis.com?pli=1&project=openshift-gce-devel&folder=&organizationId=[Cloud Deployment Manager V2 API] +|`deploymentmanager.googleapis.com` + + +|link:https://console.cloud.google.com/apis/library/compute.googleapis.com?project=openshift-gce-devel&folder=&organizationId=[Compute Engine API] +|`compute.googleapis.com` + +|link:https://console.cloud.google.com/apis/library/cloudapis.googleapis.com?project=openshift-gce-devel&folder=&organizationId=[Google Cloud APIs] +|`cloudapis.googleapis.com` + +|link:https://console.cloud.google.com/apis/library/cloudresourcemanager.googleapis.com?project=openshift-gce-devel&folder=&organizationId=[Cloud Resource Manager API] +|`cloudresourcemanager.googleapis.com` + +|link:https://console.cloud.google.com/apis/library/dns.googleapis.com?project=openshift-gce-devel&folder=&organizationId=[Google DNS API] +|`dns.googleapis.com` + +|link:https://console.cloud.google.com/apis/library/networksecurity.googleapis.com?project=openshift-gce-devel&folder=&organizationId=[Network Security API] +|`networksecurity.googleapis.com` + +|link:https://console.cloud.google.com/apis/library/iamcredentials.googleapis.com[IAM Service Account Credentials API] +|`iamcredentials.googleapis.com` + +|link:https://console.cloud.google.com/apis/library/iam.googleapis.com?project=openshift-gce-devel&folder=&organizationId=[Identity and Access Management (IAM) API] +|`iam.googleapis.com` + +|link:https://console.cloud.google.com/apis/library/servicemanagement.googleapis.com?project=openshift-gce-devel&folder=&organizationId=[Service Management API] +|`servicemanagement.googleapis.com` + +|link:https://console.cloud.google.com/apis/library/serviceusage.googleapis.com?project=openshift-gce-devel&folder=&organizationId=[Service Usage API] +|`serviceusage.googleapis.com` + +|link:https://console.cloud.google.com/apis/library/storage-api.googleapis.com?project=openshift-gce-devel&folder=&organizationId=[Google Cloud Storage JSON API] +|`storage-api.googleapis.com` + +|link:https://console.cloud.google.com/apis/library/storage-component.googleapis.com?project=openshift-gce-devel&folder=&organizationId=[Cloud Storage] +|`storage-component.googleapis.com` + +|=== + +. To ensure that Red Hat can perform necessary actions, you must create an `osd-ccs-admin` IAM link:https://cloud.google.com/iam/docs/creating-managing-service-accounts#creating_a_service_account[service account] user within the GCP project. ++ +The following roles must be link:https://cloud.google.com/iam/docs/granting-roles-to-service-accounts#granting_access_to_a_service_account_for_a_resource[granted to the service account]: ++ +.Required roles +[cols="2a,3a",options="header"] + +|=== + +|Role|Console role name + +|Compute Admin +|`roles/compute.admin` + +|DNS Admin +|`roles/dns.admin` + +|Organizational Policy Viewer +|`roles/orgpolicy.policyViewer` + +|Owner +|`roles/owner` + +|Project IAM Admin +|`roles/resourcemanager.projectIamAdmin` + +|Service Management Administrator +|`roles/servicemanagement.admin` + +|Service Usage Admin +|`roles/serviceusage.serviceUsageAdmin` + +|Storage Admin +|`roles/storage.admin` + +|=== + +. 
link:https://cloud.google.com/iam/docs/creating-managing-service-account-keys#creating_service_account_keys[Create the service account key] for the `osd-ccs-admin` IAM service account. Export the key to a file named `osServiceAccount.json`; this JSON file will be uploaded in {OCM} when you create your cluster. diff --git a/modules/ccs-gcp-customer-requirements.adoc b/modules/ccs-gcp-customer-requirements.adoc new file mode 100644 index 0000000000..16769506ff --- /dev/null +++ b/modules/ccs-gcp-customer-requirements.adoc @@ -0,0 +1,67 @@ +// Module included in the following assemblies: +// +// * assemblies/assembly.adoc + +[id="ccs-gcp-customer-requirements_{context}"] += Customer requirements + + +{product-title} clusters using a Customer Cloud Subscription (CCS) model on Google Cloud Platform (GCP) must meet several prerequisites before they can be deployed. + +[id="ccs-gcp-requirements-account_{context}"] +== Account + +* The customer ensures that link:https://cloud.google.com/storage/quotas[Google Cloud limits] are sufficient to support {product-title} provisioned within the customer-provided GCP account. + +* The customer-provided GCP account should be in the customer's Google Cloud Organization with the applicable Service Account applied. + +* The customer-provided GCP account must not be transferable to Red Hat. + +* The customer may not impose GCP usage restrictions on Red Hat activities. Imposing restrictions severely hinders Red Hat's ability to respond to incidents. + +* Red Hat deploys monitoring into GCP to alert Red Hat when a highly privileged account, such as a root account, logs into the customer-provided GCP account. + +* The customer can deploy native GCP services within the same customer-provided GCP account. ++ +[NOTE] +==== +Customers are encouraged, but not mandated, to deploy resources in a Virtual Private Cloud (VPC) separate from the VPC hosting {product-title} and other Red Hat supported services. +==== + +[id="ccs-gcp-requirements-access_{context}"] +== Access requirements + +* To appropriately manage the {product-title} service, Red Hat must have the `AdministratorAccess` policy applied to the administrator role at all times. ++ +[NOTE] +==== +This policy only provides Red Hat with permissions and capabilities to change resources in the customer-provided GCP account. +==== + +* Red Hat must have GCP console access to the customer-provided GCP account. This access is protected and managed by Red Hat. + +* The customer must not utilize the GCP account to elevate their permissions within the {product-title} cluster. + +* Actions available in the {cloud-redhat-com} must not be directly performed in the customer-provided GCP account. + +[id="ccs-gcp-requirements-support_{context}"] +== Support requirements + +* Red Hat recommends that the customer have at least link:https://cloud.google.com/support[Production Support] from GCP. + +* Red Hat has authority from the customer to request GCP support on their behalf. + +* Red Hat has authority from the customer to request GCP resource limit increases on the customer-provided account. + +* Red Hat manages the restrictions, limitations, expectations, and defaults for all {product-title} clusters in the same manner, unless otherwise specified in this requirements section. + +[id="ccs-gcp-requirements-security_{context}"] +== Security requirements + +* The customer-provided IAM credentials must be unique to the customer-provided GCP account and must not be stored anywhere in the customer-provided GCP account. 
+ +* Volume snapshots will remain within the customer-provided GCP account and customer-specified region. + +* Red Hat must have ingress access to the API server through white-listed Red Hat machines. + +* Red Hat must have egress allowed to forward system and audit logs to a Red Hat managed central logging stack. diff --git a/modules/ccs-gcp-iam.adoc b/modules/ccs-gcp-iam.adoc new file mode 100644 index 0000000000..a81cfbe6f7 --- /dev/null +++ b/modules/ccs-gcp-iam.adoc @@ -0,0 +1,105 @@ +[id="ccs-gcp-iam_{context}"] + += Red Hat managed Google Cloud resources + + +Red Hat is responsible for creating and managing the following IAM Google Cloud Platform (GCP) resources. + +[id="ccs-gcp-iam-service-account-roles_{context}"] +== IAM service account and roles + +The `osd-managed-admin` IAM service account is created immediately after taking control of the customer-provided GCP account. This is the user that will perform the {product-title} cluster installation. + +The following roles are attached to the service account: + +.IAM roles for osd-managed-admin +[cols="2a,3a,2a",options="header"] + +|=== + +|Role |Console role name |Description + +|Compute Admin +|`roles/compute.admin` +|Provides full control of all Compute Engine resources. + +|DNS Administrator +|`roles/dns.admin` +|Provides read-write access to all Cloud DNS resources. + +|Security Admin +|`roles/iam.securityAdmin` +|Security admin role, with permissions to get and set any IAM policy. + +|Storage Admin +|`roles/storage.admin` +|Grants full control of objects and buckets. + +When applied to an individual *bucket*, control applies only to the specified bucket and objects within the bucket. + +|Service Account Admin +|`roles/iam.serviceAccountAdmin` +|Create and manage service accounts. + +|Service Account Key Admin +|`roles/iam.serviceAccountKeyAdmin` +|Create and manage (and rotate) service account keys. + +|Service Account User +|`roles/iam.serviceAccountUser` +|Run operations as the service account. + +|=== + +[id="ccs-gcp-iam-group-roles_{context}"] +== IAM group and roles + +The `sd-sre-platform-gcp-access` Google group is granted access to the GCP project to allow Red Hat Site Reliability Engineering (SRE) access to the console for emergency troubleshooting purposes. + +The following roles are attached to the group: + +.IAM roles for sd-sre-platform-gcp-access +[cols="2a,3a,2a",options="header"] + +|=== + +|Role |Console role name |Description + +|Compute Admin +|`roles/compute.admin` +|Provides full control of all Compute Engine resources. + +|Editor +|`roles/editor` +|Provides all viewer permissions, plus permissions for actions that modify state. + +|Organization Policy Viewer +|`roles/orgpolicy.policyViewer` +|Provides access to view Organization Policies on resources. + +|Project IAM Admin +|`roles/resourcemanager.projectIamAdmin` +|Provides permissions to administer IAM policies on projects. + +|Quota Administrator +|`roles/servicemanagement.quotaAdmin` +|Provides access to administer service quotas. + +|Role Administrator +|`roles/iam.roleAdmin` +|Provides access to all custom roles in the project. + +|Service Account Admin +|`roles/iam.serviceAccountAdmin` +|Create and manage service accounts. + + +|Service Usage Admin +|`roles/serviceusage.serviceUsageAdmin` +|Ability to enable, disable, and inspect service states, inspect operations, and consume quota and billing for a consumer project. + +|Tech Support Editor +|`roles/cloudsupport.techSupportEditor` +|Provides full read-write access to technical support cases. 
+ +|=== diff --git a/modules/ccs-gcp-understand.adoc b/modules/ccs-gcp-understand.adoc new file mode 100644 index 0000000000..72017a5165 --- /dev/null +++ b/modules/ccs-gcp-understand.adoc @@ -0,0 +1,13 @@ +// Module included in the following assemblies: +// +// * assemblies/gcp-ccs.adoc + +[id="ccs-gcp-understand_{context}"] += Understanding Customer Cloud Subscriptions on GCP + + +Red Hat {product-title} provides a Customer Cloud Subscription (CCS) model that allows Red Hat to deploy and manage {product-title} into a customer's existing {GCP} account. Red Hat requires several prerequisites be met in order to provide this service. + +Red Hat recommends the usage of GCP project, managed by the customer, to organize all of your GCP resources. A project consists of a set of users and APIs, as well as billing, authentication, and monitoring settings for those APIs. + +It is recommended for the {product-title} cluster using a CCS model to be hosted in a GCP project within a GCP organization. The Organization resource is the root node of the GCP resource hierarchy and all resources that belong to an organization are grouped under the organization node. An IAM service account with certain roles granted is created and applied to the GCP project. When you make calls to the API, you typically provide service account keys for authentication. Each service account is owned by a specific project, but service accounts can be provided roles to access resources for other projects. diff --git a/modules/codeready-workspaces.adoc b/modules/codeready-workspaces.adoc new file mode 100644 index 0000000000..b963480c4f --- /dev/null +++ b/modules/codeready-workspaces.adoc @@ -0,0 +1,17 @@ +// Module included in the following assemblies: +// +// * adding_service_cluster/available-services.adoc +// * adding_service_cluster/rosa-available-services.adoc + +[id="codeready-workspaces_{context}"] += Red Hat CodeReady Workspaces + +The Red Hat CodeReady Workspaces service is available as an add-on to your {product-title} cluster. CodeReady Workspaces is a developer tool that makes cloud-native development practical for teams, using Kubernetes and containers to provide any member of the development or IT team with a consistent, preconfigured development environment. Developers can create code, build, and test in containers running on Red Hat OpenShift. + +[NOTE] +==== +When using this service with {product-title}, CodeReady Workspace can be deployed to any namespace except `openshift-workspaces`. +==== + +.Additional resources +* link:https://access.redhat.com/documentation/en-us/red_hat_codeready_workspaces/2.10/html/installation_guide/installing-codeready-workspaces_crw#creating-a-project-in-openshift-web-console_crw[Red Hat CodeReady Workspaces Operator] documentation diff --git a/modules/config-aws-access.adoc b/modules/config-aws-access.adoc new file mode 100644 index 0000000000..c9fd1f7e69 --- /dev/null +++ b/modules/config-aws-access.adoc @@ -0,0 +1,74 @@ +// Module included in the following assemblies: +// +// * assemblies/aws-private-connections.adoc + +[id="config-aws-access_{context}"] + += Configuring AWS infrastructure access + +// TODO: I see {AWS} and {GCP} only used a handful of time, but their written out form much more. Should all hardcoded instances be updated to use the attributes? 
+{AWS} infrastructure access allows link:https://access.redhat.com/node/3610411[Customer Portal Organization Administrators] and cluster owners to enable AWS Identity and Access Management (IAM) users to have federated access to the AWS Management Console for their {product-title} cluster. Administrators can select between `Network Management` or `Read-only` access options. + +.Prerequisites + +* An AWS account with IAM permissions. + +.Procedure + +. Log in to your AWS account. If necessary, you can create a new AWS account by following the link:https://aws.amazon.com/premiumsupport/knowledge-center/create-and-activate-aws-account/[AWS documentation]. + +. Create an IAM user with `STS:AllowAssumeRole` permissions within the AWS account. + +.. Open the link:https://console.aws.amazon.com/iam/home#/home[IAM dashboard] of the AWS Management Console. +.. In the *Policies* section, click *Create Policy*. +.. Select the *JSON* tab and replace the existing text with the following: ++ +[source,json] +---- +{ + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Action": "sts:AssumeRole", + "Resource": "*" + } + ] +} +---- + +.. Click *Next:Tags*. +.. Optional: Add tags. Click *Next:Review* +.. Provide an appropriate name and description, then click *Create Policy*. +.. In the *Users* section, click *Add user*. +.. Provide an appropriate user name. +.. Select *AWS Management Console access* as the AWS access type. +.. Adjust the password requirements as necessary for your organization, then click *Next:Permissions*. +.. Click the *Attach existing policies directly* option. Search for and check the policy created in previous steps. ++ +[NOTE] +==== +It is not recommended to set a permissions boundary. +==== + +.. Click *Next: Tags*, then click *Next: Review*. Confirm the configuration is correct. +.. Click *Create user*, a success page appears. +.. Gather the IAM user’s Amazon Resource Name (ARN). The ARN will have the following format: `arn:aws:iam::000111222333:user/username`. Click *Close*. + +. Open the {cloud-redhat-com} in your browser and select the cluster you want to allow AWS infrastructure access. + +. Select the *Access control* tab, and scroll to the *AWS Infrastructure Access* section. + +. Paste the *AWS IAM ARN* and select *Network Management* or *Read-only* permissions, then click *Grant role*. + +. Copy the *AWS OSD console URL* to your clipboard. + +. Sign in to your AWS account with your Account ID or alias, IAM user name, and password. + +. In a new browser tab, paste the AWS OSD Console URL that will be used to route to the AWS Switch Role page. + +. Your account number and role will be filled in already. Choose a display name if necessary, then click *Switch Role*. + +.Verification + +* You now see *VPC* under *Recently visited services*. diff --git a/modules/config-github-idp.adoc b/modules/config-github-idp.adoc new file mode 100644 index 0000000000..1db58721d5 --- /dev/null +++ b/modules/config-github-idp.adoc @@ -0,0 +1,68 @@ +// Module included in the following assemblies: +// +// * assemblies/config-identity-providers.adoc +// * getting_started/quickstart-osd.adoc + +[id="config-github-idp_{context}"] += Configuring a GitHub identity provider + + +Configure a GitHub identity provider to validate user names and passwords against GitHub or GitHub Enterprise’s OAuth authentication server and access your {product-title} cluster. OAuth facilitates a token exchange flow between {product-title} and GitHub or GitHub Enterprise. 
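+
+The OAuth callback URL that is generated in the procedure below is served by the cluster's `oauth-openshift` route. As an optional, hedged sketch, if you have `oc` access to the cluster and permission to read routes in the `openshift-authentication` namespace, you can look up the hostname that the callback URL is built from:
+
+[source,terminal]
+----
+$ oc get route oauth-openshift -n openshift-authentication -o jsonpath='{.spec.host}{"\n"}'
+----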
+ +[WARNING] +==== +Configuring GitHub authentication allows users to log in to {product-title} with their GitHub credentials. To prevent anyone with any GitHub user ID from logging in to your {product-title} cluster, you must restrict access to only those in specific GitHub organizations or teams. +==== + +.Prerequisites + +* The OAuth application must be created directly within the GitHub link:https://docs.github.com/en/github/setting-up-and-managing-organizations-and-teams/managing-organization-settings[organization settings] by the GitHub organization administrator. +* link:https://docs.github.com/en/github/setting-up-and-managing-organizations-and-teams[GitHub organizations or teams] are set up in your GitHub account. + +.Procedure + +. From {cloud-redhat-com}, navigate to the *Clusters* page and select the cluster that you need to configure identity providers for. + +. Click the *Access control* tab. + +. Click *Add identity provider*. ++ +[NOTE] +==== +You can also click the *Add Oauth configuration* link in the warning message displayed after cluster creation to configure your identity providers. +==== + +. Select *GitHub* from the drop-down menu. + +. Enter a unique name for the identity provider. This name cannot be changed later. +** An *OAuth callback URL* is automatically generated in the provided field. You will use this to register the GitHub application. ++ +---- +https://oauth-openshift.apps../oauth2callback/ +---- ++ +For example: ++ +---- +https://oauth-openshift.apps.example-openshift-cluster.com/oauth2callback/github/ +---- + +. link:https://docs.github.com/en/developers/apps/creating-an-oauth-app[Register an application on GitHub]. + +. Return to {product-title} and select a mapping method from the drop-down menu. *Claim* is recommended in most cases. + +. Enter the *Client ID* and *Client secret* provided by GitHub. + +. Enter a *hostname*. A hostname must be entered when using a hosted instance of GitHub Enterprise. + +. Optional: You can use a certificate authority (CA) file to validate server certificates for the configured GitHub Enterprise URL. Click *Browse* to locate and attach a *CA file* to the identity provider. + +. Select *Use organizations* or *Use teams* to restrict access to a particular GitHub organization or a GitHub team. + +. Enter the name of the organization or team you would like to restrict access to. Click *Add more* to specify multiple organizations or teams that users can be a member of. + +. Click *Confirm*. + +.Verification + +* The configured identity provider is now visible on the *Access control* tab of the *Clusters* page. diff --git a/modules/config-gitlab-idp.adoc b/modules/config-gitlab-idp.adoc new file mode 100644 index 0000000000..b72b72b7ff --- /dev/null +++ b/modules/config-gitlab-idp.adoc @@ -0,0 +1,57 @@ +// Module included in the following assemblies: +// +// * assemblies/config-identity-providers.adoc + +[id="config-gitlab-idp_{context}"] += Configuring a GitLab identity provider + + +Configure a GitLab identity provider to use link:https://gitlab.com/[GitLab.com] or any other GitLab instance as an identity provider. + +.Prerequisites + +- If you use GitLab version 7.7.0 to 11.0, you connect using the link:http://doc.gitlab.com/ce/integration/oauth_provider.html[OAuth integration]. If you use GitLab version 11.1 or later, you can use link:https://docs.gitlab.com/ce/integration/openid_connect_provider.html[OpenID Connect] (OIDC) to connect instead of OAuth. + +.Procedure + +. 
From {cloud-redhat-com}, navigate to the *Clusters* page and select the cluster that you need to configure identity providers for. + +. Click the *Access control* tab. + +. Click *Add identity provider*. ++ +[NOTE] +==== +You can also click the *Add Oauth configuration* link in the warning message displayed after cluster creation to configure your identity providers. +==== + +. Select *GitLab* from the drop-down menu. + +. Enter a unique name for the identity provider. This name cannot be changed later. +** An *OAuth callback URL* is automatically generated in the provided field. You will provide this URL to GitLab. ++ +---- +https://oauth-openshift.apps../oauth2callback/ +---- ++ +For example: ++ +---- +https://oauth-openshift.apps.example-openshift-cluster.com/oauth2callback/gitlab/ +---- + +. link:https://docs.gitlab.com/ee/integration/oauth_provider.html[Add a new application in GitLab]. + +. Return to {product-title} and select a mapping method from the drop-down menu. *Claim* is recommended in most cases. + +. Enter the *Client ID* and *Client secret* provided by GitLab. + +. Enter the *URL* of your GitLab provider. + +. Optional: You can use a certificate authority (CA) file to validate server certificates for the configured GitLab URL. Click *Browse* to locate and attach a *CA file* to the identity provider. + +. Click *Confirm*. + +.Verification + +* The configured identity provider is now visible on the *Access control* tab of the *Clusters* page. diff --git a/modules/config-google-idp.adoc b/modules/config-google-idp.adoc new file mode 100644 index 0000000000..a2224efff5 --- /dev/null +++ b/modules/config-google-idp.adoc @@ -0,0 +1,58 @@ +// Module included in the following assemblies: +// +// * assemblies/config-identity-providers.adoc + +[id="config-google-idp_{context}"] += Configuring a Google identity provider + + +Configure a Google identity provider to allow users to authenticate with their Google credentials. + +[WARNING] +==== +Using Google as an identity provider allows any Google user to authenticate to your server. +You can limit authentication to members of a specific hosted domain with the +`hostedDomain` configuration attribute. +==== + +.Procedure + +. From {cloud-redhat-com}, navigate to the *Clusters* page and select the cluster that you need to configure identity providers for. + +. Click the *Access control* tab. + +. Click *Add identity provider*. ++ +[NOTE] +==== +You can also click the *Add Oauth configuration* link in the warning message displayed after cluster creation to configure your identity providers. +==== + +. Select *Google* from the drop-down menu. + +. Enter a unique name for the identity provider. This name cannot be changed later. +** An *OAuth callback URL* is automatically generated in the provided field. You will provide this URL to Google. ++ +---- +https://oauth-openshift.apps../oauth2callback/ +---- ++ +For example: ++ +---- +https://oauth-openshift.apps.example-openshift-cluster.com/oauth2callback/github/ +---- + +. Configure a Google identity provider using link:https://developers.google.com/identity/protocols/OpenIDConnect[Google's OpenID Connect integration]. + +. Return to {product-title} and select a mapping method from the drop-down menu. *Claim* is recommended in most cases. + +. Enter the *Client ID* of a registered Google project and the *Client secret* issued by Google. + +. Enter a hosted domain to restrict users to a Google Apps domain. + +. Click *Confirm*. 
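+
+After the identity provider becomes available and a user has logged in at least once, you can optionally confirm the result from the command line before moving on to the verification below. This is a hedged sketch that assumes you have permission to read the cluster-scoped `users` and `identities` resources:
+
+[source,terminal]
+----
+$ oc get users
+$ oc get identities
+----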
+ +.Verification + +* The configured identity provider is now visible on the *Access control* tab of the *Clusters* page. diff --git a/modules/config-htpasswd-idp.adoc b/modules/config-htpasswd-idp.adoc new file mode 100644 index 0000000000..17ea7636cc --- /dev/null +++ b/modules/config-htpasswd-idp.adoc @@ -0,0 +1,53 @@ +// Module included in the following assemblies: +// +// * identity_providers/config-identity-providers.adoc + +[id="config-htpasswd-idp_{context}"] += Configuring an HTPasswd identity provider + +Configure an HTPasswd identity provider to create a single, static user with cluster administration privileges. You can log in to your cluster as the user to troubleshoot issues. + +.Procedure + +. From {cloud-redhat-com}, navigate to the *Clusters* page and select the cluster that you need to configure identity providers for. + +. Click the *Access control* tab. + +. Click *Add identity provider*. ++ +[NOTE] +==== +You can also click the *Add Oauth configuration* link in the warning message displayed after cluster creation to configure your identity providers. +==== + +. Select *HTPasswd* from the *Identity Provider* drop-down menu. + +. Add a unique name in the *Name* field for the identity provider. + +. Use the suggested username and password for the static user, or create your own. ++ +[NOTE] +==== +The credentials defined in this step are not visible after you select *Confirm* in the following step. If you lose the credentials, you must recreate the identity provider and define the credentials again. +==== + +. Select *Confirm* to create the HTPasswd identity provider and the user. + +. Grant the static user permission to manage the cluster: +.. Select *Add user* in the *Cluster administrative users* section of the *Access control* page. +.. Enter the username that you defined in the preceding step into the *User ID* field. +.. Select *Add user* to grant standard administration privileges to the user. ++ +[NOTE] +==== +The user is added to the `dedicated-admins` group. +==== + +.Verification + +* The configured identity provider is now visible on the *Access control* tab of the *Clusters* page. ++ +[NOTE] +==== +After creating the identity provider, synchronization usually completes within two minutes. You can login to the cluster as the user after the HTPasswd identity provider becomes available. +==== diff --git a/modules/config-idp.adoc b/modules/config-idp.adoc new file mode 100644 index 0000000000..e31702e599 --- /dev/null +++ b/modules/config-idp.adoc @@ -0,0 +1,67 @@ +// Module included in the following assemblies: +// +// * assemblies/osd-quickstart.adoc + +[id="config-idp_{context}"] += Configuring an identity provider + + +After your {product-title} cluster is created, you must configure identity providers to determine how users log in to access the cluster. This example configures a GitHub identity provider. + +[WARNING] +==== +Configuring GitHub authentication allows users to log in to {product-title} with their GitHub credentials. To prevent anyone with any GitHub user ID from logging in to your {product-title} cluster, you must restrict access to only those in specific GitHub organizations or teams. +==== + +.Prerequisites + +* The OAuth application must be created directly within the GitHub link:https://docs.github.com/en/github/setting-up-and-managing-organizations-and-teams/managing-organization-settings[organization settings] by the GitHub organization administrator. 
+* link:https://docs.github.com/en/github/setting-up-and-managing-organizations-and-teams[GitHub organizations or teams] are set up in your GitHub account. + +.Procedure + +. Navigate to the *Clusters* page and select the cluster that you need to configure identity providers for. + +. Click the *Access control* tab. + +. Click *Add identity provider*. ++ +[NOTE] +==== +You can also click the *Add Oauth configuration* link in the warning message displayed after cluster creation to configure your identity providers. +==== + +. Select *GitHub* from the drop-down menu. + +. Enter a unique name for the identity provider. This name cannot be changed later. +** An *OAuth callback URL* is automatically generated in the provided field. You will use this to register the GitHub application. ++ +---- +https://oauth-openshift.apps../oauth2callback/ +---- ++ +For example: ++ +---- +https://oauth-openshift.apps.example-openshift-cluster.com/oauth2callback/github/ +---- + +. link:https://docs.github.com/en/developers/apps/creating-an-oauth-app[Register an application on GitHub]. + +. Return to {product-title} and select a mapping method from the drop-down menu. *Claim* is recommended in most cases. + +. Enter the *Client ID* and *Client secret* provided by GitHub. + +. Enter a *hostname*. A hostname must be entered when using a hosted instance of GitHub Enterprise. + +. Optional: You can use a certificate authority (CA) file to validate server certificates for the configured GitHub Enterprise URL. Click *Browse* to locate and attach a *CA file* to the identity provider. + +. Select *Use organizations* or *Use teams* to restrict access to a particular GitHub organization or a GitHub team. + +. Enter the name of the organization or team you would like to restrict access to. Click *Add more* to specify multiple organizations or teams that users can be a member of. + +. Click *Confirm*. + +.Verification + +* The configured identity provider is now visible on the *Access control* tab of the *Clusters* page. diff --git a/modules/config-ldap-idp.adoc b/modules/config-ldap-idp.adoc new file mode 100644 index 0000000000..6f514dd559 --- /dev/null +++ b/modules/config-ldap-idp.adoc @@ -0,0 +1,94 @@ +// Module included in the following assemblies: +// +// * assemblies/config-identity-providers.adoc + +[id="config-ldap-idp_{context}"] += Configuring a LDAP identity provider + + +Configure the LDAP identity provider to validate user names and passwords against an LDAPv3 server, using simple bind authentication. + +.Prerequisites + +* When configuring a LDAP identity provider, you will need to enter a configured *LDAP URL*. The configured URL is an RFC 2255 URL, which specifies the LDAP host and +search parameters to use. The syntax of the URL is: ++ +---- +ldap://host:port/basedn?attribute?scope?filter +---- ++ +[cols="2a,8a",options="header"] +|=== +|URL component | Description +.^|`ldap` | For regular LDAP, use the string `ldap`. For secure LDAP +(LDAPS), use `ldaps` instead. +.^|`host:port` | The name and port of the LDAP server. Defaults to +`localhost:389` for ldap and `localhost:636` for LDAPS. +.^|`basedn` | The DN of the branch of the directory where all searches should +start from. At the very least, this must be the top of your directory tree, but +it could also specify a subtree in the directory. +.^|`attribute` | The attribute to search for. Although RFC 2255 allows a +comma-separated list of attributes, only the first attribute will be used, no +matter how many are provided. 
If no attributes are provided, the default is to +use `uid`. It is recommended to choose an attribute that will be unique across +all entries in the subtree you will be using. +.^|`scope` | The scope of the search. Can be either `one` or `sub`. +If the scope is not provided, the default is to use a scope of `sub`. +.^|`filter` | A valid LDAP search filter. If not provided, defaults to +`(objectClass=*)` +|=== ++ +When doing searches, the attribute, filter, and provided user name are combined +to create a search filter that looks like: ++ +---- +(&()(=)) +---- ++ +[IMPORTANT] +If the LDAP directory requires authentication to search, specify a `bindDN` and +`bindPassword` to use to perform the entry search. + + +.Procedure + +. From {cloud-redhat-com}, navigate to the *Clusters* page and select the cluster that you need to configure identity providers for. + +. Click the *Access control* tab. + +. Click *Add identity provider*. ++ +[NOTE] +==== +You can also click the *Add Oauth configuration* link in the warning message displayed after cluster creation to configure your identity providers. +==== + +. Select *LDAP* from the drop-down menu. + +. Enter a unique name for the identity provider. This name cannot be changed later. + +. Select a mapping method from the drop-down menu. *Claim* is recommended in most cases. + +. Enter a *LDAP URL* to specify the LDAP search parameters to use. + +. Optional: Enter a *Bind DN* and *Bind password*. + +. Enter the attributes that will map LDAP attributes to identities. +** Enter an *ID* attribute whose value should be used as the user ID. Click *Add more* to add multiple ID attributes. +** Optional: Enter a *Preferred username* attribute whose value should be used as the display name. Click *Add more* to add multiple preferred username attributes. +** Optional: Enter an *Email* attribute whose value should be used as the email address. Click *Add more* to add multiple email attributes. + +. Optional: Click *Show advanced Options* to add a certificate authority (CA) file to your LDAP identity provider to validate server certificates for the configured URL. Click *Browse* to locate and attach a *CA file* to the identity provider. + +. Optional: Under the advanced options, you can choose to make the LDAP provider *Insecure*. If you select this option, a CA file cannot be used. ++ +[IMPORTANT] +==== +If you are using an insecure LDAP connection (ldap:// or port 389), then you must check the *Insecure* option in the configuration wizard. +==== + +. Click *Confirm*. + +.Verification + +* The configured identity provider is now visible on the *Access control* tab of the *Clusters* page. diff --git a/modules/config-openid-idp.adoc b/modules/config-openid-idp.adoc new file mode 100644 index 0000000000..ffb5267080 --- /dev/null +++ b/modules/config-openid-idp.adoc @@ -0,0 +1,105 @@ +// Module included in the following assemblies: +// +// * assemblies/config-identity-providers.adoc + +[id="config-openid-idp_{context}"] += Configuring an OpenID identity provider + + +Configure an OpenID identity provider to integrate with an OpenID Connect identity provider using an link:http://openid.net/specs/openid-connect-core-1_0.html#CodeFlowAuth[Authorization Code Flow]. + +[IMPORTANT] +==== +The Authentication Operator in {product-title} requires that the configured +OpenID Connect identity provider implements the +link:https://openid.net/specs/openid-connect-discovery-1_0.html[OpenID Connect Discovery] +specification. 
+==== + +Claims are read from the JWT `id_token` returned from the OpenID identity +provider and, if specified, from the JSON returned by the Issuer URL. + +At least one claim must be configured to use as the user's identity. + +You can also indicate which claims to use as the user's preferred user name, +display name, and email address. If multiple claims are specified, the first one +with a non-empty value is used. The standard claims are: + +[cols="1,2",options="header"] +|=== + +|Claim +|Description + +|`preferred_username` +|The preferred user name when provisioning a user. A +shorthand name that the user wants to be referred to as, such as `janedoe`. Typically +a value that corresponding to the user's login or username in the authentication +system, such as username or email. + +|`email` +|Email address. + +|`name` +|Display name. + +|=== + +See the +link:http://openid.net/specs/openid-connect-core-1_0.html#StandardClaims[OpenID claims documentation] +for more information. + +.Prerequisites +* Before you configure OpenID Connect, check the installation prerequisites for any Red Hat product or service you want to use with your {product-title} cluster. + +.Procedure + +. From {cloud-redhat-com}, navigate to the *Clusters* page and select the cluster that you need to configure identity providers for. + +. Click the *Access control* tab. + +. Click *Add identity provider*. ++ +[NOTE] +==== +You can also click the *Add Oauth configuration* link in the warning message displayed after cluster creation to configure your identity providers. +==== + +. Select *OpenID* from the drop-down menu. + +. Enter a unique name for the identity provider. This name cannot be changed later. +** An *OAuth callback URL* is automatically generated in the provided field. ++ +---- +https://oauth-openshift.apps../oauth2callback/ +---- ++ +For example: ++ +---- +https://oauth-openshift.apps.example-openshift-cluster.com/oauth2callback/openid/ +---- + +. link:https://openid.net/specs/openid-connect-core-1_0.html#AuthRequest[Create an authorization request using an Authorization Code Flow]. + +. Return to {product-title} and select a mapping method from the drop-down menu. *Claim* is recommended in most cases. + +. Enter a *Client ID* and *Client secret* provided from OpenID. + +. Enter an *Issuer URL*. This is the URL that the OpenID provider asserts as the Issuer Identifier. It must use the https scheme with no URL query parameters or fragments. + +. Enter an *Email* attribute whose value should be used as the email address. Click *Add more* to add multiple email attributes. + +. Enter a *Name* attribute whose value should be used as the preferred username. Click *Add more* to add multiple preferred usernames. + +. Enter a *Preferred username* attribute whose value should be used as the display name. Click *Add more* to add multiple display names. + +. Optional: Click *Show advanced Options* to add a certificate authority (CA) file to your OpenID identity provider. + +. Optional: Under the advanced options, you can add *Additional scopes*. By default, the `OpenID` scope is requested. + +. Click *Confirm*. + +.Verification + +* The configured identity provider is now visible on the *Access control* tab of the *Clusters* page. 
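+
+Because the Authentication Operator requires the provider to implement OpenID Connect Discovery, you can confirm that the issuer publishes the required discovery document before you enter the *Issuer URL*. The following is a hedged sketch that uses a placeholder issuer URL; a conforming provider returns a JSON document that lists endpoints such as `authorization_endpoint` and `token_endpoint`:
+
+[source,terminal]
+----
+$ curl -s https://<issuer_url>/.well-known/openid-configuration
+----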
diff --git a/modules/container-benefits.adoc b/modules/container-benefits.adoc new file mode 100644 index 0000000000..2b328aca5e --- /dev/null +++ b/modules/container-benefits.adoc @@ -0,0 +1,27 @@ + +// Module included in the following assemblies: +// +// * assemblies/osd-architecture.adoc + +[id="container-benefits_{context}"] += The benefits of containerized applications + + +Applications were once expected to be installed on operating systems that included all of the dependencies for the application. However, containers provide a standard way to package your application code, configurations, and dependencies into a single unit that can run as a resource-isolated process on a compute server. To run your app in Kubernetes on {product-title}, you must first containerize your app by creating a container image that you store in a container registry. + +[id="operating-system-benefits_{context}"] +== Operating system benefits + +Containers use small, dedicated Linux operating systems without a kernel. The file system, networking, cgroups, process tables, and namespaces are separate from the host Linux system, but the containers can integrate with the +hosts seamlessly when necessary. Being based on Linux allows containers to use all the advantages that come with the open source development model of rapid innovation. + +Because each container uses a dedicated operating system, you can deploy applications that require conflicting software dependencies on the same host. Each container carries its own dependent software and manages its own interfaces, such as networking and file systems, so applications never need to compete for those assets. + +[id="deployment-scaling-benefits_{context}"] +== Deployment benefits + +If you employ rolling upgrades between major releases of your application, you can continuously improve your applications without downtime and still maintain compatibility with the current release. + +You can also deploy and test a new version of an application alongside the existing version. Deploy the new application version in addition to the current version. If the container passes your tests, simply deploy more new containers and remove the old ones.  + +Since all the software dependencies for an application are resolved within the container itself, you can use a generic operating system on each host in your data center. You do not need to configure a specific operating system for each application host. When your data center needs more capacity, you can deploy another generic host system. diff --git a/modules/create-aws-cluster.adoc b/modules/create-aws-cluster.adoc new file mode 100644 index 0000000000..379ed7902b --- /dev/null +++ b/modules/create-aws-cluster.adoc @@ -0,0 +1,91 @@ +// Module included in the following assemblies: +// +// * assemblies/creating-your-cluster.adoc + +[id="create-aws-cluster_{context}"] += Creating a cluster on AWS + + +You can create an {product-title} cluster on {AWS} using a standard cloud account owned by Red Hat or with your own cloud account using the Customer Cloud Subscription (CCS) model. + +Using the CCS model to deploy and manage {product-title} into your AWS account requires several prerequisites to be met. + +.Prerequisites + +* Your AWS account is configured for use with {product-title}. +* No services are deployed in your AWS account. +* The necessary quotas and limits needed to support the desired cluster size are available in your AWS account. +* An IAM user called `osdCcsAdmin` exists with the `AdministratorAccess` policy attached. 
+* An Organization Service Control Policy (SCP) is set up. +* It is recommended that you have at least *Business Support* from AWS. + +.Procedure + +. Log in to {cloud-redhat-com}. + +. Click *Create Cluster* -> *Red Hat OpenShift Dedicated* -> *Create Cluster*. + +. Select *AWS* as your infrastructure provider. + +. Select your billing model. +** *Standard* is selected by default. +** If you select the *Customer cloud subscription* model, an informational dialogue window will open. Review the prerequisites for installing an AWS CCS cluster and click *Close*. You must provide the following AWS account details before continuing with your cluster creation: +... Enter your *AWS account ID*. +... Enter your *AWS access key ID* and *AWS secret access key* to input your AWS IAM user credentials. ++ +[NOTE] +==== +Revoking these credentials in AWS will result in a loss of access to any cluster created with these credentials. +==== +... Optional: You can select *Bypass AWS Service Control Policy (SCP) checks*. Some AWS SCPs will cause the installation to fail, even if the credentials have the correct permissions. Disabling SCP checks allows installation to proceed. The SCP will still be enforced even if the checks are bypassed. + +. Enter your *Cluster name*. + +. Select a *Region* and choose either a *Single zone* or *Multizone* availability. + +. Select your *Compute node instance type* and the *Compute node count (per zone)*. After your cluster is created, you can change the number of compute nodes in your cluster, but you can not change the worker node instance type. The number and types of nodes available to you depend on your {product-title} subscription. + +. Optional: Expand *Edit node labels* to add labels to your nodes. Click *Add label* to add more node labels. + +. If you are creating a standard {product-title} cluster, select the amount of *Persistent storage* and *Load balancers* you want to set on the deployed cluster. You can also accept the provided defaults. + +. Select your preferred network configuration. +** *Basic* is selected by default. This setting creates a new VPC for your cluster using the default values. +** Select *Advanced* if you want to install into an existing VPC (CCS clusters only), configure your networking IP ranges, or set your cluster privacy. +... Enter the *Availability zone*, *Private subnet ID*, and the *Public subnet ID* to install into an existing VPC. +... Enter the desired values to configure your network IP ranges or enter the following defaults: + +.... Node CIDR: 10.0.0.0/16 + +.... Service CIDR: 172.30.0.0/16 + +.... Pod CIDR: 10.128.0.0/14 + +.... Host Prefix: /23 + +... Select your preferred cluster privacy. *Public* is selected by default. + ++ +[IMPORTANT] +==== +CIDR configurations cannot be changed later. Confirm your selections with your network administrator before proceeding. + +If the cluster privacy is set to *Private*, you will not be able to access your cluster until you configure private connections in your cloud provider. +==== + +. Select your cluster update method. +** *Manual* is selected by default. With this option, you are responsible for updating your cluster. If your cluster version falls too far behind, it will be automatically updated. +** Select *Automatic* if you want your cluster to be automatically upgraded when new versions are available. If you opt for automatic upgrades, you must specify the preferred day of the week and the time (UTC) for the upgrade to start. 
++ +[WARNING] +==== +High and Critical security concerns (CVEs) will be patched automatically within 48 hours, regardless of your chosen update strategy. +==== + +. Optional: You can set a grace period for *Node Draining* during cluster upgrades. A *1 hour* grace period is set by default. + +. Click *Create cluster*. The cluster creation process begins and takes about 30-40 minutes to complete. + +.Verification + +* The *Installing cluster* heading, under the *Overview* tab, indicates that the cluster is installing and you can view the installation logs from this heading. The *Status* indicator under the *Details* heading indicates when your cluster is *Ready* for use. diff --git a/modules/create-cluster.adoc b/modules/create-cluster.adoc new file mode 100644 index 0000000000..594fcece0f --- /dev/null +++ b/modules/create-cluster.adoc @@ -0,0 +1,42 @@ +// Module included in the following assemblies: +// +// * assemblies/quickstart-osd.adoc + + +[id="create-cluster_{context}"] += Creating a cluster + + +You can quickly create a standard {product-title} cluster, which deploys in cloud provider accounts owned by Red Hat. + +.Procedure + +. Log in to {cloud-redhat-com}. + +. Click *Create Cluster* -> *Red Hat OpenShift Dedicated* -> *Create Clusters*. + +. Select a cloud infrastructure provider. + +. Select *Standard* for the billing model. + +. Enter a *Cluster name*. + +. Select a *Region* and choose either a *Single zone* or *Multizone* availability. + +. Select your *Compute node instance type* and the *Compute node count*. The number and types of nodes available to you depend on your {product-title} subscription. + +. Optional: Expand *Edit node labels* to add labels to your nodes. Click *Add label* to add more node labels. + +. Select the amount of *Persistent storage* and *Load balancers* you want set on the deployed cluster or accept the defaults. + +. The *Basic* Network configuration is selected by default. This setting creates a new VPC for your cluster using the default values. + +. Select your cluster update method. *Manual* is selected by default. If you want your clusters to be automatically upgraded when new versions are available, select the *Automatic* option. If you opt for automatic upgrades, you must specify the preferred day of the week and the time (UTC) for the upgrade to start. + +. Optional: You can set a grace period for *Node Draining* during cluster upgrades. A *1 hour* grace period is set by default. + +. Click *Create cluster*. The cluster creation process begins and takes about 30-40 minutes to complete. + +.Verification + +* The *Installing cluster* heading, under the *Overview* tab, indicates that the cluster is installing and you can view the installation logs from this heading. The *Status* indicator under the *Details* heading indicates when your cluster is *Ready* for use. diff --git a/modules/create-gcp-cluster.adoc b/modules/create-gcp-cluster.adoc new file mode 100644 index 0000000000..fe84b7a9a2 --- /dev/null +++ b/modules/create-gcp-cluster.adoc @@ -0,0 +1,99 @@ +// Module included in the following assemblies: +// +// * assemblies/creating-your-cluster.adoc + +[id="create-gcp-cluster_{context}"] += Creating a cluster on GCP + + +You can create an {product-title} cluster on {GCP} using a standard cloud account owned by Red Hat or with your own cloud account using the Customer Cloud Subscription (CCS) model. + +Using the CCS model to deploy and manage {product-title} into your GCP account requires several prerequisites to be met. 
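+
+The prerequisites below include an `osd-ccs-admin` IAM service account with several roles attached and a service account key exported to `osServiceAccount.json`. If you prefer to prepare these with the `gcloud` CLI instead of the GCP console, the following hedged sketch shows the pattern for one role binding. Repeat the binding for each role listed in the prerequisites, and replace `<project_id>` with your GCP project ID:
+
+[source,terminal]
+----
+$ gcloud iam service-accounts create osd-ccs-admin --project=<project_id>
+$ gcloud projects add-iam-policy-binding <project_id> \
+    --member="serviceAccount:osd-ccs-admin@<project_id>.iam.gserviceaccount.com" \
+    --role="roles/dns.admin"
+$ gcloud iam service-accounts keys create osServiceAccount.json \
+    --iam-account=osd-ccs-admin@<project_id>.iam.gserviceaccount.com
+----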
+ +.Prerequisites + +* Your GCP account has been configured for use with {product-title}. +* The necessary resource quotas and limits needed to support the desired cluster size are available in your GCP account. +* A GCP project has already been created. ++ +[NOTE] +==== +The project name must be 10 characters or less. +==== + +* An IAM service account in GCP called `osd-ccs-admin` with the following roles attached: + ** DNS Administrator + ** Organization Policy Viewer Owner + ** Project IAM Admin + ** Service Management Administrator + ** Service Usage Admin + ** Storage Admin + +* A key has been created for your GCP service account and exported to a file named `osServiceAccount.json`. +* It is recommended that you have at least *Production Support* from GCP. +* To prevent potential conflicts, it is recommended that no other resources are provisioned in the project prior to provisioning {product-title}. + +.Procedure + +. Log in to {cloud-redhat-com}. + +. Click *Create Cluster* -> *Red Hat OpenShift Dedicated* -> *Create cluster*. + +. Select *Google Cloud* as your infrastructure provider. + +. Select your billing model. +** *Standard* is selected by default. +** If you select the *Customer cloud subscription* model, an informational dialogue window will open. Review the prerequisites for installing a GCP CCS cluster and click *Close*. You must provide your GCP service account information with a JSON file. Click *Browse* to locate and attach the *Service account JSON* file to your cluster. + +. Enter your *Cluster name*. + +. Select a *Region* and choose either a *Single zone* or *Multizone* availability. + +. Select your *Compute node instance type* and the *Compute node count (per zone)*. After your cluster is created, you can change the number of compute nodes in your cluster, but you can not change the worker node instance type. The number and types of nodes available to you depend on your {product-title} subscription. + +. Optional: Expand *Edit node labels* to add labels to your nodes. Click *Add label* to add more node labels. + +. If you are creating a standard {product-title} cluster, select the amount of *Persistent storage* and *Load balancers* you want to set on the deployed cluster. You can also accept the provided defaults. + +. Select your preferred network configuration. +** *Basic* is selected by default. This setting creates a new VPC for your cluster using the default values. +** Select *Advanced* if you want to configure your networking IP ranges or set your cluster privacy. +... Enter the desired values to configure your network IP ranges or enter the following defaults: + +.... Node CIDR: 10.0.0.0/16 + +.... Service CIDR: 172.30.0.0/16 + +.... Pod CIDR: 10.128.0.0/14 + +.... Host Prefix: /23 + +... If you are creating a CCS {product-title} cluster, you can enable private clusters. This option is not available for standard clusters. Select your preferred cluster privacy. *Private* is selected by default. + ++ +[IMPORTANT] +==== +CIDR configurations cannot be changed later. Confirm your selections with your network administrator before proceeding. + +If the cluster privacy is set to *Private*, you will not be able to access your cluster until you configure private connections in your cloud provider. +==== + + + +. Select your cluster update method. +** *Manual* is selected by default. With this option, you are responsible for updating your cluster. If your cluster version falls too far behind, it will be automatically updated. 
+** Select *Automatic* if you want your cluster to be automatically upgraded when new versions are available. If you opt for automatic upgrades, you must specify the preferred day of the week and the time (UTC) for the upgrade to start. + ++ +[WARNING] +==== +High and Critical security concerns (CVEs) are patched automatically within 48 hours, regardless of your chosen update strategy. +==== + +. Optional: You can set a grace period for *Node Draining* during cluster upgrades. A *1 hour* grace period is set by default. + +. Click *Create cluster*. The cluster creation process begins and takes about 30-40 minutes to complete. + +.Verification + +* The *Installing cluster* heading, under the *Overview* tab, indicates that the cluster is installing and you can view the installation logs from this heading. The *Status* indicator under the *Details* heading indicates when your cluster is *Ready* for use. diff --git a/modules/dedicated-aws-dc-existing.adoc b/modules/dedicated-aws-dc-existing.adoc index 585cf14e6b..be05174539 100644 --- a/modules/dedicated-aws-dc-existing.adoc +++ b/modules/dedicated-aws-dc-existing.adoc @@ -7,7 +7,7 @@ .Prerequisites -* Confirm the CIDR range of the OSD VPC will not conflict with any other VGWs you have associated. +* Confirm the CIDR range of the {product-title} VPC will not conflict with any other VGWs you have associated. * Gather the following information: ** The Direct Connect Gateway ID. ** The AWS Account ID associated with the virtual interface. @@ -15,14 +15,14 @@ .Procedure -. Log in to the OSD AWS Account Dashboard and select the correct region. -. From the OSD AWS Account region, select *VPC* from the *Services* menu. +. Log in to the {product-title} AWS Account Dashboard and select the correct region. +. From the {product-title} AWS Account region, select *VPC* from the *Services* menu. . From *VPN Connections*, select *Virtual Private Gateways*. . Select *Create Virtual Private Gateway*. . Give the Virtual Private Gateway a suitable name. . Click *Custom ASN* and enter the *Amazon side ASN* value gathered previously or use the Amazon Provided ASN. . Create the Virtual Private Gateway. -. In the *Navigation* pane of the OSD AWS Account Dashboard, choose *Virtual private gateways* and select the virtual private gateway. Choose *View details*. +. In the *Navigation* pane of the {product-title} AWS Account Dashboard, choose *Virtual private gateways* and select the virtual private gateway. Choose *View details*. . Choose *Direct Connect gateway associations* and click *Associate Direct Connect gateway*. . Under *Association account type*, for Account owner, choose *Another account*. . For *Direct Connect gateway owner*, enter the ID of the AWS account that owns the Direct Connect gateway. diff --git a/modules/dedicated-aws-dc-hvif.adoc b/modules/dedicated-aws-dc-hvif.adoc index 6613dbe4de..f6995de451 100644 --- a/modules/dedicated-aws-dc-hvif.adoc +++ b/modules/dedicated-aws-dc-hvif.adoc @@ -7,7 +7,7 @@ .Prerequisites -* Gather OSD AWS Account ID. +* Gather {product-title} AWS Account ID. [id="dedicated-aws-dc-hvif-type"] == Determining the type of Direct Connect connection @@ -17,7 +17,7 @@ connection. .Procedure -. Log in to the OSD AWS Account Dashboard and select the correct region. +. Log in to the {product-title} AWS Account Dashboard and select the correct region. . Select *Direct Connect* from the *Services* menu. . There will be one or more Virtual Interfaces waiting to be accepted, select one of them to view the *Summary*. . 
View the Virtual Interface type: private or public. @@ -35,7 +35,7 @@ is Private. .Procedure -. Log in to the OSD AWS Account Dashboard and select the correct region. +. Log in to the {product-title} AWS Account Dashboard and select the correct region. . From the AWS region, select *VPC* from the *Services* menu. . Select *Virtual Private Gateways* from *VPN Connections*. . Click *Create Virtual Private Gateway*. @@ -43,7 +43,7 @@ is Private. . Select *Custom ASN* and enter the *Amazon side ASN* value gathered previously. . Create the Virtual Private Gateway. . Click the newly created Virtual Private Gateway and choose *Attach to VPC* from the *Actions* tab. -. Select the *OSD Cluster VPC* from the list, and attach the Virtual Private Gateway to the VPC. +. Select the *{product-title} Cluster VPC* from the list, and attach the Virtual Private Gateway to the VPC. . From the *Services* menu, click *Direct Connect*. Choose one of the Direct Connect Virtual Interfaces from the list. . Acknowledge the *I understand that Direct Connect port charges apply once I click Accept Connection* message, then choose *Accept Connection*. . Choose to *Accept* the Virtual Private Gateway Connection and select the Virtual Private Gateway that was created in the previous steps. @@ -58,8 +58,8 @@ is Public. .Procedure -. Log in to the OSD AWS Account Dashboard and select the correct region. -. From the OSD AWS Account region, select *Direct Connect* from the *Services* menu. +. Log in to the {product-title} AWS Account Dashboard and select the correct region. +. From the {product-title} AWS Account region, select *Direct Connect* from the *Services* menu. . Select *Direct Connect Gateways* and *Create Direct Connect Gateway*. . Give the Direct Connect Gateway a suitable name. . In the *Amazon side ASN*, enter the Amazon side ASN value gathered previously. @@ -79,13 +79,13 @@ period and view the status of the Interfaces. .Procedure -. Log in to the OSD AWS Account Dashboard and select the correct region. -. From the OSD AWS Account region, select *Direct Connect* from the *Services* menu. +. Log in to the {product-title} AWS Account Dashboard and select the correct region. +. From the {product-title} AWS Account region, select *Direct Connect* from the *Services* menu. . Select one of the Direct Connect Virtual Interfaces from the list. . Check the Interface State has become *Available* . Check the Interface BGP Status has become *Up*. . Repeat this verification for any remaining Direct Connect Interfaces. After the Direct Connect Virtual Interfaces are available, you can log in to the -OSD AWS Account Dashboard and download the Direct Connect configuration file for +{product-title} AWS Account Dashboard and download the Direct Connect configuration file for configuration on your side. diff --git a/modules/dedicated-aws-dc-methods.adoc b/modules/dedicated-aws-dc-methods.adoc index 83a4814340..43e0f5233a 100644 --- a/modules/dedicated-aws-dc-methods.adoc +++ b/modules/dedicated-aws-dc-methods.adoc @@ -7,14 +7,14 @@ A Direct Connect connection requires a hosted Virtual Interface (VIF) connected to a Direct Connect Gateway (DXGateway), which is in turn associated to a -Virtual Gateway (VGW) or a Transit Gateway to access a remote VPC in +Virtual Gateway (VGW) or a Transit Gateway in order to access a remote VPC in the same or another account. If you do not have an existing DXGateway, the typical process involves creating -the hosted VIF, with the DXGateway and VGW being created in the OSD AWS Account. 
+the hosted VIF, with the DXGateway and VGW being created in the {product-title} AWS Account. If you have an existing DXGateway connected to one or more existing VGWs, the -process involves the OSD AWS Account sending an Association Proposal +process involves the {product-title} AWS Account sending an Association Proposal to the DXGateway owner. The DXGateway owner must ensure that the proposed CIDR will not conflict with any other VGWs they have associated. diff --git a/modules/dedicated-aws-vpc-accepting-peering.adoc b/modules/dedicated-aws-vpc-accepting-peering.adoc index ade593e17d..92440358a4 100644 --- a/modules/dedicated-aws-vpc-accepting-peering.adoc +++ b/modules/dedicated-aws-vpc-accepting-peering.adoc @@ -19,5 +19,5 @@ Customer AWS Account. . Go to *Peering Connections*. . Click on *Pending peering connection*. . Confirm the AWS Account and VPC ID that the request originated from. This should -be from the OSD AWS Account and {product-title} Cluster VPC. +be from the {product-title} AWS Account and {product-title} Cluster VPC. . Click *Accept Request*. diff --git a/modules/dedicated-aws-vpc-configuring-routing-tables.adoc b/modules/dedicated-aws-vpc-configuring-routing-tables.adoc index 64d5028f56..7d26657da4 100644 --- a/modules/dedicated-aws-vpc-configuring-routing-tables.adoc +++ b/modules/dedicated-aws-vpc-configuring-routing-tables.adoc @@ -14,7 +14,7 @@ to communicate across the peering connection. .Procedure -. Log in to the AWS Web Console for the OSD AWS Account. +. Log in to the AWS Web Console for the {product-title} AWS Account. . Navigate to the *VPC Service*, then *Route Tables*. . Select the Route Table for the {product-title} Cluster VPC. + @@ -30,7 +30,7 @@ Select the private one that has a number of explicitly associated subnets. . Click *Save*. . You must complete the same process with the other VPC's CIDR block: -.. Log in to the Customer AWS Web Console → *VPC Service* → *Route Tables*. +.. Log into the Customer AWS Web Console → *VPC Service* → *Route Tables*. .. Select the Route Table for your VPC. .. Select the *Routes* tab, then *Edit*. .. Enter the {product-title} Cluster VPC CIDR block in the *Destination* text box. diff --git a/modules/dedicated-aws-vpc-initiating-peering.adoc b/modules/dedicated-aws-vpc-initiating-peering.adoc index 7322bdbf4a..00229e24d0 100644 --- a/modules/dedicated-aws-vpc-initiating-peering.adoc +++ b/modules/dedicated-aws-vpc-initiating-peering.adoc @@ -5,7 +5,7 @@ [id="dedicated-aws-vpc-initiating-peering"] = Initiating the VPC peer request -You can send a VPC peering connection request from the OSD AWS Account to the +You can send a VPC peering connection request from the {product-title} AWS Account to the Customer AWS Account. .Prerequisites @@ -25,9 +25,10 @@ with the procedure. .Procedure -. Log in to the Web Console for the OSD AWS Account and navigate to the +. Log in to the Web Console for the {product-title} AWS Account and navigate to the *VPC Dashboard* in the region where the cluster is being hosted. -. Go to the *Peering Connections* page and click *Create Peering Connection*. +. Go to the *Peering Connections* page and click the *Create Peering Connection* +button. . Verify the details of the account you are logged in to and the details of the account and VPC you are connecting to: .. *Peering connection name tag*: Set a descriptive name for the VPC Peering Connection. 
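+
+The peering request can also be initiated and accepted with the AWS CLI instead of the web console. The following is a hedged sketch that uses placeholder IDs; run the first command with credentials for the {product-title} AWS Account and the second with credentials for the Customer AWS Account:
+
+[source,terminal]
+----
+$ aws ec2 create-vpc-peering-connection \
+    --vpc-id <cluster_vpc_id> \
+    --peer-vpc-id <customer_vpc_id> \
+    --peer-owner-id <customer_aws_account_id> \
+    --peer-region <customer_vpc_region>
+$ aws ec2 accept-vpc-peering-connection \
+    --vpc-peering-connection-id <peering_connection_id> \
+    --region <customer_vpc_region>
+----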
diff --git a/modules/dedicated-aws-vpc-peering-terms.adoc b/modules/dedicated-aws-vpc-peering-terms.adoc index 964506b559..0a47b902a0 100644 --- a/modules/dedicated-aws-vpc-peering-terms.adoc +++ b/modules/dedicated-aws-vpc-peering-terms.adoc @@ -9,9 +9,9 @@ When setting up a VPC peering connection between two VPCs on two separate AWS accounts, the following terms are used: [horizontal] -OSD AWS Account:: The AWS account that contains the {product-title} cluster. -OSD Cluster VPC:: The VPC that contains the {product-title} cluster. -Customer AWS Account:: Your non-OSD AWS Account that you would like to peer with. +{product-title} AWS Account:: The AWS account that contains the {product-title} cluster. +{product-title} Cluster VPC:: The VPC that contains the {product-title} cluster. +Customer AWS Account:: Your non-{product-title} AWS Account that you would like to peer with. Customer VPC:: The VPC in your AWS Account that you would like to peer with. Customer VPC Region:: The region where the customer's VPC resides. diff --git a/modules/dedicated-aws-vpn-creating.adoc b/modules/dedicated-aws-vpn-creating.adoc index 4bcc110bc3..b0ca927cb5 100644 --- a/modules/dedicated-aws-vpn-creating.adoc +++ b/modules/dedicated-aws-vpn-creating.adoc @@ -24,7 +24,7 @@ configure at least one static route. .Procedure -. Log in to the OSD AWS Account Dashboard, and navigate to the VPC Dashboard. +. Log in to the {product-title} AWS Account Dashboard, and navigate to the VPC Dashboard. . Click on *Your VPCs* and identify the name and VPC ID for the VPC containing the {product-title} cluster. . From the VPC Dashboard, click *Customer Gateway*. . Click *Create Customer Gateway* and give it a meaningful name. diff --git a/modules/dedicated-aws-vpn-troubleshooting.adoc b/modules/dedicated-aws-vpn-troubleshooting.adoc index c60d024893..ea7d31503d 100644 --- a/modules/dedicated-aws-vpn-troubleshooting.adoc +++ b/modules/dedicated-aws-vpn-troubleshooting.adoc @@ -44,7 +44,6 @@ as in the *Down* state. The AWS Notification is as follows: -[source,text] ---- You have new non-redundant VPN connections diff --git a/modules/deleting-cluster.adoc b/modules/deleting-cluster.adoc new file mode 100644 index 0000000000..8121dc5124 --- /dev/null +++ b/modules/deleting-cluster.adoc @@ -0,0 +1,21 @@ + +// Module included in the following assemblies: +// +// * assemblies/deleting_your_cluster.adoc + +[id="deleting-cluster_{context}"] + += Deleting your cluster + + +You can delete your {product-title} cluster in the {OCM}. + +.Procedure + +. From {cloud-redhat-com}, click on the cluster you want to delete. + +. Click *Actions* -> *Delete Cluster*. + +. Type the name of the cluster highlighted in bold, then click *Delete*. + +Cluster deletion occurs automatically. diff --git a/modules/deleting-service-cli.adoc b/modules/deleting-service-cli.adoc new file mode 100644 index 0000000000..a77b9adf6e --- /dev/null +++ b/modules/deleting-service-cli.adoc @@ -0,0 +1,20 @@ + +// Module included in the following assemblies: +// +// * assemblies/adding-service.adoc + +[id="deleting-service-cli_{context}"] += Deleting a service from the CLI + +You can delete the add-on services from your {product-title} cluster through the OCM CLI. + +// TODO: Should there be a prereq to have the OCM CLI installed? 
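+
+The procedure below assumes that the OCM CLI (`ocm`) is installed and that you are logged in to your Red Hat account. A hedged sketch of confirming this, using a placeholder for your offline API token:
+
+[source,terminal]
+----
+$ ocm login --token="<offline_token>"
+$ ocm whoami
+----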
+ +.Procedure + +* To delete the add-on service from your cluster through the OCM CLI, enter the following command: ++ +[source,terminal] +---- +$ ocm delete api/clusters_mgmt/v1/clusters//addons/ +---- diff --git a/modules/deleting-service.adoc b/modules/deleting-service.adoc new file mode 100644 index 0000000000..1a358dc2e3 --- /dev/null +++ b/modules/deleting-service.adoc @@ -0,0 +1,23 @@ + +// Module included in the following assemblies: +// +// * assemblies/adding-service.adoc + +[id="deleting-service_{context}"] += Deleting a service from OCM + +You can delete the add-on services from your {product-title} cluster through the {OCM}. + +.Procedure + +. Navigate to the *Clusters* page in {cloud-redhat-com}. + +. Click the cluster with the installed service that you want to delete. + +. Navigate to the *Add-ons* tab, and locate the installed service that you want to delete. + +. From the installed service option, click the menu and select *Uninstall add-on* from the drop-down menu. + +. You must type the name of the service that you want to delete in the confirmation message that appears. + +. Click *Uninstall*. You are returned to the *Add-ons* tab and an uninstalling state icon is present on the service option you deleted. diff --git a/modules/deploy-app.adoc b/modules/deploy-app.adoc new file mode 100644 index 0000000000..3cb593b972 --- /dev/null +++ b/modules/deploy-app.adoc @@ -0,0 +1,60 @@ + +// Module included in the following assemblies: +// +// * assemblies/quickstart-osd.adoc + +[id="deploy-app_{context}"] += Deploying an app with the OpenShift service catalog + + +From the OpenShift web console, you can deploy one of the built-in service catalog apps and expose the app with a route. + +.Prerequisites + +- An actively running cluster. + +.Procedure + +. From OpenShift Cluster Manager (OCM), click *Open console*. + +. From the side navigation menu in the *Administrator* perspective, click *Home* -> *Projects* and then click *Create Project*. + +. Enter a name for your project. Optional: Add a *Display Name* and *Description*. Click *Create*. + +. Switch to the Developer perspective from the side navigation menu to create an app. + +. Click *+Add* from the side navigation menu. From the *Add pane* menu bar, make sure that the *Project* is the one that you just created. + +. Click *From Catalog*. The Developer Catalog opens in the pane. + +. From the navigation menu in the pane, click *Languages* -> *JavaScript*. + +. Click *Node.js*, and then click *Create Application*. After you select *Node.js*, the *Create Source-to-Image Application* pane opens. ++ +[NOTE] +==== +You might need to click *Clear All Filters* to display the *Node.js* option. +==== + +. In the *Git* section, click *Try Sample*. + +. Scroll to confirm that *Deployment* and *Create a route to the application* are selected. + +. Click *Create*. It will take a few minutes for the pods to deploy. + +. Optional: You can check the status of the pods from the *Topology* pane. Click your *nodejs* app and review its sidebar. You must see that the `nodejs` build is complete, and that the `nodejs` pod is in a *Running* state to continue. + +. When the deployment is complete, click the route location URL, which has a format similar to the following: ++ +---- +http://nodejs-.-..containers.appdomain.cloud +---- ++ + +A new tab in your browser opens with a message similar to the following. ++ +---- +Welcome to your Node.js application on OpenShift +---- + +. 
Optional: To clean up the resources that you created, select *Administrator* from the perspective switcher, navigate to *Home* -> *Projects*, click your project's action menu, and click *Delete Project*. diff --git a/modules/enable-aws-access.adoc b/modules/enable-aws-access.adoc new file mode 100644 index 0000000000..8d10bb6aa0 --- /dev/null +++ b/modules/enable-aws-access.adoc @@ -0,0 +1,29 @@ +//Specify the module-type as either "CONCEPT, PROCEDURE, or REFERENCE" + +// Module included in the following assemblies: +// +// * osd_private_connections/aws_private_connections + +[id="enable-aws-access"] += Understanding AWS cloud infrastructure access + +[NOTE] +==== +AWS cloud infrastructure access does not apply to the Customer Cloud Subscription (CCS) infrastructure type that is chosen when you create a cluster because CCS clusters are deployed onto your account. +==== + + +{AWS} infrastructure access permits link:https://access.redhat.com/node/3610411[Customer Portal Organization Administrators] and cluster owners to enable AWS Identity and Access Management (IAM) users to have federated access to the AWS Management Console for their {product-title} cluster. AWS access can be granted for customer AWS users, and private cluster access can be implemented to suit the needs of your {product-title} environment. + +. Get started with configuring AWS infrastructure access for your {product-title} cluster. By creating an AWS user and account and providing that user with access to the {product-title} AWS account. + +. After you have access to the {product-title} AWS account, use one or more of the following methods to establish a private connection to your cluster: + +- Configuring AWS VPC peering: Enable VPC peering to route network traffic between two private IP addresses. + +- Configuring AWS VPN: Establish a Virtual Private Network to securely connect your private network to your Amazon Virtual Private Cloud. + +- Configuring AWS Direct Connect: Configure AWS Direct Connect to establish a dedicated network connection between your private network and an AWS Direct Connect location. + +// TODO: Was this supposed to be an xref that got yanked? Looks a little odd as is. I'd yank this and add it as an xref in an additional resources or next steps section in the assembly. +After configuring your cloud infrastructure access, learn more about Configuring a private cluster. diff --git a/modules/enable-private-cluster-existing.adoc b/modules/enable-private-cluster-existing.adoc new file mode 100644 index 0000000000..80afda0a5e --- /dev/null +++ b/modules/enable-private-cluster-existing.adoc @@ -0,0 +1,40 @@ + +// Module included in the following assemblies: +// +// * assemblies/private-cluster.adoc + +[id="enable-private-cluster-existing_{context}"] += Enabling an existing cluster to be private + + +After a cluster has been created, you can later enable the cluster to be private. + +.Prerequisites + +* The following private connections must be configured to allow private access: +** VPC Peering +** Cloud VPN +** DirectConnect (AWS only) +** TransitGateway (AWS only) +** Cloud Interconnect (GCP only) + +.Procedure + +. Log in to {cloud-redhat-com}. + +. Select the public cluster you would like to make private. + +. On the *Networking* tab, select *Make API private* under *Control Plane API endpoint*. ++ + +[WARNING] +==== +When set to *Private*, you cannot access your cluster unless you have configured the private connections in your cloud provider as outlined in the prerequisites. +==== + +. 
Click *Change settings*. ++ +[NOTE] +==== +Transitioning your cluster between private and public can take several minutes to complete. +==== diff --git a/modules/enable-private-cluster-new.adoc b/modules/enable-private-cluster-new.adoc new file mode 100644 index 0000000000..a6c78d480a --- /dev/null +++ b/modules/enable-private-cluster-new.adoc @@ -0,0 +1,39 @@ + +// Module included in the following assemblies: +// +// * assemblies/private-cluster.adoc + +[id="enable-private-cluster-new_{context}"] += Enabling a private cluster during cluster creation + + +You can enable private cluster settings when creating a new cluster. + +.Prerequisites + +* The following private connections must be configured to allow private access: +** VPC Peering +** Cloud VPN +** DirectConnect (AWS only) +** TransitGateway (AWS only) +** Cloud Interconnect (GCP only) + + +.Procedure + +. Log in to {cloud-redhat-com}. +. Click *Create cluster* -> *{product-title}* -> *Create cluster*. +. Configure your cluster details. +. When selecting your preferred network configuration, select *Advanced*. +. Select *Private*. ++ +[WARNING] +==== +When set to *Private*, you cannot access your cluster unless you have configured the private connections in your cloud provider as outlined in the prerequisites. +==== + +. Click *Create cluster*. The cluster creation process begins and takes about 30-40 minutes to complete. + +.Verification + +* The *Installing cluster* heading, under the *Overview* tab, indicates that the cluster is installing and you can view the installation logs from this heading. The *Status* indicator under the *Details* heading indicates when your cluster is *Ready* for use. diff --git a/modules/enable-public-cluster.adoc b/modules/enable-public-cluster.adoc new file mode 100644 index 0000000000..ab713d24f4 --- /dev/null +++ b/modules/enable-public-cluster.adoc @@ -0,0 +1,25 @@ + +// Module included in the following assemblies: +// +// * assemblies/private-cluster.adoc + +[id="enable-public-cluster_{context}"] += Enabling an existing private cluster to be public +// TODO: These wordings of "enabling the cluster "to be public/private" could probably be improved. At the very least, these two modules should probably use "Configuring" instead of "Enabling", as it is worded now. + +After a private cluster has been created, you can later enable the cluster to be public. + +.Procedure + +. Log in to {cloud-redhat-com}. + +. Select the private cluster you would like to make public. + +. On the *Networking* tab, deselect *Make API private* under *Control Plane API endpoint*. + +. Click *Change settings*. ++ +[NOTE] +==== +Transitioning your cluster between private and public can take several minutes to complete. +==== diff --git a/modules/gcp-limits.adoc b/modules/gcp-limits.adoc new file mode 100644 index 0000000000..57a19cab54 --- /dev/null +++ b/modules/gcp-limits.adoc @@ -0,0 +1,64 @@ + +// Module included in the following assemblies: +// +// * assemblies/config-gcp-account.adoc + + +[id="gcp-limits_{context}"] += GCP account limits + + +The {product-title} cluster uses a number of Google Cloud Platform (GCP) components, but the default link:https://cloud.google.com/docs/quota[quotas] do not affect your ability to install an {product-title} cluster. + +A standard {product-title} cluster uses the following resources. Note that some resources are required only during the bootstrap process and are removed after the cluster deploys. 
+ +.GCP resources used in a default cluster + +[cols="2a,2a,2a,2a,2a",options="header"] +|=== +|Service +|Component +|Location +|Total resources required +|Resources removed after bootstrap + + +|Service account |IAM |Global |5 |0 +|Firewall Rules |Compute |Global |11 |1 +|Forwarding Rules |Compute |Global |2 |0 +|In-use global IP addresses |Compute |Global |4 |1 +|Health checks |Compute |Global |3 |0 +|Images |Compute |Global |1 |0 +|Networks |Compute |Global |2 |0 +|Static IP addresses |Compute |Region |4 |1 +|Routers |Compute |Global |1 |0 +|Routes |Compute |Global |2 |0 +|Subnetworks |Compute |Global |2 |0 +|Target Pools |Compute |Global |3 |0 +|CPUs |Compute |Region |28 |4 +|Persistent Disk SSD (GB) |Compute |Region |896 |128 + +|=== + +[NOTE] +==== +If any of the quotas are insufficient during installation, the installation program displays an error that states both which quota was exceeded and the region. +==== + +Be sure to consider your actual cluster size, planned cluster growth, and any usage from other clusters that are associated with your account. The CPU, Static IP addresses, and Persistent Disk SSD (Storage) quotas are the ones that are most likely to be insufficient. + +If you plan to deploy your cluster in one of the following regions, you will exceed the maximum storage quota and are likely to exceed the CPU quota limit: + +* asia-east2 +* asia-northeast2 +* asia-south1 +* australia-southeast1 +* europe-north1 +* europe-west2 +* europe-west3 +* europe-west6 +* northamerica-northeast1 +* southamerica-east1 +* us-west2 + +You can increase resource quotas from the link:https://console.cloud.google.com/iam-admin/quotas[GCP console], but you might need to file a support ticket. Be sure to plan your cluster size early so that you can allow time to resolve the support ticket before you install your {product-title} cluster. diff --git a/modules/kubernetes-about.adoc b/modules/kubernetes-about.adoc new file mode 100644 index 0000000000..0ba9dfac92 --- /dev/null +++ b/modules/kubernetes-about.adoc @@ -0,0 +1,16 @@ + +// Module included in the following assemblies: +// +// * assemblies/osd-architecture.adoc + +[id="kubernetes-about_{context}"] += About Kubernetes + +Kubernetes is an open source container orchestration engine for automating deployment, scaling, and management of containerized applications. The general concept of Kubernetes is fairly simple: + +* Start with one or more worker nodes to run the container workloads. +* Manage the deployment of those workloads from one or more control nodes. +* Wrap containers in a deployment unit called a pod. Using pods provides extra metadata with the container and offers the ability to group several containers in a single deployment entity. +* Create special kinds of assets. For example, services are represented by a set of pods and a policy that defines how they are accessed. This policy allows containers to connect to the services that they need even if they do not have the specific IP addresses for the services. Replication controllers are another special asset that indicates how many pod Replicas are required to run at a time. You can use this capability to automatically scale your application to adapt to its current demand. + +To learn more about Kubernetes, see the link:https://kubernetes.io/docs/home/?path=users&persona=app-developer&level=foundational[Kubernetes documentation]. 
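To make these concepts concrete, the following minimal sketch uses the OpenShift CLI (`oc`) to create a deployment, scale its pod replicas, and expose them as a service. The project name `example` and the `hello-openshift` image are illustrative assumptions only, not requirements of {product-title}:

[source,terminal]
----
$ oc new-project example                                                  # create a project to work in
$ oc create deployment hello --image=docker.io/openshift/hello-openshift  # deployment manages the pods
$ oc scale deployment hello --replicas=3                                  # run three pod replicas
$ oc expose deployment hello --port=8080                                  # create a service for the pods
$ oc get pods,services -n example
----

In this sketch, the deployment manages the pod replicas and the service gives them a single stable endpoint, mirroring the replication and service concepts described above.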
diff --git a/modules/life-cycle-dates.adoc b/modules/life-cycle-dates.adoc new file mode 100644 index 0000000000..cd17ab05a1 --- /dev/null +++ b/modules/life-cycle-dates.adoc @@ -0,0 +1,29 @@ +// Module included in the following assemblies: +// +// * rosa_policy/rosa-life-cycle.adoc + +[id="rosa-life-cycle-dates_{context}"] += Life cycle dates + +[options="header"] +|=== +|Version |General availability |End of life +|4.9 |Oct 18, 2021 |Jul 18, 2022 +|4.8 |Jul 27, 2021 |Apr 27, 2022 + +ifeval::["{product-title}" == "OpenShift Dedicated"] +|4.7 |Feb 24, 2021 |Dec 17, 2021 footnote:[4.7 minor version follows previous Y-1 life cycle] +|4.6 |Oct 27, 2020 |Aug 26, 2021 +|4.5 |Sep 23, 2020 |Mar 26, 2021 +|4.4 |Sep 15, 2020 |Nov 26, 2020 +|4.3 |Feb 19, 2020 |Oct 23, 2020 +|4.2 |Nov 12, 2019 |Oct 15, 2020 +|4.1 |Jun 11, 2019 |Mar 20, 2020 +|3.11 |Oct 10, 2018 |Jul 31, 2021 footnote:[https://access.redhat.com/articles/5254001] +endif::[] + +ifeval::["{product-title}" == "Red Hat OpenShift Service on AWS"] +|4.7 |Mar 24, 2021 |Dec 17, 2021 footnote:[4.7 minor version follows previous Y-1 life cycle] +endif::[] + +|=== diff --git a/modules/life-cycle-definitions.adoc b/modules/life-cycle-definitions.adoc new file mode 100644 index 0000000000..2362624556 --- /dev/null +++ b/modules/life-cycle-definitions.adoc @@ -0,0 +1,41 @@ +// Module included in the following assemblies: +// +// * rosa_policy/rosa-life-cycle.adoc + +[id="rosa-life-cycle-definitions_{context}"] += Definitions + +.Version reference +[options="header"] +|=== +|Version format |Major |Minor |Patch |Major.minor.patch +| |x |y |z |x.y.z +|Example |4 |5 |21 |4.5.21 +|=== + +Major releases or X-releases:: Referred to only as _major releases_ or _X-releases_ (X.y.z). ++ +-- +.Examples +* "Major release 5" -> 5.y.z +* "Major release 4" -> 4.y.z +* "Major release 3" -> 3.y.z +-- + +Minor releases or Y-releases:: Referred to only as _minor releases_ or _Y-releases_ (x.Y.z). ++ +-- +.Examples +* "Minor release 4" -> 4.4.z +* "Minor release 5" -> 4.5.z +* "Minor release 6" -> 4.6.z +-- + +Patch releases or Z-releases:: Referred to only as _patch releases_ or _Z-releases_ (x.y.Z). ++ +-- +.Examples +* "Patch release 14 of minor release 5" -> 4.5.14 +* "Patch release 25 of minor release 5" -> 4.5.25 +* "Patch release 26 of minor release 6" -> 4.6.26 +-- diff --git a/modules/life-cycle-install.adoc b/modules/life-cycle-install.adoc new file mode 100644 index 0000000000..c59f63dadc --- /dev/null +++ b/modules/life-cycle-install.adoc @@ -0,0 +1,9 @@ +// Module included in the following assemblies: +// +// * rosa_policy/rosa-life-cycle.adoc + +[id="rosa-install-policy_{context}"] += Installation policy + +While Red Hat recommends installation of the latest support release, {product-title} supports +installation of any supported release as covered by the preceding policy. diff --git a/modules/life-cycle-limited-support.adoc b/modules/life-cycle-limited-support.adoc new file mode 100644 index 0000000000..7b20c38f17 --- /dev/null +++ b/modules/life-cycle-limited-support.adoc @@ -0,0 +1,15 @@ +// Module included in the following assemblies: +// +// * rosa_policy/rosa-life-cycle.adoc + +[id="rosa-limited-support_{context}"] += Limited support status + +While operating outside of the supported versions list, you may be asked to upgrade the cluster to +a supported version when requesting support. 
Additionally, Red Hat does not make any runtime or SLA +guarantees for clusters outside of the supported versions list at the end of their 9 month end of +life cycle date. + +Red Hat provides commercially reasonable support to ensure an upgrade path from an unsupported +release to a supported release is available. However, if a supported upgrade path is no longer +available, you may be required to create a new cluster and migrate your workloads. diff --git a/modules/life-cycle-major-versions.adoc b/modules/life-cycle-major-versions.adoc new file mode 100644 index 0000000000..adf09cfab7 --- /dev/null +++ b/modules/life-cycle-major-versions.adoc @@ -0,0 +1,14 @@ +// Module included in the following assemblies: +// +// * rosa_policy/rosa-life-cycle.adoc + +[id="rosa-major-versions_{context}"] += Major versions (X.y.z) + +Major versions of {product-title}, for example version 4, are supported for one year following the +release of a subsequent major version or the retirement of the product. + +.Example +* If version 5 were made available on {product-title} on January 1, version 4 would be allowed to + continue running on managed clusters for 12 months, until December 31. After this time, clusters + would need to be upgraded or migrated to version 5. diff --git a/modules/life-cycle-mandatory-upgrades.adoc b/modules/life-cycle-mandatory-upgrades.adoc new file mode 100644 index 0000000000..8cdd58ea47 --- /dev/null +++ b/modules/life-cycle-mandatory-upgrades.adoc @@ -0,0 +1,15 @@ +// Module included in the following assemblies: +// +// * rosa_policy/rosa-life-cycle.adoc + +[id="rosa-mandatory-upgrades_{context}"] += Mandatory upgrades + +In the event that a Critical or Important CVE, or other bug identified by Red Hat, significantly +impacts the security or stability of the cluster, the customer must upgrade to the next supported +patch release within 48 hours. + +In extreme circumstances and based on Red Hat's assessment of the CVE criticality to the +environment, if the upgrade to the next supported patch release has not been performed within 48 +hours of notification, the cluster will be automatically updated to the latest patch release to +mitigate potential security breach or instability. diff --git a/modules/life-cycle-minor-versions.adoc b/modules/life-cycle-minor-versions.adoc new file mode 100644 index 0000000000..5c4fc1fce9 --- /dev/null +++ b/modules/life-cycle-minor-versions.adoc @@ -0,0 +1,22 @@ +// Module included in the following assemblies: +// +// * rosa_policy/rosa-life-cycle.adoc + +[id="rosa-minor-versions_{context}"] += Minor versions (x.Y.z) + +Starting with the 4.8 OpenShift Container Platform minor version, Red Hat supports all minor +versions within a 9 month period following general availability of the given minor version. Patch +versions do not affect the 9 month supportability period. + +Customers are notified 60, 30, and 15 days prior to the end of the 9 month period. Clusters must be upgraded to +a supported minor version prior to the end of the 9 month period, or the cluster will enter +a "Limited Support" status. + +.Example +. A customer's cluster is currently running on 4.8.14. The 4.8 minor version became generally + available on July 27, 2021. +. On Feb 26, March 28, and April 12, 2022, the customer is notified that their cluster will enter "Limited Support" status + on April 27, 2022 if the cluster has not already been upgraded to a supported minor version. +. The cluster must be upgraded to 4.9 or later by April 27, 2022. +. 
If the upgrade has not been performed, the cluster will be flagged as being in a "Limited Support" status. diff --git a/modules/life-cycle-overview.adoc b/modules/life-cycle-overview.adoc new file mode 100644 index 0000000000..756bd9063b --- /dev/null +++ b/modules/life-cycle-overview.adoc @@ -0,0 +1,17 @@ +// Module included in the following assemblies: +// +// * rosa_policy/rosa-life-cycle.adoc + +[id="life-cycle-overview_{context}"] += Overview + +Red Hat provides a published product life cycle for {product-title} in order for customers and +partners to effectively plan, deploy, and support their applications running on the platform. Red +Hat publishes this life cycle in order to provide as much transparency as possible and might make +exceptions from these policies as conflicts arise. + +{product-title} is a managed instance of Red Hat OpenShift and maintains an independent release +schedule. More details about the managed offering can be found in the {product-title} service +definition. The availability of Security Advisories and Bug Fix Advisories for a specific version +are dependent upon the Red Hat OpenShift Container Platform life cycle policy and subject to the +{product-title} maintenance schedule. diff --git a/modules/life-cycle-patch-versions.adoc b/modules/life-cycle-patch-versions.adoc new file mode 100644 index 0000000000..de84194995 --- /dev/null +++ b/modules/life-cycle-patch-versions.adoc @@ -0,0 +1,17 @@ +// Module included in the following assemblies: +// +// * rosa_policy/rosa-life-cycle.adoc + +[id="rosa-patch-versions_{context}"] += Patch versions (x.y.Z) + +During the period in which a minor version is supported, Red Hat supports all OpenShift Container +Platform patch versions unless otherwise specified. + +For reasons of platform security and stability, a patch release may be deprecated, which would +prevent installations of that release and trigger mandatory upgrades off that release. + +.Example +. 4.7.6 is found to contain a critical CVE. +. Any releases impacted by the CVE will be removed from the supported patch release list. In + addition, any clusters running 4.7.6 will be scheduled for automatic upgrades within 48 hours. diff --git a/modules/life-cycle-supported-versions.adoc b/modules/life-cycle-supported-versions.adoc new file mode 100644 index 0000000000..dde284b9b0 --- /dev/null +++ b/modules/life-cycle-supported-versions.adoc @@ -0,0 +1,10 @@ +// Module included in the following assemblies: +// +// * rosa_policy/rosa-life-cycle.adoc + +[id="rosa-supported-versions_{context}"] += Supported versions exception policy + +Red Hat reserves the right to add or remove new or existing versions, or delay upcoming minor +release versions, that have been identified to have one or more critical production impacting bugs +or security issues without advance notice. diff --git a/modules/managing-dedicated-administrators.adoc b/modules/managing-dedicated-administrators.adoc new file mode 100644 index 0000000000..1a661f951c --- /dev/null +++ b/modules/managing-dedicated-administrators.adoc @@ -0,0 +1,39 @@ +// Module included in the following assemblies: +// +// administering_a_cluster/osd-admin-roles.adoc + +[id="managing-dedicated-administrators_{context}"] += Managing {product-title} administrators + +Administrator roles are managed using a `cluster-admin` or `dedicated-admin` group on the cluster. Existing members of this group can edit membership through {cloud-redhat-com}. 
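Because these roles are backed by groups on the cluster, you can also review the current membership directly with the OpenShift CLI (`oc`) as a read-only check, assuming your role has permission to view groups. The group name `dedicated-admins` shown in the second command is an assumption; substitute a group name returned by the first command. Membership changes should still be made through {cloud-redhat-com}:

[source,terminal]
----
$ oc get groups
$ oc get group dedicated-admins -o yaml
----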
+ +// TODO: These two procedures should be separated and created as proper procedure modules. + +[id="dedicated-administrators-adding-user_{context}"] +== Adding a user + +.Procedure + +. Navigate to the *Cluster Details* page and *Access Control* tab. +. Click the *Add user* button (first user only). +. Enter the user name and select the group. +. Click the *Add* button. + + +[NOTE] +==== +Adding a user to the `cluster-admin` group can take several minutes to complete. +==== + +[NOTE] +==== +Existing `dedicated-admin` users cannot also be added to the `cluster-admin` group. You must first remove the user from the `dedicated-admin` group before adding the user to the `cluster-admin` group. +==== + +[id="dedicated-administrators-removing-user_{context}"] +== Removing a user + +.Procedure + +. Navigate to the *Cluster Details* page and *Access Control* tab. +. Click the Options menu {kebab} to the right of the user and group combination and click *Delete*. diff --git a/modules/notification-subscribe.adoc b/modules/notification-subscribe.adoc new file mode 100644 index 0000000000..4c8e1cb0c2 --- /dev/null +++ b/modules/notification-subscribe.adoc @@ -0,0 +1,22 @@ + +// Module included in the following assemblies: +// +// * assemblies/notifications.adoc + +[id="notification-subscribe{context}"] += Subscribing to notifications + + +You can add notification contacts for your {product-title} cluster. When an event occurs that triggers a cluster notification email, subscribed users are notified. + +.Procedure + +. From link:https://cloud.redhat.com/openshift[OpenShift Cluster Manager (OCM)], navigate to the *Clusters* page and select your cluster. + +. On the *Support* tab, under the *Notification contacts* heading, click *Add notification contact*. + +. Enter the Red Hat username or email of the contact you want to add. + +. Click *Add contact*. + +A confirmation message appears when the contact was added successfully, and the user is then listed under the *Notification contacts* heading on the *Support* tab. diff --git a/modules/ocm-disabling-autoscaling-nodes.adoc b/modules/ocm-disabling-autoscaling-nodes.adoc new file mode 100644 index 0000000000..76a2c9a1bd --- /dev/null +++ b/modules/ocm-disabling-autoscaling-nodes.adoc @@ -0,0 +1,21 @@ + +// Module included in the following assemblies: +// +// nodes/nodes/nodes-disabling-autoscaling-nodes.adoc + +[id="ocm-disabling-autoscaling_{context}"] += Disabling autoscaling nodes in an existing cluster using OCM + +Disable autoscaling for worker nodes in the machine pool definition from the OCM console. + +.Procedure + +. From the link:https://console.redhat.com/[OCM console], navigate to the *Clusters* page and select the cluster with autoscaling that must be disabled. + +. On the selected cluster, select the *Machine pools* tab. + +. Click the Options menu {kebab} at the end of the machine pool with autoscaling and select *Scale*. + +. On the "Edit node count" dialog, deselect the *Enable autoscaling* checkbox. + +. Select *Apply* to save these changes and disable autoscaling from the cluster. 
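If your cluster is a ROSA cluster that you also manage with the `rosa` CLI, the following sketch is one way to confirm the change; the cluster name `mycluster` is an assumption:

[source,terminal]
----
$ rosa list machinepools --cluster=mycluster
----

The machine pool listing includes an autoscaling column, which should show that autoscaling is disabled for the machine pool that you edited.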
diff --git a/modules/ocm-enabling-autoscaling-nodes.adoc b/modules/ocm-enabling-autoscaling-nodes.adoc new file mode 100644 index 0000000000..9f680252b7 --- /dev/null +++ b/modules/ocm-enabling-autoscaling-nodes.adoc @@ -0,0 +1,21 @@
+
+// Module included in the following assemblies:
+//
+// nodes/nodes/nodes-about-autoscaling-nodes.adoc
+
+[id="ocm-enabling-autoscaling_{context}"]
+= Enabling autoscaling nodes in an existing cluster using OCM
+
+Enable autoscaling for worker nodes in the machine pool definition from the OCM console.
+
+.Procedure
+
+. From the link:https://console.redhat.com/[OCM console], navigate to the *Clusters* page and select the cluster that you want to enable autoscaling for.
+
+. On the selected cluster, select the *Machine pools* tab.
+
+. Click the Options menu {kebab} at the end of the machine pool that you want to enable autoscaling for and select *Scale*.
+
+. On the *Edit node count* dialog, select the *Enable autoscaling* checkbox.
+
+. Select *Apply* to save these changes and enable autoscaling for the cluster.
diff --git a/modules/osd-applications-config-custom-domains.adoc b/modules/osd-applications-config-custom-domains.adoc new file mode 100644 index 0000000000..8eaaa62b6a --- /dev/null +++ b/modules/osd-applications-config-custom-domains.adoc @@ -0,0 +1,107 @@
+// Module included in the following assemblies for OSD and ROSA:
+//
+// * applications/deployments/osd-config-custom-domains-applications.adoc
+
+[id="osd-applications-config-custom-domains.adoc_{context}"]
+= Configuring custom domains for applications
+
+Custom domains are specific wildcard domains that can be used with {product-title} applications. The top-level domains (TLDs) are owned by the customer that is operating the {product-title} cluster. The Custom Domains Operator sets up a new `ingresscontroller` with a custom certificate as a second day operation. The public DNS record for this `ingresscontroller` can then be used by an external DNS to create a wildcard CNAME record for use with a custom domain.
+
+[NOTE]
+====
+Custom API domains are not supported because Red Hat controls the API domain. However, customers can change their application domains. For private custom domains with a private `IngressController`, set `.spec.scope` to `Internal` in the `CustomDomain` CR.
+====
+
+.Prerequisites
+
+* A user account with `dedicated-admin` privileges
+* A unique wildcard domain, such as `*.apps..io`
+* A wildcard custom certificate, such as `CN=*.apps..io`
+* Access to a cluster with the latest version of the `oc` CLI installed
+
+[IMPORTANT]
+Do not use the reserved names `default` or `apps*`, such as `apps` or `apps2`, in the `metadata/name:` section of the `CustomDomain` CR.
+
+.Procedure
+
+. Create a new TLS secret from a private key and a public certificate, where `fullchain.pem` is your public wildcard certificate chain and `privkey.pem` is its private key.
++
+.Example
+[source,terminal]
+----
+$ oc create secret tls -tls --cert=fullchain.pem --key=privkey.pem -n
+----
+
+. Create a new `CustomDomain` custom resource (CR):
++
+.Example `-custom-domain.yaml`
+[source,yaml]
+----
+apiVersion: managed.openshift.io/v1alpha1
+kind: CustomDomain
+metadata:
+ name:
+spec:
+ domain: apps.companyname.io <1>
+ scope: External
+ certificate:
+ name: -tls <2>
+ namespace:
+----
+<1> The custom domain.
+<2> The secret created in the previous step.
+
+. Apply the CR:
++
+.Example
+[source,terminal]
+----
+$ oc apply -f -custom-domain.yaml
+----
+
+. 
Get the status of your newly created CR: ++ +[source,terminal] +---- +$ oc get customdomains +---- ++ +.Example output +[source,terminal] +---- +NAME ENDPOINT DOMAIN STATUS + xxrywp..cluster-01.opln.s1.openshiftapps.com *.apps..io Ready +---- + +. Using the endpoint value, add a new wildcard CNAME recordset to your managed DNS provider, such as Route53, Azure DNS, or Google DNS. ++ +.Example ++ +[source,terminal] +---- +*.apps..io -> xxrywp..cluster-01.opln.s1.openshiftapps.com +---- + +. Create a new application and expose it: ++ +.Example +[source,terminal] +---- +$ oc new-app --docker-image=docker.io/openshift/hello-openshift -n my-project +---- ++ +[source,terminal] +---- +$ oc create route edge --service=hello-openshift hello-openshift-tls --hostname hello-openshift-tls-my-project.apps.acme.io -n my-project +---- ++ +[source,terminal] +---- +$ oc get route -n my-project +---- ++ +[source,terminal] +---- +$ curl https://hello-openshift-tls-my-project.apps..io +Hello OpenShift! +---- diff --git a/modules/osd-aws-privatelink-about.adoc b/modules/osd-aws-privatelink-about.adoc new file mode 100644 index 0000000000..ca745ffc35 --- /dev/null +++ b/modules/osd-aws-privatelink-about.adoc @@ -0,0 +1,6 @@ +[id="osd-aws-privatelink-about.adoc_{context}"] += Understanding AWS PrivateLink + +A {product-title} cluster can be created without any requirements on public subnets, internet gateways, or network address translation (NAT) gateways. In this configuration, Red Hat uses AWS PrivateLink to manage and monitor a cluster in order to avoid all public ingress network traffic. + +For more information, see link:https://aws.amazon.com/privatelink/[AWS PrivateLink] on the AWS website. diff --git a/modules/osd-aws-privatelink-architecture.adoc b/modules/osd-aws-privatelink-architecture.adoc new file mode 100644 index 0000000000..717e8a172e --- /dev/null +++ b/modules/osd-aws-privatelink-architecture.adoc @@ -0,0 +1,43 @@ +[id="osd-aws-privatelink-architecture.adoc_{context}"] += AWS PrivateLink architecture + +The Red Hat managed infrastructure that creates AWS PrivateLink clusters is hosted on private subnets. The connection between Red Hat and the customer-provided infrastructure is created through AWS PrivateLink VPC endpoints. + +[NOTE] +==== +AWS PrivateLink is supported on existing VPCs only. +==== + +The following diagram shows a multiple availability zone (Multi-AZ) PrivateLink cluster deployed on private subnets. + +.Multi-AZ AWS PrivateLink cluster deployed on private subnets + +image::156_OpenShift_ROSA_Arch_0621_privatelink.svg[Multi-AZ AWS PrivateLink cluster deployed on private subnets] + += AWS reference architectures + +AWS provides multiple reference architectures that can be useful to customers when planning how to set up a configuration that uses AWS PrivateLink. Here are three examples: + +* VPC with a private subnet and AWS Site-to-Site VPN access. ++ +This configuration enables you to extend your network into the cloud without exposing your network to the internet. ++ +To enable communication with your network over an Internet Protocol Security (IPsec) VPN tunnel, this configuration contains a virtual private cloud (VPC) with a single private subnet and a virtual private gateway. Communication over the internet does not use an internet gateway. ++ +For more information, see link:https://docs.aws.amazon.com/vpc/latest/userguide/VPC_Scenario4.html[VPC with a private subnet only and AWS Site-to-Site VPN access] in the AWS documentation. 
+
+* VPC with public and private subnets (NAT)
++
+This configuration enables you to isolate your network so that the public subnet is reachable from the internet but the private subnet is not.
++
+Only the public subnet can send outbound traffic directly to the internet. The private subnet can access the internet by using a network address translation (NAT) gateway that resides in the public subnet. This allows database servers to connect to the internet for software updates using the NAT gateway, but does not allow connections to be made directly from the internet to the database servers.
++
+For more information, see link:https://docs.aws.amazon.com/vpc/latest/userguide/VPC_Scenario2.html[VPC with public and private subnets (NAT)] in the AWS documentation.
+
+* VPC with public and private subnets and AWS Site-to-Site VPN access
++
+This configuration enables you to extend your network into the cloud and to directly access the internet from your VPC.
++
+You can run a multi-tiered application with a scalable web front end in a public subnet, and house your data in a private subnet that is connected to your network by an IPsec AWS Site-to-Site VPN connection.
++
+For more information, see link:https://docs.aws.amazon.com/vpc/latest/userguide/VPC_Scenario3.html[VPC with public and private subnets and AWS Site-to-Site VPN access] in the AWS documentation.
diff --git a/modules/osd-aws-privatelink-config-dns-forwarding.adoc b/modules/osd-aws-privatelink-config-dns-forwarding.adoc new file mode 100644 index 0000000000..ce8bd117d2 --- /dev/null +++ b/modules/osd-aws-privatelink-config-dns-forwarding.adoc @@ -0,0 +1,30 @@
+// Module included in the following assemblies:
+//
+// * rosa_getting_started/rosa-aws-privatelink-creating-cluster.adoc
+
+[id="osd-aws-privatelink-config-dns-forwarding_{context}"]
+= Configuring AWS PrivateLink DNS forwarding
+
+With AWS PrivateLink clusters, a public hosted zone and a private hosted zone are created in Route 53. With the private hosted zone, records within the zone are resolvable only from within the VPC to which it is assigned.
+
+The _Let's Encrypt DNS-01_ validation requires a public zone so that valid, publicly trusted certificates can be issued for the domain. The validation records are deleted after _Let's Encrypt_ validation is complete; however, the zone is still required for issuing and renewing these certificates, which are typically required every 60 days. While these zones usually appear empty, they serve a critical role in the validation process.
+
+For more information about private hosted zones, see the link:https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/hosted-zones-private.html[AWS private hosted zones documentation]. For more information about public hosted zones, see the link:https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/AboutHZWorkingWith.html[AWS public hosted zones documentation].
+
+.Prerequisites
+
+* Your corporate network or other VPC has connectivity
+* UDP port 53 and TCP port 53 are enabled across your networks to allow DNS queries
+* You have created an AWS PrivateLink cluster using {product-title}
+
+.Procedure
+
+. To allow records such as `api.` and `*.apps.` to resolve outside of the VPC, link:https://aws.amazon.com/premiumsupport/knowledge-center/route53-resolve-with-inbound-endpoint/[configure a Route 53 Resolver Inbound Endpoint].
+
+. When you configure the inbound endpoint, select the VPC and private subnets that were used when you created the cluster.
+
+. 
After the endpoints are operational and associated, configure your corporate network to forward DNS queries to those IP addresses for the top-level cluster domain, such as `drow-pl-01.htno.p1.openshiftapps.com`.
+
+. If you are forwarding DNS queries from one VPC to another VPC, link:https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/resolver-rules-managing.html[configure forwarding rules].
+
+. If you are configuring your remote network DNS server, see your specific DNS server documentation to configure selective DNS forwarding for the installed cluster domain.
diff --git a/modules/osd-aws-privatelink-firewall-prerequisites.adoc b/modules/osd-aws-privatelink-firewall-prerequisites.adoc new file mode 100644 index 0000000000..ac80e35936 --- /dev/null +++ b/modules/osd-aws-privatelink-firewall-prerequisites.adoc @@ -0,0 +1,291 @@
+// Module included in the following assemblies:
+//
+// * rosa_getting_started/rosa-aws-prereqs.adoc
+
+[id="osd-aws-privatelink-firewall-prerequisites"]
+= AWS PrivateLink firewall prerequisites
+
+If you are using a firewall and want to create your cluster using AWS PrivateLink, you must configure your firewall to grant access to the sites that {product-title} requires.
+
+.Procedure
+
+. Allowlist the following URLs that are used to install and download packages and tools:
++
+[cols="3,4,1",options="header"]
+|===
+|URL | Function | Port
+|`registry.redhat.io`
+|Required. Provides core components such as dev tools and Operator-based add-ons, and Red Hat provided container images including middleware and Universal Base Image.
+|443, 80
+
+|`quay.io`
+|Required. Used by the cluster to download the platform container images.
+|443, 80
+
+|`.quay.io`
+|Required. Used by the cluster to download the platform container images.
+|443, 80
+
+|`sso.redhat.com`
+|Required. The `https://cloud.redhat.com/openshift` site uses authentication from `sso.redhat.com` to download the pull secret and use Red Hat SaaS solutions to facilitate monitoring of your subscriptions, cluster inventory, chargeback reporting, and so on.
+|443, 80
+
+|`pull.q1w2.quay.rhcloud.com`
+|Recommended. Provides a fallback registry used by the cluster when quay.io is not available.
+|443, 80
+
+|`.q1w2.quay.rhcloud.com`
+|Recommended. Enables a CNAME to resolve to a regionalized endpoint.
+|443, 80
+
+|`openshift.org`
+|Required. Provides Red Hat Enterprise Linux CoreOS (RHCOS) images.
+|443, 80
+
+|`console.redhat.com`
+|Required. Allows interactions between the cluster and OpenShift Cluster Manager (OCM) to enable functionality, such as scheduling upgrades.
+|443, 80
+
+|`quay-registry.s3.amazonaws.com`
+|Required. Used to access Quay image content in AWS.
+|443, 80
+|===
++
+When you add a site such as `quay.io` to your allowlist, do not add a wildcard entry such as `*.quay.io` to your denylist. In most cases, image registries use a content delivery network (CDN) to serve images. If a firewall blocks access, then image downloads are denied when the initial download request is redirected to a host name such as `cdn01.quay.io`.
++
+CDN host names, such as `cdn01.quay.io`, are covered when you add a wildcard entry, such as `.quay.io`, in your allowlist.
++
+Allowlist any site that provides resources for a language or framework that your builds require.
 + +. Managed clusters require telemetry to be enabled to allow Red Hat to react more quickly to problems, to better support our customers, and to better understand how product upgrades impact clusters. +See link:https://docs.openshift.com/container-platform/4.6/support/remote_health_monitoring/about-remote-health-monitoring.html[About remote health monitoring] for more information about how remote health monitoring data is used by Red Hat. ++ +[cols="3,4,1",options="header"] +|=== +|URL | Function | Port + +|`https://cloud.redhat.com` +|Required. Used by the cluster for the `insights operator` that integrates with the SaaS Red Hat Insights. +|443, 80 + +|`cert-api.access.redhat.com` +|Required. Used by telemetry. +|443, 80 + +|`api.access.redhat.com` +|Required. Used by telemetry. +|443, 80 + +|`infogw.api.openshift.com` +|Required. Used by telemetry. +|443, 80 +|=== + +. For Amazon Web Services (AWS), you must grant access to the URLs that provide the AWS API and DNS services: +* You can grant access by allowing the `.amazonaws.com` wildcard: ++ +[cols="3,4,1",options="header"] +|=== +|URL | Function | Requirement + +|`.amazonaws.com` +|Required. Used to access AWS services and resources. +|443, 80 + +|`oso-rhc4tp-docker-registry.s3-us-west-2.amazonaws.com` +|Required. Used to access AWS services and resources when using strict security requirements. Review the AWS Service Endpoints in the AWS documentation to determine the exact endpoints to allow for the regions that you use. +|443, 80 +|=== ++ +* Alternatively, you can grant access by allowing the following regional AWS service endpoints: ++ +[cols="3,4,1",options="header"] +|=== +|URL | Function | Port +|`ec2..amazonaws.com` +|Required. Used for regional access to Amazon EC2 services. EC2 instances are required to deploy the control plane and data plane functions of ROSA. Replace `` with an AWS region code, for example `us-east-1`. +|443, 80 + +|`elasticloadbalancing..amazonaws.com` +|Required. Used to access Amazon Elastic Load Balancers (ELBs) for API and application load balancing. +|443, 80 + +|`-..amazonaws.com` +|Required. Used for access to the registry. +|443, 80 +|=== ++ +The cluster ID and shard information is generated at installation time. Use `rosa describe cluster --cluster=` to get the ``, ``, and `` values. ++ +You can grant access to `..amazonaws.com` when you create a cluster and later refine the firewall configuration by specifying the cluster ID and shard. ++ +Review the link:https://docs.aws.amazon.com/general/latest/gr/rande.html[AWS Service Endpoints] in the AWS documentation to determine the exact endpoints to allow for the regions that you use. + +. Allowlist the following URLs: ++ +[cols="3,4,1",options="header"] +|=== +|URL | Function | Port + +|`mirror.openshift.com` +|Used to access mirrored installation content and images. This site is also a source of release image signatures, although the Cluster Version Operator needs only a single functioning source. Required if you do not allow `storage.googleapis.com/openshift-release`. +|443, 80 + +|`storage.googleapis.com/openshift-release` +|Recommended. Alternative site to mirror.openshift.com/. Used to download platform release signatures that are used by the cluster to know what images to pull from quay.io. +|443, 80 + +|`.apps..` +|Required. Used to access the default cluster routes unless you set an ingress wildcard during installation. +|443, 80 + +|`quay-registry.s3.amazonaws.com` +|Required. Used to access Quay image content in AWS. 
+|443, 80 + +|`api.openshift.com` +|Required. Used to check if updates are available for the cluster. +|443, 80 + +|`art-rhcos-ci.s3.amazonaws.com` +|Required. Specifies the {op-system-first} images to download. +|443, 80 + +|`cloud.redhat.com/openshift` +|Required. Used for cluster tokens. +|443, 80 + +|`registry.access.redhat.com` +|Required. Used to access the `odo` CLI tool that helps developers build on OpenShift and Kubernetes. +|443, 80 + +|`quayio-production-s3.s3.amazonaws.com` +|Required. Used to install and manage clusters in an AWS environment. +|443, 80 + +|`cm-quay-production-s3.s3.amazonaws.com` +|Required. Used to install and manage clusters in an AWS environment. +|443, 80 + +|`ec2.amazonaws.com` +|Required. Used to install and manage clusters in an AWS environment. +|443, 80 + +|`events.amazonaws.com` +|Required. Used to install and manage clusters in an AWS environment. +|443, 80 + +|`iam.amazonaws.com` +|Required. Used to install and manage clusters in an AWS environment. +|443, 80 + +|`route53.amazonaws.com` +|Required. Used to install and manage clusters in an AWS environment. +|443, 80 + +|`sts.amazonaws.com` +|Required. Used to install and manage clusters in an AWS environment. +|443, 80 + +|`ec2..amazonaws.com` +|Required. Region dependent. Must be added per cluster and per region. +|443, 80 + +|`CLUSTER-NAME-k5bxz-image-registry--lsiflffxtmfyikx.s3.dualstack.us-east-1.amazonaws.com` +|Required. Region dependent. Must be added per cluster and per region. +|443, 80 + +|`elasticloadbalancing..amazonaws.com` +|Required. Region dependent. Must be added per cluster and per region. +|443, 80 +|=== ++ +Region is created during installation. To find the region, run: ++ +[source,terminal] +---- + $ rosa describe cluster --cluster= +---- ++ +To retrieve the endpoint, run: ++ +[source,terminal] +---- +$ oc -n openshift-image-registry get pod -l docker-registry=default -o json | jq '.items[].spec.containers[].env[] | select(.name=="REGISTRY_STORAGE_S3_BUCKET")' +---- + +. Operators require route access to perform health checks. Specifically, the authentication and web console Operators connect to two routes to verify that the routes work. If you are the cluster administrator and do not want to allow *.apps.., then you must allow these routes: ++ +[cols="3,4,1",options="header"] +|=== +|URL | Function | Port + +|`oauth-openshift.apps...` +|Required. +|443 + +|`console-openshift-console.apps...`, or the host name that is specified in the `spec.route.hostname` field of the `consoles.operator/cluster` object if the field is not empty +|Required. +|443 + +|`canary-openshift-ingress-canary.apps...s1.devshift.org` +|Required. +|443 +|=== + +. If you use a default Red Hat Network Time Protocol (NTP) server, allowlist the following URLs: ++ +* 1.rhel.pool.ntp.org +* 2.rhel.pool.ntp.org +* 3.rhel.pool.ntp.org ++ +[NOTE] +==== +If you do not use a default Red Hat NTP server, verify the NTP server for your platform and allowlist it in your firewall. +==== + +. Allowlist the following OpenShift Dedicated URLs: ++ +[cols="4,3,1",options="header"] +|=== +|URL | Function | Port + +|`api.pagerduty.com` and `events.pagerduty.com` +|Required. This alerting service is used by the in-cluster alertmanager to send alerts notifying Red Hat SRE of an event to take action on. +|443 + +|`api.deadmanssnitch.com` and `nosnch.in` +|Required. Alerting service used by OpenShift Dedicated to send periodic pings that indicate whether the cluster is available and running. 
+|443 + +|`sftp.access.redhat.com` +|Recommended. The FTP server used by `must-gather-operator` to upload diagnostic logs to help troubleshoot issues with the cluster. +|443 + +|`.osdsecuritylogs.splunkcloud.com` +`inputs1.osdsecuritylogs.splunkcloud.com` +`inputs2.osdsecuritylogs.splunkcloud.com` +`inputs4.osdsecuritylogs.splunkcloud.com` +`inputs5.osdsecuritylogs.splunkcloud.com` +`inputs6.osdsecuritylogs.splunkcloud.com` +`inputs7.osdsecuritylogs.splunkcloud.com` +`inputs8.osdsecuritylogs.splunkcloud.com` +`inputs9.osdsecuritylogs.splunkcloud.com` +`inputs10.osdsecuritylogs.splunkcloud.com` +`inputs11.osdsecuritylogs.splunkcloud.com` +`inputs12.osdsecuritylogs.splunkcloud.com` +`inputs13.osdsecuritylogs.splunkcloud.com` +`inputs14.osdsecuritylogs.splunkcloud.com` +`inputs15.osdsecuritylogs.splunkcloud.com` + +`http-inputs-osdsecuritylogs.splunkcloud.com` +|Required. Used by the `splunk-forwarder-operator` as a logging forwarding endpoint to be used by Red Hat SRE for log-based alerting. +|443 + +|`observatorium.api.openshift.com` +|Required. Used for Managed OpenShift-specific telemetry. +|443 +|=== ++ +. Allowlist any site that provides resources for a language or framework that your builds require. +. Allowlist any outbound URLs that depend on the languages and frameworks used in OpenShift. See link:https://access.redhat.com/solutions/2998411[OpenShift Outbound URLs to Allow] for a list of recommended URLs to be allowed on the firewall or proxy. diff --git a/modules/osd-aws-privatelink-required-resources.adoc b/modules/osd-aws-privatelink-required-resources.adoc new file mode 100644 index 0000000000..f052f4042a --- /dev/null +++ b/modules/osd-aws-privatelink-required-resources.adoc @@ -0,0 +1,40 @@ +[id="osd-aws-privatelink-required-resources.adoc_{context}"] += Requirements for using AWS PrivateLink clusters + +For AWS PrivateLink clusters, internet gateways, NAT gateways and public subnets are not required, but the private subnets must have internet connectivity provided to install required components. At least one single private subnet is required for Single-AZ clusters and at least 3 private subnets are required for Multi-AZ clusters. The following table shows the AWS resources that are required for a successful installation: + +.Required AWS resources +[cols="1a,2a,3a",options="header"] +|=== +| Component | AWS Type | Description +| VPC +|* AWS::EC2::VPC +* AWS::EC2::VPCEndpoint +| You must provide a VPC for the cluster to use. +| Network access control +|* AWS::EC2::NetworkAcl +* AWS::EC2::NetworkAclEntry +| + +You must allow access to the following ports: +[cols="35%,65%",options="header"] +!=== +!Port !Reason +! 80 +! Inbound HTTP traffic +! 443 +! Inbound HTTPS traffic +! 22 +! Inbound SSH traffic +! 1024-65535 +! Inbound ephemeral traffic +! 0-65535 +! Outbound ephemeral traffic +!=== +| Private subnets +|* AWS::EC2::Subnet +* AWS::EC2::RouteTable +* AWS::EC2::SubnetRouteTableAssociation +| Your VPC must have private subnets in 1 availability zone for Single-AZ deployments or 3 availability zones for Multi-AZ deployments. +You must provide appropriate routes and route tables. 
+|=== diff --git a/modules/osd-intro.adoc b/modules/osd-intro.adoc new file mode 100644 index 0000000000..530d52b9b0 --- /dev/null +++ b/modules/osd-intro.adoc @@ -0,0 +1,58 @@ + +// Module included in the following assemblies: +// +// * assemblies/osd-architecture.adoc + +[id="osd-intro_{context}"] += Introduction to {product-title} + +{product-title} is professionally managed by Red Hat and hosted on {AWS} or {GCP}. Each {product-title} cluster comes with a fully managed link:https://access.redhat.com/documentation/en-us/openshift_container_platform/4.7/html/architecture/control-plane[control plane] (Control and Infrastructure nodes), application nodes, installation and management by Red Hat Site Reliability Engineers (SRE), premium Red Hat Support, and cluster services such as logging, metrics, monitoring, notifications portal, and a cluster portal. + +{product-title} provides enterprise-ready enhancements to Kubernetes, including the following enhancements: + +* {product-title} clusters are deployed on AWS or GCP environments and can be used as part of a hybrid approach for application management. + +* Integrated Red Hat technology. Major components in {product-title} come from Red Hat Enterprise Linux and related Red Hat technologies. {product-title} benefits from the intense testing and certification initiatives for Red Hat’s enterprise quality software. + +* Open source development model. Development is completed in the open, and the source code is available from public software repositories. This open collaboration fosters rapid innovation and development. + +To learn about options for assets you can create when you build and deploy containerized Kubernetes applications in {OCP}, see link:https://docs.openshift.com/container-platform/4.7/architecture/understanding-development.html[Understanding {OCP} development] in the {OCP} documentation. + + +[id="rhcos_{context}"] +== Custom operating system +{product-title} uses Red Hat Enterprise Linux CoreOS (RHCOS), a container-oriented operating system that combines some of the best features and functions of the CoreOS and Red Hat Atomic Host operating systems. RHCOS is specifically designed for running containerized applications from {product-title} and works with new tools to provide fast installation, Operator-based management, and simplified upgrades. + +RHCOS includes: + +- Ignition, which {product-title} uses as a firstboot system configuration for initially bringing up and configuring machines. +- CRI-O, a Kubernetes native container runtime implementation that integrates closely with the operating system to deliver an efficient and optimized Kubernetes experience. CRI-O provides facilities for running, stopping, and restarting containers. +- Kubelet, the primary node agent for Kubernetes that is responsible for launching and monitoring containers. + +[id="osd-key-features_{context}"] +== Other key features +Operators are both the fundamental unit of the {product-title} code base and a convenient way to deploy applications and software components for your applications to use. In {product-title}, Operators serve as the platform foundation and remove the need for manual upgrades of operating systems and control plane applications. {product-title} Operators such as the Cluster Version Operator and Machine Config Operator allow simplified, cluster-wide management of those critical components. 
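For example, you can list the cluster Operators and review their status with the OpenShift CLI (`oc`); the level of detail available depends on the permissions granted to your role on the managed cluster:

[source,terminal]
----
$ oc get clusteroperators
----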
+ +Operator Lifecycle Manager (OLM) and the OperatorHub provide facilities for storing and distributing Operators to people developing and deploying applications. + +The Red Hat Quay Container Registry is a Quay.io container registry that serves most of the container images and Operators to {product-title} clusters. Quay.io is a public registry version of Red Hat Quay that stores millions of images and tags. + +Other enhancements to Kubernetes in {product-title} include improvements in software defined networking (SDN), authentication, log aggregation, monitoring, and routing. {product-title} also offers a comprehensive web console and the custom OpenShift CLI (`oc`) interface. + +[id="telemetry_{context}"] +== Internet and Telemetry access for {product-title} + +In {product-title}, you require access to the Internet to install your cluster. The Telemetry service, which runs by default to provide metrics about cluster health and the success of updates, also requires Internet access. If your cluster is connected to the Internet, Telemetry runs automatically, and your cluster is registered to the {cloud-redhat-com}. + +Once you confirm that your Red Hat OpenShift Cluster Manager inventory is correct, either maintained automatically by Telemetry or manually using OCM, use link:https://access.redhat.com/documentation/en-us/subscription_central/2020-04/html/getting_started_with_subscription_watch/con-how-to-select-datacollection-tool_assembly-requirements-and-your-responsibilities-ctxt#red_hat_openshift[subscription watch] to track your {product-title} subscriptions at the account or multi-cluster level. + +You must have Internet access to: + +* Access the {cloud-redhat-com} page to download the installation program and perform subscription management. If the cluster has Internet access and you do not disable Telemetry, that service automatically entitles your cluster. + +* Obtain the packages that are required to perform cluster updates. + +[IMPORTANT] +==== +If your cluster cannot have direct Internet access, you can perform a restricted network installation on some types of infrastructure that you provision. During that process, you download the content that is required and use it to populate a mirror registry with the packages that you need to install a cluster and generate the installation program. With some installation types, the environment that you install your cluster in will not require Internet access. Before you update the cluster, you update the content of the mirror registry. +==== diff --git a/modules/osd-monitoring-assigning-tolerations-to-monitoring-components.adoc b/modules/osd-monitoring-assigning-tolerations-to-monitoring-components.adoc new file mode 100644 index 0000000000..9d69d216e9 --- /dev/null +++ b/modules/osd-monitoring-assigning-tolerations-to-monitoring-components.adoc @@ -0,0 +1,68 @@ +// Module included in the following assemblies: +// +// * monitoring/osd-configuring-the-monitoring-stack.adoc + +[id="assigning-tolerations-to-monitoring-components_{context}"] += Assigning tolerations to components that monitor user-defined projects + +You can assign tolerations to the components that monitor user-defined projects, to enable moving them to tainted worker nodes. Scheduling is not permitted on master or infrastructure nodes. + +.Prerequisites + +* You have access to the cluster as a user with the `dedicated-admin` role. +* You have created the `user-workload-monitoring-config` `ConfigMap` object in the `openshift-user-workload-monitoring` namespace. 
+* The OpenShift CLI (`oc`) is installed. + +.Procedure + +. Edit the `ConfigMap` object: +.. Edit the `user-workload-monitoring-config` `ConfigMap` object in the `openshift-user-workload-monitoring` project: ++ +[source,terminal] +---- +$ oc -n openshift-user-workload-monitoring edit configmap user-workload-monitoring-config +---- + +.. Specify `tolerations` for the component: ++ +[source,yaml] +---- +apiVersion: v1 +kind: ConfigMap +metadata: + name: user-workload-monitoring-config + namespace: openshift-user-workload-monitoring +data: + config.yaml: | + : + tolerations: + +---- ++ +Substitute `` and `` accordingly. ++ +For example, `oc adm taint nodes node1 key1=value1:NoSchedule` adds a taint to `node1` with the key `key1` and the value `value1`. This prevents monitoring components from deploying pods on `node1` unless a toleration is configured for that taint. The following example configures the `thanosRuler` component to tolerate the example taint: ++ +[source,yaml] +---- +apiVersion: v1 +kind: ConfigMap +metadata: + name: user-workload-monitoring-config + namespace: openshift-user-workload-monitoring +data: + config.yaml: | + thanosRuler: + tolerations: + - key: "key1" + operator: "Equal" + value: "value1" + effect: "NoSchedule" +---- + +. Save the file to apply the changes. The new component placement configuration is applied automatically. ++ +[WARNING] +==== +When changes are saved to a monitoring config map, the pods and other resources in the related project might be redeployed. The running monitoring processes in that project might also be restarted. +==== diff --git a/modules/osd-monitoring-components-for-monitoring-user-defined-projects.adoc b/modules/osd-monitoring-components-for-monitoring-user-defined-projects.adoc new file mode 100644 index 0000000000..bba180d3bf --- /dev/null +++ b/modules/osd-monitoring-components-for-monitoring-user-defined-projects.adoc @@ -0,0 +1,29 @@ +// Module included in the following assemblies: +// +// * monitoring/osd-understanding-the-monitoring-stack.adoc + +[id="components-for-monitoring-user-defined-projects_{context}"] += Components for monitoring user-defined projects + +{product-title} includes an optional enhancement to the monitoring stack that enables you to monitor services and pods in user-defined projects. This feature includes the following components: + +.Components for monitoring user-defined projects +[cols="1,2",options="header"] +|=== + +|Component|Description + +|Prometheus Operator +|The Prometheus Operator in the `openshift-user-workload-monitoring` project creates, configures, and manages Prometheus and Thanos Ruler instances in the same project. + +|Prometheus +|Prometheus is the monitoring system through which monitoring is provided for user-defined projects. Prometheus sends alerts to Alertmanager for processing. However, alert routing is not currently supported. + +|Thanos Ruler +|The Thanos Ruler is a rule evaluation engine for Prometheus that is deployed as a separate process. In {product-title} {product-version}, Thanos Ruler provides rule and alerting evaluation for the monitoring of user-defined projects. + +|=== + +// TODO: Just checking that the {product-version} is correct and replaces properly for dedicated + +All of these components are monitored by the stack and are automatically updated when {product-title} is updated. 
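As a quick check, after monitoring for user-defined projects is enabled, you can see these components running as pods in the `openshift-user-workload-monitoring` project; the exact pod names and counts vary by cluster:

[source,terminal]
----
$ oc -n openshift-user-workload-monitoring get pods
----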
diff --git a/modules/osd-monitoring-configurable-monitoring-components.adoc b/modules/osd-monitoring-configurable-monitoring-components.adoc new file mode 100644 index 0000000000..fb466dfd83 --- /dev/null +++ b/modules/osd-monitoring-configurable-monitoring-components.adoc @@ -0,0 +1,17 @@ +// Module included in the following assemblies: +// +// * monitoring/osd-configuring-the-monitoring-stack.adoc + +[id="configurable-monitoring-components_{context}"] += Configurable monitoring components + +This table shows the monitoring components you can configure and the keys used to specify the components in the `user-workload-monitoring-config` `ConfigMap` objects: + +.Configurable monitoring components +[options="header"] +|=== +|Component |user-workload-monitoring-config config map key +|Prometheus Operator |`prometheusOperator` +|Prometheus |`prometheus` +|Thanos Ruler |`thanosRuler` +|=== diff --git a/modules/osd-monitoring-configuring-a-local-persistent-volume-claim.adoc b/modules/osd-monitoring-configuring-a-local-persistent-volume-claim.adoc new file mode 100644 index 0000000000..301f539c56 --- /dev/null +++ b/modules/osd-monitoring-configuring-a-local-persistent-volume-claim.adoc @@ -0,0 +1,95 @@ +// Module included in the following assemblies: +// +// * monitoring/osd-configuring-the-monitoring-stack.adoc + +[id="configuring-a-local-persistent-volume-claim_{context}"] += Configuring a local persistent volume claim + +For monitoring components to use a persistent volume (PV), you must configure a persistent volume claim (PVC). + +.Prerequisites + +* You have access to the cluster as a user with the `dedicated-admin` role. +* You have created the `user-workload-monitoring-config` `ConfigMap` object. +* You have installed the OpenShift CLI (`oc`). + +.Procedure + +. To configure a PVC for a component that monitors user-defined projects, edit the `ConfigMap` object: +.. Edit the `user-workload-monitoring-config` `ConfigMap` object in the `openshift-user-workload-monitoring` project: ++ +[source,terminal] +---- +$ oc -n openshift-user-workload-monitoring edit configmap user-workload-monitoring-config +---- + +.. Add your PVC configuration for the component under `data.config.yaml`: ++ +[source,yaml] +---- +apiVersion: v1 +kind: ConfigMap +metadata: + name: user-workload-monitoring-config + namespace: openshift-user-workload-monitoring +data: + config.yaml: | + : + volumeClaimTemplate: + spec: + storageClassName: + resources: + requests: + storage: +---- ++ +See the link:https://kubernetes.io/docs/concepts/storage/persistent-volumes/#persistentvolumeclaims[Kubernetes documentation on PersistentVolumeClaims] for information on how to specify `volumeClaimTemplate`. ++ +The following example configures a PVC that claims local persistent storage for the Prometheus instance that monitors user-defined projects: ++ +[source,yaml] +---- +apiVersion: v1 +kind: ConfigMap +metadata: + name: user-workload-monitoring-config + namespace: openshift-user-workload-monitoring +data: + config.yaml: | + prometheus: + volumeClaimTemplate: + spec: + storageClassName: local-storage + resources: + requests: + storage: 40Gi +---- ++ +In the above example, the storage class created by the Local Storage Operator is called `local-storage`. 
++ +The following example configures a PVC that claims local persistent storage for Thanos Ruler: ++ +[source,yaml] +---- +apiVersion: v1 +kind: ConfigMap +metadata: + name: user-workload-monitoring-config + namespace: openshift-user-workload-monitoring +data: + config.yaml: | + thanosRuler: + volumeClaimTemplate: + spec: + storageClassName: local-storage + resources: + requests: + storage: 40Gi +---- + +. Save the file to apply the changes. The pods affected by the new configuration are restarted automatically and the new storage configuration is applied. ++ +[WARNING] +==== +When changes are saved to a monitoring config map, the pods and other resources in the related project might be redeployed. The running monitoring processes in that project might also be restarted. +==== diff --git a/modules/osd-monitoring-configuring-the-monitoring-stack.adoc b/modules/osd-monitoring-configuring-the-monitoring-stack.adoc new file mode 100644 index 0000000000..06c87f9883 --- /dev/null +++ b/modules/osd-monitoring-configuring-the-monitoring-stack.adoc @@ -0,0 +1,71 @@ +// Module included in the following assemblies: +// +// * monitoring/osd-configuring-the-monitoring-stack.adoc + +[id="configuring-the-monitoring-stack_{context}"] += Configuring the monitoring stack + +In {product-title}, you can configure the stack that monitors workloads for user-defined projects by using the `user-workload-monitoring-config` `ConfigMap` object. Config maps configure the Cluster Monitoring Operator (CMO), which in turn configures the components of the stack. + +.Prerequisites + +* You have access to the cluster as a user with the `dedicated-admin` role. +* You have created the `user-workload-monitoring-config` `ConfigMap` object. +* You have installed the OpenShift CLI (`oc`). + +.Procedure + +. Edit the `ConfigMap` object. +.. Edit the `user-workload-monitoring-config` `ConfigMap` object in the `openshift-user-workload-monitoring` project: ++ +[source,terminal] +---- +$ oc -n openshift-user-workload-monitoring edit configmap user-workload-monitoring-config +---- + +.. Add your configuration under `data.config.yaml` as a key-value pair `<component_name>:{nbsp}<component_configuration_options>`: ++ +[source,yaml] +---- +apiVersion: v1 +kind: ConfigMap +metadata: + name: user-workload-monitoring-config + namespace: openshift-user-workload-monitoring +data: + config.yaml: | + <component>: + <configuration_for_the_component> +---- ++ +Substitute `<component>` and `<configuration_for_the_component>` accordingly. ++ +The following example `ConfigMap` object configures a data retention period and minimum container resource requests for Prometheus. This relates to the Prometheus instance that monitors user-defined projects only: ++ +[source,yaml] +---- +apiVersion: v1 +kind: ConfigMap +metadata: + name: user-workload-monitoring-config + namespace: openshift-user-workload-monitoring +data: + config.yaml: | + prometheus: <1> + retention: 24h <2> + resources: + requests: + cpu: 200m <3> + memory: 2Gi <4> +---- +<1> Defines the Prometheus component and the subsequent lines define its configuration. +<2> Configures a 24 hour data retention period for the Prometheus instance that monitors user-defined projects. +<3> Defines a minimum resource request of 200 millicores for the Prometheus container. +<4> Defines a minimum pod resource request of 2 GiB of memory for the Prometheus container. + +. Save the file to apply the changes to the `ConfigMap` object. The pods affected by the new configuration are restarted automatically.
++ +[WARNING] +==== +When changes are saved to a monitoring config map, the pods and other resources in the related project might be redeployed. The running monitoring processes in that project might also be restarted. +==== diff --git a/modules/osd-monitoring-deploying-a-sample-service.adoc b/modules/osd-monitoring-deploying-a-sample-service.adoc new file mode 100644 index 0000000000..cf6f5c33bb --- /dev/null +++ b/modules/osd-monitoring-deploying-a-sample-service.adoc @@ -0,0 +1,86 @@ +// Module included in the following assemblies: +// +// * monitoring/osd-managing-metrics.adoc + +[id="deploying-a-sample-service_{context}"] += Deploying a sample service + +To test monitoring of a service in a user-defined project, you can deploy a sample service. + +.Procedure + +. Create a YAML file for the service configuration. In this example, it is called `prometheus-example-app.yaml`. + +. Add the following deployment and service configuration details to the file: ++ +[source,yaml] +---- +apiVersion: v1 +kind: Namespace +metadata: + name: ns1 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app: prometheus-example-app + name: prometheus-example-app + namespace: ns1 +spec: + replicas: 1 + selector: + matchLabels: + app: prometheus-example-app + template: + metadata: + labels: + app: prometheus-example-app + spec: + containers: + - image: quay.io/brancz/prometheus-example-app:v0.2.0 + imagePullPolicy: IfNotPresent + name: prometheus-example-app +--- +apiVersion: v1 +kind: Service +metadata: + labels: + app: prometheus-example-app + name: prometheus-example-app + namespace: ns1 +spec: + ports: + - port: 8080 + protocol: TCP + targetPort: 8080 + name: web + selector: + app: prometheus-example-app + type: ClusterIP +---- ++ +This configuration deploys a service named `prometheus-example-app` in the user-defined `ns1` project. This service exposes the custom `version` metric. + +. Apply the configuration to the cluster: ++ +[source,terminal] +---- +$ oc apply -f prometheus-example-app.yaml +---- ++ +It takes some time to deploy the service. + +. You can check that the pod is running: ++ +[source,terminal] +---- +$ oc -n ns1 get pod +---- ++ +.Example output +[source,terminal] +---- +NAME READY STATUS RESTARTS AGE +prometheus-example-app-7857545cb7-sbgwq 1/1 Running 0 81m +---- diff --git a/modules/osd-monitoring-exploring-the-visualized-metrics.adoc b/modules/osd-monitoring-exploring-the-visualized-metrics.adoc new file mode 100644 index 0000000000..d30d799372 --- /dev/null +++ b/modules/osd-monitoring-exploring-the-visualized-metrics.adoc @@ -0,0 +1,47 @@ +// Module included in the following assemblies: +// +// * monitoring/osd-managing-metrics.adoc + +[id="exploring-the-visualized-metrics_{context}"] += Exploring the visualized metrics + +After running the queries, the metrics are displayed on an interactive plot. The X-axis in the plot represents time and the Y-axis represents metrics values. Each metric is shown as a colored line on the graph. You can manipulate the plot interactively and explore the metrics. + +.Procedure + +In the *Administrator* perspective: + +. Initially, all metrics from all enabled queries are shown on the plot. You can select which metrics are shown. ++ +[NOTE] +==== +By default, the query table shows an expanded view that lists every metric and its current value. You can select *˅* to minimize the expanded view for a query. +==== + +** To hide all metrics from a query, click {kebab} for the query and click *Hide all series*. 
+ +** To hide a specific metric, go to the query table and click the colored square near the metric name. + +. To zoom into the plot and change the time range, do one of the following: + +** Visually select the time range by clicking and dragging on the plot horizontally. + +** Use the menu in the left upper corner to select the time range. + +. To reset the time range, select *Reset Zoom*. + +. To display outputs for all queries at a specific point in time, hover over the plot at that point. The query outputs appear in a pop-up box. + +. To hide the plot, select *Hide Graph*. + +In the *Developer* perspective: + +. To zoom into the plot and change the time range, do one of the following: + +** Visually select the time range by clicking and dragging on the plot horizontally. + +** Use the menu in the left upper corner to select the time range. + +. To reset the time range, select *Reset Zoom*. + +. To display outputs for all queries at a specific point in time, hover over the plot at that point. The query outputs appear in a pop-up box. diff --git a/modules/osd-monitoring-limiting-scrape-samples-in-user-defined-projects.adoc b/modules/osd-monitoring-limiting-scrape-samples-in-user-defined-projects.adoc new file mode 100644 index 0000000000..7e19adae0a --- /dev/null +++ b/modules/osd-monitoring-limiting-scrape-samples-in-user-defined-projects.adoc @@ -0,0 +1,20 @@ +// Module included in the following assemblies: +// +// * monitoring/osd-configuring-the-monitoring-stack.adoc + +[id="controlling-the-impact-of-unbound-attributes-in-user-defined-projects_{context}"] += Controlling the impact of unbound metrics attributes in user-defined projects + +Developers can create labels to define attributes for metrics in the form of key-value pairs. The number of potential key-value pairs corresponds to the number of possible values for an attribute. An attribute that has an unlimited number of potential values is called an unbound attribute. For example, a `customer_id` attribute is unbound because it has an infinite number of possible values. + +Every assigned key-value pair has a unique time series. The use of many unbound attributes in labels can result in an exponential increase in the number of time series created. This can impact Prometheus performance and can consume a lot of disk space. + +A `dedicated-admin` can use the following measure to control the impact of unbound metrics attributes in user-defined projects: + +* *Limit the number of samples that can be accepted* per target scrape in user-defined projects +// * *Create alerts* that fire when a scrape sample threshold is reached or when the target cannot be scraped + +[NOTE] +==== +Limiting scrape samples can help prevent the issues caused by adding many unbound attributes to labels. Developers can also prevent the underlying cause by limiting the number of unbound attributes that they define for metrics. Using attributes that are bound to a limited set of possible values reduces the number of potential key-value pair combinations. 
+==== diff --git a/modules/osd-monitoring-maintenance-and-support.adoc b/modules/osd-monitoring-maintenance-and-support.adoc new file mode 100644 index 0000000000..90d4c382d4 --- /dev/null +++ b/modules/osd-monitoring-maintenance-and-support.adoc @@ -0,0 +1,15 @@ +// Module included in the following assemblies: +// +// * monitoring/osd-configuring-the-monitoring-stack.adoc + +[id="maintenance-and-support_{context}"] += Maintenance and support for monitoring + +The supported way of configuring {product-title} Monitoring is by using the options described in this document. *Do not use other configurations, as they are unsupported.* + +[IMPORTANT] +==== +Installing another Prometheus instance is not supported by the Red Hat Site Reliability Engineers (SREs). +==== + +Configuration paradigms can change across Prometheus releases, and such cases can only be handled gracefully if all configuration possibilities are controlled. If you use configurations other than those described in this section, your changes will disappear because the `cluster-monitoring-operator` reconciles any differences. The Operator resets everything to the defined state by default and by design. diff --git a/modules/osd-monitoring-modifying-retention-time-for-prometheus-metrics-data.adoc b/modules/osd-monitoring-modifying-retention-time-for-prometheus-metrics-data.adoc new file mode 100644 index 0000000000..04d4ebd223 --- /dev/null +++ b/modules/osd-monitoring-modifying-retention-time-for-prometheus-metrics-data.adoc @@ -0,0 +1,63 @@ +// Module included in the following assemblies: +// +// * monitoring/osd-configuring-the-monitoring-stack.adoc + +[id="modifying-retention-time-for-prometheus-metrics-data_{context}"] += Modifying the retention time for Prometheus metrics data + +By default, the {product-title} monitoring stack configures the retention time for Prometheus data to be 15 days. You can modify the retention time for the Prometheus instance that monitors user-defined projects to change how soon the data is deleted. + +.Prerequisites + +* You have access to the cluster as a user with the `dedicated-admin` role. +* You have created the `user-workload-monitoring-config` `ConfigMap` object. +* You have installed the OpenShift CLI (`oc`). + +.Procedure + +. To modify the retention time for the Prometheus instance that monitors user-defined projects, edit the `ConfigMap` object: +.. Edit the `user-workload-monitoring-config` `ConfigMap` object in the `openshift-user-workload-monitoring` project: ++ +[source,terminal] +---- +$ oc -n openshift-user-workload-monitoring edit configmap user-workload-monitoring-config +---- + +.. Add your retention time configuration under `data.config.yaml`: ++ +[source,yaml] +---- +apiVersion: v1 +kind: ConfigMap +metadata: + name: user-workload-monitoring-config + namespace: openshift-user-workload-monitoring +data: + config.yaml: | + prometheus: + retention: <time_specification> +---- ++ +Substitute `<time_specification>` with a number directly followed by `ms` (milliseconds), `s` (seconds), `m` (minutes), `h` (hours), `d` (days), `w` (weeks), or `y` (years). ++ +The following example sets the retention time to 24 hours for the Prometheus instance that monitors user-defined projects: ++ +[source,yaml] +---- +apiVersion: v1 +kind: ConfigMap +metadata: + name: user-workload-monitoring-config + namespace: openshift-user-workload-monitoring +data: + config.yaml: | + prometheus: + retention: 24h +---- + +. Save the file to apply the changes. The pods affected by the new configuration are restarted automatically.
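++
+If you want to confirm the setting after the change rolls out, one option is to check the Prometheus stateful set for the retention argument. This sketch assumes the default `prometheus-user-workload` stateful set name:
++
+[source,terminal]
+----
+# Assumes the default prometheus-user-workload stateful set name
+$ oc -n openshift-user-workload-monitoring get statefulset prometheus-user-workload -o yaml | grep retention
+----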
++ +[WARNING] +==== +When changes are saved to a monitoring config map, the pods and other resources in the related project might be redeployed. The running monitoring processes in that project might also be restarted. +==== diff --git a/modules/osd-monitoring-moving-monitoring-components-to-different-nodes.adoc b/modules/osd-monitoring-moving-monitoring-components-to-different-nodes.adoc new file mode 100644 index 0000000000..b393242ba4 --- /dev/null +++ b/modules/osd-monitoring-moving-monitoring-components-to-different-nodes.adoc @@ -0,0 +1,87 @@ +// Module included in the following assemblies: +// +// * monitoring/osd-configuring-the-monitoring-stack.adoc + +[id="moving-monitoring-components-to-different-nodes_{context}"] += Moving monitoring components to different nodes + +You can move any of the components that monitor workloads for user-defined projects to specific worker nodes. It is not permitted to move components to master or infrastructure nodes. + +.Prerequisites + +* You have access to the cluster as a user with the `dedicated-admin` role. +* You have created the `user-workload-monitoring-config` `ConfigMap` object. +* You have installed the OpenShift CLI (`oc`). + +.Procedure + +. To move a component that monitors user-defined projects, edit the `ConfigMap` object: +.. Edit the `user-workload-monitoring-config` `ConfigMap` object in the `openshift-user-workload-monitoring` project: ++ +[source,terminal] +---- +$ oc -n openshift-user-workload-monitoring edit configmap user-workload-monitoring-config +---- + +.. Specify the `nodeSelector` constraint for the component under `data.config.yaml`: ++ +[source,yaml] +---- +apiVersion: v1 +kind: ConfigMap +metadata: + name: user-workload-monitoring-config + namespace: openshift-user-workload-monitoring +data: + config.yaml: | + <component>: + nodeSelector: + <node_key_1>: <node_value_1> + <node_key_2>: <node_value_2> + <...> +---- ++ +Substitute `<component>` accordingly and substitute `<node_key>: <node_value>` with the map of key-value pairs that specifies the destination nodes. Often, only a single key-value pair is used. ++ +The component can only run on nodes that have each of the specified key-value pairs as labels. The nodes can have additional labels as well. ++ +[IMPORTANT] +==== +Many of the monitoring components are deployed by using multiple pods across different nodes in the cluster to maintain high availability. When moving monitoring components to labeled nodes, ensure that enough matching nodes are available to maintain resilience for the component. If only one label is specified, ensure that enough nodes contain that label to distribute all of the pods for the component across separate nodes. Alternatively, you can specify multiple labels each relating to individual nodes. +==== ++ +[NOTE] +==== +If monitoring components remain in a `Pending` state after configuring the `nodeSelector` constraint, check the pod logs for errors relating to taints and tolerations. +==== ++ +For example, to move monitoring components for user-defined projects to specific worker nodes labeled `nodename: worker1` and `nodename: worker2`, use: ++ +[source,yaml] +---- +apiVersion: v1 +kind: ConfigMap +metadata: + name: user-workload-monitoring-config + namespace: openshift-user-workload-monitoring +data: + config.yaml: | + prometheusOperator: + nodeSelector: + nodename: worker1 + prometheus: + nodeSelector: + nodename: worker1 + nodename: worker2 + thanosRuler: + nodeSelector: + nodename: worker1 + nodename: worker2 +---- + +. Save the file to apply the changes.
The components affected by the new configuration are moved to the new nodes automatically. ++ +[WARNING] +==== +When changes are saved to a monitoring config map, the pods and other resources in the related project might be redeployed. The running monitoring processes in that project might also be restarted. +==== diff --git a/modules/osd-monitoring-querying-metrics-for-all-projects-as-an-administrator.adoc b/modules/osd-monitoring-querying-metrics-for-all-projects-as-an-administrator.adoc new file mode 100644 index 0000000000..ff509a98dd --- /dev/null +++ b/modules/osd-monitoring-querying-metrics-for-all-projects-as-an-administrator.adoc @@ -0,0 +1,44 @@ +// Module included in the following assemblies: +// +// * monitoring/osd-managing-metrics.adoc + +[id="querying-metrics-for-all-projects-as-an-administrator_{context}"] += Querying metrics for all projects as an administrator + +As a `dedicated-admin` or as a user with view permissions for all projects, you can access metrics for all default {product-title} and user-defined projects in the Metrics UI. + +[NOTE] +==== +Only dedicated administrators have access to the third-party UIs provided with {product-title} Monitoring. +==== + +.Prerequisites + +* You have access to the cluster as a user with the `dedicated-admin` role or with view permissions for all projects. + +.Procedure + +. From the *Administrator* perspective in the OpenShift web console, select *Monitoring* -> *Metrics*. + +. Select *Insert Metric at Cursor* to view a list of predefined queries. + +. To create a custom query, add your Prometheus Query Language (PromQL) query to the *Expression* field. + +. To add multiple queries, select *Add Query*. + +. To delete a query, select {kebab} next to the query, then choose *Delete query*. + +. To disable a query from being run, select {kebab} next to the query and choose *Disable query*. + +. Select *Run Queries* to run the queries that you have created. The metrics from the queries are visualized on the plot. If a query is invalid, the UI shows an error message. ++ +[NOTE] +==== +Queries that operate on large amounts of data might time out or overload the browser when drawing time series graphs. To avoid this, select *Hide graph* and calibrate your query using only the metrics table. Then, after finding a feasible query, enable the plot to draw the graphs. +==== + +. Optional: The page URL now contains the queries you ran. To use this set of queries again in the future, save this URL. + +.Additional resources + +* See the link:https://prometheus.io/docs/prometheus/latest/querying/basics/[Prometheus query documentation] for more information about creating PromQL queries. diff --git a/modules/osd-monitoring-querying-metrics-for-user-defined-projects-as-a-developer.adoc b/modules/osd-monitoring-querying-metrics-for-user-defined-projects-as-a-developer.adoc new file mode 100644 index 0000000000..3a3b6cbe0d --- /dev/null +++ b/modules/osd-monitoring-querying-metrics-for-user-defined-projects-as-a-developer.adoc @@ -0,0 +1,36 @@ +// Module included in the following assemblies: +// +// * monitoring/osd-managing-metrics.adoc + +[id="querying-metrics-for-user-defined-projects-as-a-developer_{context}"] += Querying metrics for user-defined projects as a developer + +You can access metrics for a user-defined project as a developer or as a user with view permissions for the project. + +In the *Developer* perspective, the Metrics UI includes some predefined CPU, memory, bandwidth, and network packet queries for the selected project. 
You can also run custom Prometheus Query Language (PromQL) queries for CPU, memory, bandwidth, network packet and application metrics for the project. + +[NOTE] +==== +Developers can only use the *Developer* perspective and not the *Administrator* perspective. As a developer you can only query metrics for one project at a time. Developers cannot access the third-party UIs provided with {product-title} monitoring. +==== + +.Prerequisites + +* You have access to the cluster as a developer or as a user with view permissions for the project that you are viewing metrics for. +* You have enabled monitoring for user-defined projects. +* You have deployed a service in a user-defined project. +* You have created a `ServiceMonitor` custom resource definition (CRD) for the service to define how the service is monitored. + +.Procedure + +// TODO: The previous procedure said the "OpenShift web console" and this says the "OpenShift Dedicated web console". I assume it should be the same between the two? +. From the *Developer* perspective in the {product-title} web console, select *Monitoring* -> *Metrics*. + +. Select the project that you want to view metrics for in the *Project:* list. + +. Choose a query from the *Select Query* list, or run a custom PromQL query by selecting *Show PromQL*. ++ +[NOTE] +==== +In the *Developer* perspective, you can only run one query at a time. +==== diff --git a/modules/osd-monitoring-querying-metrics.adoc b/modules/osd-monitoring-querying-metrics.adoc new file mode 100644 index 0000000000..3c032de4db --- /dev/null +++ b/modules/osd-monitoring-querying-metrics.adoc @@ -0,0 +1,12 @@ +// Module included in the following assemblies: +// +// * monitoring/osd-managing-metrics.adoc + +[id="querying-metrics_{context}"] += Querying metrics + +The OpenShift monitoring dashboard lets you run Prometheus Query Language (PromQL) queries to examine metrics visualized on a plot. This functionality provides information about the state of a cluster and any user-defined projects that you are monitoring. + +As a `dedicated-admin`, you can query one or more namespaces at a time for metrics about user-defined projects. + +As a developer, you must specify a project name when querying metrics. You must have the required privileges to view metrics for the selected project. diff --git a/modules/osd-monitoring-reviewing-monitoring-dashboards-developer.adoc b/modules/osd-monitoring-reviewing-monitoring-dashboards-developer.adoc new file mode 100644 index 0000000000..cb81dd3b3d --- /dev/null +++ b/modules/osd-monitoring-reviewing-monitoring-dashboards-developer.adoc @@ -0,0 +1,26 @@ +// Module included in the following assemblies: +// +// * monitoring/osd-reviewing-monitoring-dashboards.adoc + +[id="reviewing-monitoring-dashboards-developer_{context}"] += Reviewing monitoring dashboards as a developer + +In the *Developer* perspective, you can view dashboards relating to a selected project. You must have access to monitor a project to view dashboard information for it. + +.Prerequisites + +* You have access to the cluster as a `dedicated-admin` or as a user with view permissions for the project that you are viewing the dashboard for. + +.Procedure + +. In the *Developer* perspective in the {product-title} web console, navigate to *Monitoring* -> *Dashboard*. + +. Choose a project in the *Project:* list. + +. Choose a workload in the *All Workloads* list. + +. Optional: Select a time range for the graphs in the *Time Range* list. + +. Optional: Select a *Refresh Interval*. + +. 
Hover over each of the graphs within a dashboard to display detailed information about specific items. diff --git a/modules/osd-monitoring-setting-a-scrape-sample-limit-for-user-defined-projects.adoc b/modules/osd-monitoring-setting-a-scrape-sample-limit-for-user-defined-projects.adoc new file mode 100644 index 0000000000..55b10bbaf0 --- /dev/null +++ b/modules/osd-monitoring-setting-a-scrape-sample-limit-for-user-defined-projects.adoc @@ -0,0 +1,51 @@ +// Module included in the following assemblies: +// +// * monitoring/osd-configuring-the-monitoring-stack.adoc + +[id="setting-a-scrape-sample-limit-for-user-defined-projects_{context}"] += Setting a scrape sample limit for user-defined projects + +You can limit the number of samples that can be accepted per target scrape in user-defined projects. + +[WARNING] +==== +If you set a sample limit, no further sample data is ingested for that target scrape after the limit is reached. +==== + +.Prerequisites + +* You have access to the cluster as a user with the `dedicated-admin` role. +* You have created the `user-workload-monitoring-config` `ConfigMap` object. +* You have installed the OpenShift CLI (`oc`). + +.Procedure + +. Edit the `user-workload-monitoring-config` `ConfigMap` object in the `openshift-user-workload-monitoring` project: ++ +[source,terminal] +---- +$ oc -n openshift-user-workload-monitoring edit configmap user-workload-monitoring-config +---- + +. Add the `enforcedSampleLimit` configuration to `data.config.yaml` to limit the number of samples that can be accepted per target scrape in user-defined projects: ++ +[source,yaml] +---- +apiVersion: v1 +kind: ConfigMap +metadata: + name: user-workload-monitoring-config + namespace: openshift-user-workload-monitoring +data: + config.yaml: | + prometheus: + enforcedSampleLimit: 50000 <1> +---- +<1> A value is required if this parameter is specified. This `enforcedSampleLimit` example limits the number of samples that can be accepted per target scrape in user-defined projects to 50,000. + +. Save the file to apply the changes. The limit is applied automatically. ++ +[WARNING] +==== +When changes are saved to the `user-workload-monitoring-config` `ConfigMap` object, the pods and other resources in the `openshift-user-workload-monitoring` project might be redeployed. The running monitoring processes in that project might also be restarted. +==== diff --git a/modules/osd-monitoring-setting-log-levels-for-monitoring-components.adoc b/modules/osd-monitoring-setting-log-levels-for-monitoring-components.adoc new file mode 100644 index 0000000000..767eaafc68 --- /dev/null +++ b/modules/osd-monitoring-setting-log-levels-for-monitoring-components.adoc @@ -0,0 +1,82 @@ +// Module included in the following assemblies: +// +// * monitoring/osd-configuring-the-monitoring-stack.adoc + +[id="setting-log-levels-for-monitoring-components_{context}"] += Setting log levels for monitoring components + +You can configure the log level for Prometheus Operator, Prometheus, and Thanos Ruler. + +The following log levels can be applied to each of those components in the `user-workload-monitoring-config` `ConfigMap` object: + +* `debug`. Log debug, informational, warning, and error messages. +* `info`. Log informational, warning, and error messages. +* `warn`. Log warning and error messages only. +* `error`. Log error messages only. + +The default log level is `info`. + +.Prerequisites + +* You have access to the cluster as a user with the `dedicated-admin` role. 
+* You have created the `user-workload-monitoring-config` `ConfigMap` object. +* You have installed the OpenShift CLI (`oc`). + +.Procedure + +. Edit the `ConfigMap` object: +.. Edit the `user-workload-monitoring-config` `ConfigMap` object in the `openshift-user-workload-monitoring` project: ++ +[source,terminal] +---- +$ oc -n openshift-user-workload-monitoring edit configmap user-workload-monitoring-config +---- + +.. Add `logLevel: <log_level>` for a component under `data.config.yaml`: ++ +[source,yaml] +---- +apiVersion: v1 +kind: ConfigMap +metadata: + name: user-workload-monitoring-config + namespace: openshift-user-workload-monitoring +data: + config.yaml: | + <component>: <1> + logLevel: <log_level> <2> +---- +<1> The monitoring component that you are applying a log level to. +<2> The log level to apply to the component. + +. Save the file to apply the changes. The pods for the component restart automatically when you apply the log-level change. ++ +[WARNING] +==== +When changes are saved to a monitoring config map, the pods and other resources in the related project might be redeployed. The running monitoring processes in that project might also be restarted. +==== + +. Confirm that the log level has been applied by reviewing the deployment or pod configuration in the related project. The following example checks the log level in the `prometheus-operator` deployment in the `openshift-user-workload-monitoring` project: ++ +[source,terminal] +---- +$ oc -n openshift-user-workload-monitoring get deploy prometheus-operator -o yaml | grep "log-level" +---- ++ +.Example output +[source,terminal] +---- + - --log-level=debug +---- + +. Check that the pods for the component are running. The following example lists the status of pods in the `openshift-user-workload-monitoring` project: ++ +[source,terminal] +---- +$ oc -n openshift-user-workload-monitoring get pods +---- ++ +[NOTE] +==== +If an unrecognized `logLevel` value is included in the `ConfigMap` object, the pods for the component might not restart successfully. +==== diff --git a/modules/osd-monitoring-setting-up-metrics-collection-for-user-defined-projects.adoc b/modules/osd-monitoring-setting-up-metrics-collection-for-user-defined-projects.adoc new file mode 100644 index 0000000000..3cbd203fe5 --- /dev/null +++ b/modules/osd-monitoring-setting-up-metrics-collection-for-user-defined-projects.adoc @@ -0,0 +1,10 @@ +// Module included in the following assemblies: +// +// * monitoring/osd-managing-metrics.adoc + +[id="setting-up-metrics-collection-for-user-defined-projects_{context}"] += Setting up metrics collection for user-defined projects + +You can create a `ServiceMonitor` resource to scrape metrics from a service endpoint in a user-defined project. This assumes that your application uses a Prometheus client library to expose metrics to the `/metrics` canonical name. + +This section describes how to deploy a sample service in a user-defined project and then create a `ServiceMonitor` resource that defines how that service should be monitored.
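+
+The procedures in this section use a `ServiceMonitor` resource. If you need to scrape pods that are not backed by a `Service` object, a `PodMonitor` resource follows the same general pattern. The following is a minimal sketch only; the `ns1` namespace, the `web` port name, and the `app: prometheus-example-app` label selector are illustrative assumptions that must match the pods that you want to monitor:
+
+[source,yaml]
+----
+# Illustrative sketch only: adjust the namespace, port name, and labels to match your pods.
+apiVersion: monitoring.coreos.com/v1
+kind: PodMonitor
+metadata:
+  name: prometheus-example-monitor
+  namespace: ns1
+spec:
+  podMetricsEndpoints:
+  - interval: 30s
+    port: web
+  selector:
+    matchLabels:
+      app: prometheus-example-app
+----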
diff --git a/modules/osd-monitoring-specifying-how-a-service-is-monitored.adoc b/modules/osd-monitoring-specifying-how-a-service-is-monitored.adoc new file mode 100644 index 0000000000..20d3ef2f76 --- /dev/null +++ b/modules/osd-monitoring-specifying-how-a-service-is-monitored.adoc @@ -0,0 +1,70 @@ +// Module included in the following assemblies: +// +// * monitoring/osd-managing-metrics.adoc + +[id="specifying-how-a-service-is-monitored_{context}"] += Specifying how a service is monitored + +To use the metrics exposed by your service, you must configure {product-title} monitoring to scrape metrics from the `/metrics` endpoint. You can do this using a `ServiceMonitor` custom resource definition (CRD) that specifies how a service should be monitored, or a `PodMonitor` CRD that specifies how a pod should be monitored. The former requires a `Service` object, while the latter does not, allowing Prometheus to directly scrape metrics from the metrics endpoint exposed by a pod. + +[NOTE] +==== +In {product-title}, you can use the `tlsConfig` property for a `ServiceMonitor` resource to specify the TLS configuration to use when scraping metrics from an endpoint. The `tlsConfig` property is not yet available for `PodMonitor` resources. If you need to use a TLS configuration when scraping metrics, you must use the `ServiceMonitor` resource. +==== + +This procedure shows you how to create a `ServiceMonitor` resource for a service in a user-defined project. + +.Prerequisites + +* You have access to the cluster as a user with the `dedicated-admin` role or the `monitoring-edit` role. +* For this example, you have deployed the `prometheus-example-app` sample service in the `ns1` project. + +.Procedure + +. Create a YAML file for the `ServiceMonitor` resource configuration. In this example, the file is called `example-app-service-monitor.yaml`. + +. Add the following `ServiceMonitor` resource configuration details: ++ +[source,yaml] +---- +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + labels: + k8s-app: prometheus-example-monitor + name: prometheus-example-monitor + namespace: ns1 +spec: + endpoints: + - interval: 30s + port: web + scheme: http + selector: + matchLabels: + app: prometheus-example-app +---- ++ +This defines a `ServiceMonitor` resource that scrapes the metrics exposed by the `prometheus-example-app` sample service, which includes the `version` metric. + +. Apply the configuration to the cluster: ++ +[source,terminal] +---- +$ oc apply -f example-app-service-monitor.yaml +---- ++ +It takes some time to deploy the `ServiceMonitor` resource. + +. 
You can check that the `ServiceMonitor` resource is running: ++ +[source,terminal] +---- +$ oc -n ns1 get servicemonitor +---- ++ +.Example output +[source,terminal] +---- +NAME AGE +prometheus-example-monitor 81m +---- diff --git a/modules/osd-monitoring-support-considerations.adoc b/modules/osd-monitoring-support-considerations.adoc new file mode 100644 index 0000000000..e00dd991e6 --- /dev/null +++ b/modules/osd-monitoring-support-considerations.adoc @@ -0,0 +1,10 @@ +// Module included in the following assemblies: +// +// * monitoring/osd-configuring-the-monitoring-stack.adoc + +[id="support-considerations_{context}"] += Support considerations for monitoring user-defined projects + +The following modifications are explicitly not supported: + +* Installing custom Prometheus instances on {product-title} diff --git a/modules/osd-monitoring-targets-for-user-defined-projects.adoc b/modules/osd-monitoring-targets-for-user-defined-projects.adoc new file mode 100644 index 0000000000..23f3c11f5d --- /dev/null +++ b/modules/osd-monitoring-targets-for-user-defined-projects.adoc @@ -0,0 +1,11 @@ +// Module included in the following assemblies: +// +// * monitoring/osd-understanding-the-monitoring-stack.adoc + +[id="monitoring-targets-for-user-defined-projects_{context}"] += Monitoring targets for user-defined projects + +Monitoring is enabled by default for {product-title} user-defined projects. You can monitor: + +* Metrics provided through service endpoints in user-defined projects. +* Pods running in user-defined projects. diff --git a/modules/osd-monitoring-troubleshooting-issues.adoc b/modules/osd-monitoring-troubleshooting-issues.adoc new file mode 100644 index 0000000000..c750f1e118 --- /dev/null +++ b/modules/osd-monitoring-troubleshooting-issues.adoc @@ -0,0 +1,110 @@ + +// Module included in the following assemblies: +// +// * monitoring/osd-troubleshooting-monitoring-issues.adoc + +[id="troubleshooting-monitoring-issues_{context}"] += Determining why user-defined project metrics are unavailable + +If metrics are not displaying when monitoring user-defined projects, follow these steps to troubleshoot the issue. + +.Procedure + +. Query the metric name and verify that the project is correct: +.. From the *Developer* perspective in the {product-title} web console, select *Monitoring* -> *Metrics*. +.. Select the project that you want to view metrics for in the *Project:* list. +.. Choose a query from the *Select Query* list, or run a custom PromQL query by selecting *Show PromQL*. ++ +The *Select Query* pane shows the metric names. ++ +Queries must be done on a per-project basis. The metrics that are shown relate to the project that you have selected. +. Verify that the pod that you want metrics from is actively serving metrics. Run the following `oc exec` command against a pod to target the `podIP`, `port`, and `/metrics` path. ++ +[source,terminal] +---- +$ oc exec <pod_name> -n <namespace> -- curl <pod_ip>:<port>/metrics +---- ++ +[NOTE] +==== +You must run the command on a pod that has `curl` installed. +==== ++ +The following example output shows a result with a valid version metric. ++ +.Example output +[source,terminal] +---- + % Total % Received % Xferd Average Speed Time Time Time Current + Dload Upload Total Spent Left Speed +# HELP version Version information about this binary-- --:--:-- --:--:-- 0 +# TYPE version gauge +version{version="v0.1.0"} 1 +100 102 100 102 0 0 51000 0 --:--:-- --:--:-- --:--:-- 51000 +---- ++ +An invalid output indicates that there is a problem with the corresponding application. + +. 
If you are using a `PodMonitor` CRD, verify that the `PodMonitor` CRD is configured to point to the correct pods using label matching. For more information, see the Prometheus Operator documentation. +. If you are using a `ServiceMonitor` CRD, and if the `/metrics` endpoint of the pod is showing metric data, follow these steps to verify the configuration: +.. Verify that the service is pointed to the correct `/metrics` endpoint. The service `labels` in this output must match the service monitor `labels` and the `/metrics` endpoint defined by the service in the subsequent steps. ++ +[source,terminal] +---- +$ oc get service <service_name> -n <namespace> -o yaml +---- ++ +.Example output +[source,terminal] +---- +apiVersion: v1 +kind: Service <1> +metadata: + labels: <2> + app: prometheus-example-app + name: prometheus-example-app + namespace: ns1 +spec: + ports: + - port: 8080 + protocol: TCP + targetPort: 8080 + name: web + selector: + app: prometheus-example-app + type: ClusterIP +---- ++ +<1> Specifies that this is a service API. +<2> Specifies the labels that are being used for this service. + +.. Query the `serviceIP`, `port`, and `/metrics` endpoint to see whether the same metrics are returned as from the `curl` command that you ran on the pod previously: +... Run the following command to find the service IP: ++ +[source,terminal] +---- +$ oc get service -n <namespace> +---- +... Query the `/metrics` endpoint: ++ +[source,terminal] +---- +$ oc exec <pod_name> -n <namespace> -- curl <service_ip>:<port>/metrics +---- ++ +Valid metrics are returned in the following example. ++ +.Example output +[source,terminal] +---- +% Total % Received % Xferd Average Speed Time Time Time Current + Dload Upload Total Spent Left Speed +100 102 100 102 0 0 51000 0 --:--:-- --:--:-- --:--:-- 99k +# HELP version Version information about this binary +# TYPE version gauge +version{version="v0.1.0"} 1 +---- +.. Use label matching to verify that the `ServiceMonitor` object is configured to point to the desired service. To do this, compare the `Service` object from the `oc get service` output to the `ServiceMonitor` object from the `oc get servicemonitor` output. The labels must match for the metrics to be displayed. ++ +For example, from the previous steps, notice how the `Service` object has the `app: prometheus-example-app` label and the `ServiceMonitor` object has the same `app: prometheus-example-app` match label. +. If everything looks valid and the metrics are still unavailable, contact the support team for further help. diff --git a/modules/osd-monitoring-understanding-metrics.adoc b/modules/osd-monitoring-understanding-metrics.adoc new file mode 100644 index 0000000000..77772c8b19 --- /dev/null +++ b/modules/osd-monitoring-understanding-metrics.adoc @@ -0,0 +1,33 @@ +// Module included in the following assemblies: +// +// * monitoring/osd-managing-metrics.adoc + +[id="understanding-metrics_{context}"] += Understanding metrics + +In {product-title}, cluster components are monitored by scraping metrics exposed through service endpoints. You can also configure metrics collection for user-defined projects. Metrics enable you to monitor how cluster components and your own workloads are performing. + +You can define the metrics that you want to provide for your own workloads by using Prometheus client libraries at the application level. + +In {product-title}, metrics are exposed through an HTTP service endpoint under the `/metrics` canonical name. You can list all available metrics for a service by running a `curl` query against `\http://<endpoint_name>/metrics`. 
For instance, you can expose a route to the `prometheus-example-app` example application and then run the following to view all of its available metrics: + +[source,terminal] +---- +$ curl http://<route>/metrics +---- + +.Example output +[source,terminal] +---- +# HELP http_requests_total Count of all HTTP requests +# TYPE http_requests_total counter +http_requests_total{code="200",method="get"} 4 +http_requests_total{code="404",method="get"} 2 +# HELP version Version information about this binary +# TYPE version gauge +version{version="v0.1.0"} 1 +---- + +.Additional resources + +* See the link:https://prometheus.io/docs/instrumenting/clientlibs/[Prometheus documentation] for details on Prometheus client libraries. diff --git a/modules/osd-monitoring-understanding-the-monitoring-stack.adoc b/modules/osd-monitoring-understanding-the-monitoring-stack.adoc new file mode 100644 index 0000000000..b2e95e57dc --- /dev/null +++ b/modules/osd-monitoring-understanding-the-monitoring-stack.adoc @@ -0,0 +1,16 @@ +// Module included in the following assemblies: +// +// * monitoring/osd-understanding-the-monitoring-stack.adoc + +[id="understanding-the-monitoring-stack_{context}"] += Understanding the monitoring stack + +The {product-title} monitoring stack is based on the link:https://prometheus.io/[Prometheus] open source project and its wider ecosystem. The monitoring stack includes the following: + +* *Default platform monitoring components*. A set of platform monitoring components are installed in the `openshift-monitoring` project by default during an {product-title} installation. This provides monitoring for core {product-title}. The default monitoring stack also enables remote health monitoring for clusters. Critical metrics, such as CPU and memory, are collected from all of the workloads in every namespace and are made available for your use. ++ +These components are illustrated in the *Installed by default* section in the following diagram. + +* *Components for monitoring user-defined projects*. This feature is enabled by default and provides monitoring for user-defined projects. These components are illustrated in the *User* section in the following diagram. + +image:osd-monitoring-architecture.svg[{product-title} monitoring architecture] diff --git a/modules/osd-rhoam.adoc b/modules/osd-rhoam.adoc new file mode 100644 index 0000000000..5f983d12e0 --- /dev/null +++ b/modules/osd-rhoam.adoc @@ -0,0 +1,24 @@ +// Module included in the following assemblies: +// +// * adding_service_cluster/available-services.adoc +// * adding_service_cluster/rosa-available-services.adoc + +[id="osd-rhoam_{context}"] += Red Hat OpenShift API Management + +The Red Hat OpenShift API Management (OpenShift API Management) service is available as an add-on to your {product-title} cluster on AWS. OpenShift API Management is a managed API traffic control and API program management solution. It is based on the 3scale API Management platform and implements single sign-on for Red Hat solutions to secure and protect your APIs. + +This OpenShift API Management entitlement provides: + +ifdef::openshift-rosa[] +* Availability to any cluster that meets the resource requirements listed in the Red Hat OpenShift API Management service definition. +endif::[] +ifdef::openshift-dedicated[] +* Availability to any cluster that meets the resource requirements listed in the {product-title} service definition. +endif::[] +* Full production-level support. +* No time limits on usage. +* A quota of 100K API calls per day. 
Customers have the option to pay for a OpenShift API Management subscription with higher quotas. + +.Additional resources +* link:https://access.redhat.com/documentation/en-us/red_hat_openshift_api_management[Red Hat OpenShift API Management] documentation diff --git a/modules/osd-storage-pv-aws-config-account.adoc b/modules/osd-storage-pv-aws-config-account.adoc new file mode 100644 index 0000000000..e53fa02152 --- /dev/null +++ b/modules/osd-storage-pv-aws-config-account.adoc @@ -0,0 +1,34 @@ +// Module included in the following assemblies: +// +// * storage/persistent_storage/osd-persistent-storage-aws.adoc + +[id="osd-storage-pv-aws-config-account_{context}"] += Configuring the AWS account + +Set up your AWS account to prepare AWS EFS for use by {product-title}. + +.Procedure + +. Log in to the link:https://console.aws.amazon.com/ec2[AWS EC2 Console]. +. Select the region that matches the cluster region. +. Filter only worker EC2 instances, and select an instance. Note the VPC ID and security group ID. These values are required later in the process. +. Click the *Security* tab, and click the Security Group Name. +. From the *Actions* dropdown menu, click *Edit Inbound Rules*. Scroll to the bottom, and click *Add Rule*. +. Add an NFS rule that allows NFS traffic from the VPC private CIDR. +. Open the link:https://console.aws.amazon.com/efs/[Amazon EFS page]. To create the EFS, click *Create file system*. +. Click *Customize* and proceed through the wizard. +.. In `Step 2:`, configure the network access: +... Click the VPC of the cluster that you noted previously. +... Ensure that the private subnets are selected. +... Select the Security Group Name that you noted previously for the EC2 worker instances. +... Click *Next*. +.. In `Step 3:`, configure the client access: +... Click *Add access point*. +... Enter a unique Path such as `/access_point_1`. +... Configure the Owner fields with ownership or permissions that allow write access for your worker pods. For example, if your worker pods run with group ID `100`, you can set that ID as your `Owner Group ID` and ensure the permissions include `g+rwx`. +. Continue through the wizard steps, and click *Create File System*. +. After the file system is created: +.. Note the file system ID for later use. +.. Click *Manage client access* and note the access point ID. + +You can add more NFS rules, using steps 5-10, to create separate shared data stores. In each case, make note of the corresponding file system ID and access point ID. diff --git a/modules/osd-storage-pv-aws-connect-pods.adoc b/modules/osd-storage-pv-aws-connect-pods.adoc new file mode 100644 index 0000000000..2f1435df00 --- /dev/null +++ b/modules/osd-storage-pv-aws-connect-pods.adoc @@ -0,0 +1,37 @@ +// Module included in the following assemblies: +// +// * storage/persistent_storage/osd-persistent-storage-aws.adoc + +[id="osd-storage-pv-aws-connect-pods_{context}"] += Connecting pods + +The persistent volume claim (PVC) that was created in your project is ready for use. You can create a sample pod to test this PVC. + +.Procedure + +. Create and navigate to a project. +. Click *Workloads* -> *Pods* -> *Create Pod*. +. Enter the YAML information. Use the name of your `PersistentVolumeClaim` object under `.spec.volumes[].persistentVolumeClaim.claimName`. 
++ +.Example +[source,yaml] +---- +apiVersion: v1 +kind: Pod +metadata: + name: test-efs +spec: + volumes: + - name: efs-storage-vol + persistentVolumeClaim: + claimName: pvc-sv1 + containers: + - name: test-efs + image: centos:latest + command: [ "/bin/bash", "-c", "--" ] + args: [ "while true; do touch /mnt/efs-data/verify-efs && echo 'hello efs' && sleep 30; done;" ] + volumeMounts: + - mountPath: "/mnt/efs-data" + name: efs-storage-vol +---- +. After the pods are created, click *Workloads* -> *Pods* -> *Logs* to verify the pod logs. diff --git a/modules/osd-storage-pv-aws-create-sharedvolumes-cli.adoc b/modules/osd-storage-pv-aws-create-sharedvolumes-cli.adoc new file mode 100644 index 0000000000..fd5e97f073 --- /dev/null +++ b/modules/osd-storage-pv-aws-create-sharedvolumes-cli.adoc @@ -0,0 +1,38 @@ +// Module included in the following assemblies: +// +// * storage/persistent_storage/osd-persistent-storage-aws.adoc + + +[id="osd-storage-pv-aws-create-sharedvolumes-cli_{context}"] += Creating `SharedVolume` resources using the CLI + +You must create one `SharedVolume` resource per file system:access point pair in each project from which you want pods to access it. You can create a `SharedVolume` resource manually by writing a YAML or JSON definition and applying it to the cluster. + +.Procedure + +. Using the `oc` CLI, create the YAML file using the `accessPointID` and `fileSystemID` values from the EFS resources you created earlier. ++ +[source,yaml] +---- + apiVersion: aws-efs.managed.openshift.io/v1alpha1 + kind: SharedVolume + metadata: + name: sv1 + namespace: efsop2 + spec: + accessPointID: fsap-0123456789abcdef + fileSystemID: fs-0123cdef +---- + +. Apply the file to the cluster using the following command: ++ +[source,terminal] +---- +$ oc apply -f <filename>.yaml +---- ++ +The `SharedVolume` resource is created, and triggers the AWS EFS Operator to generate and associate a PersistentVolume:PersistentVolumeClaim pair with the specified EFS access point. + +. To verify that the PVC exists and is bound, run `oc get pvc` in the project, or navigate to *Storage* -> *Persistent Volume Claims* in the web console. ++ +The PVC name is `pvc-{shared_volume_name}`. The associated PV name is `pv-{project_name}-{shared_volume_name}`. diff --git a/modules/osd-storage-pv-aws-create-sharedvolumes-console.adoc b/modules/osd-storage-pv-aws-create-sharedvolumes-console.adoc new file mode 100644 index 0000000000..ebc12c75f2 --- /dev/null +++ b/modules/osd-storage-pv-aws-create-sharedvolumes-console.adoc @@ -0,0 +1,37 @@ +// Module included in the following assemblies: +// +// * storage/persistent_storage/osd-persistent-storage-aws.adoc + +[id="osd-storage-pv-aws-create-sharedvolumes-console_{context}"] += Creating `SharedVolume` resources using the console + +You must create one `SharedVolume` resource per file system:access point pair in each project from which you want pods to access it. + +.Procedure + +. In the OpenShift web console, create and navigate to a project. +. Click *Operators* -> *Installed Operators*. Find the entry for AWS EFS Operator, and click *SharedVolume* under Provided APIs. +. Click *Create SharedVolume*. +. Edit the sample YAML: +.. Type a suitable value for `name`. +.. Replace the values of `accessPointID` and `fileSystemID` with the values from the EFS resources you created earlier. ++ +[source,yaml] +---- + apiVersion: aws-efs.managed.openshift.io/v1alpha1 + kind: SharedVolume + metadata: + name: sv1 + namespace: efsop2 + spec: + accessPointID: fsap-0123456789abcdef + fileSystemID: fs-0123cdef +---- + +. Click *Create*.
++ +The `SharedVolume` resource is created, and triggers the AWS EFS Operator to generate and associate a PersistentVolume:PersistentVolumeClaim pair with the specified EFS access point. + +. To verify that the persistent volume claim (PVC) exists and is bound, click *Storage* -> *Persistent Volume Claims*. ++ +The PVC name is `pvc-<shared_volume_name>`. The associated PV name is `pv-<project_name>-<shared_volume_name>`. diff --git a/modules/osd-storage-pv-aws-install-efs.adoc b/modules/osd-storage-pv-aws-install-efs.adoc new file mode 100644 index 0000000000..71e4b3b484 --- /dev/null +++ b/modules/osd-storage-pv-aws-install-efs.adoc @@ -0,0 +1,13 @@ +// Module included in the following assemblies: +// +// * storage/persistent_storage/osd-persistent-storage-aws.adoc + +[id="osd-storage-pv-aws-install-efs_{context}"] += Installing the EFS Operator + +.Procedure + +. Log in to the OpenShift Web UI for your cluster. +. Click *Operators* -> *OperatorHub*. +. Search for and select the AWS EFS Operator. Click *Install*. +. Accept the default settings, and click *Subscribe*. diff --git a/modules/osd-storage-pv-aws-uninstall-efs.adoc b/modules/osd-storage-pv-aws-uninstall-efs.adoc new file mode 100644 index 0000000000..0ad2c4ed04 --- /dev/null +++ b/modules/osd-storage-pv-aws-uninstall-efs.adoc @@ -0,0 +1,19 @@ +// Module included in the following assemblies: +// +// * storage/persistent_storage/osd-persistent-storage-aws.adoc + +[id="osd-storage-pv-aws-uninstall-efs_{context}"] += Uninstalling the EFS Operator + +.Procedure + +To remove the Operator from your cluster: + +. Delete all of the workloads using the persistent volume claims that were generated by the Operator. +. Delete all of the shared volumes from all of the namespaces. The Operator automatically removes the associated persistent volumes and persistent volume claims. +. Uninstall the Operator: +.. Click *Operators* -> *Installed Operators*. +.. Find the entry for AWS EFS Operator, and click the menu button on the right-hand side of the Operator. +.. Click *Uninstall* and confirm the deletion. + +. Delete the shared volume CRD. This action triggers the deletion of the remaining Operator-owned resources. diff --git a/modules/osd-vs-ocp.adoc b/modules/osd-vs-ocp.adoc new file mode 100644 index 0000000000..da6a649d3b --- /dev/null +++ b/modules/osd-vs-ocp.adoc @@ -0,0 +1,39 @@ + +// Module included in the following assemblies: +// +// * assemblies/osd-architecture.adoc + +[id="osd-vs-ocp_{context}"] + += Understanding how {product-title} differs from {OCP} + +{product-title} uses the same code base as {OCP} but is installed in an opinionated way to be optimized for performance, scalability, and security. {product-title} is a fully managed service; therefore, many of the {product-title} components and settings that you manually set up in {OCP} are set up for you by default. + +Review the following differences between {product-title} and a standard installation of {OCP} on your own infrastructure: + +[options="header"] +|==== +|{OCP} |{product-title} + +|The customer installs and configures {OCP}. +|{product-title} is installed through a user-friendly webpage and in a standardized way that is optimized for performance, scalability, and security. + +|Customers can choose their computing resources. +|{product-title} is hosted and managed in a public cloud (Amazon Web Services or Google Cloud Platform) either owned by Red Hat or provided by the customer. + +|Customers have top-level administrative access to the infrastructure. 
+|Customers have a built-in administrator group, though the top-level administration access is available when cloud accounts are provided by the customer. + +|Customers can use all supported features and configuration settings available in {OCP}. +|Some {OCP} features and configuration settings might not be available or changeable in {product-title}. + +|You set up control plane components such as the API server and etcd on machines that get the `control` role. You can modify the control plane components, but keep in mind that you are responsible for backing up, restoring, and making control plane data highly available. +|Red Hat sets up the control plane and manages the control plane components for you. The control plane is highly available. + +|You are responsible for updating the underlying infrastructure for the control plane and worker nodes. You can use the OpenShift web console to update {OCP} versions. +|Red Hat automatically notifies the customer when updates are available. You can manually or automatically schedule upgrades in {OCM}. + +|Support is provided based on the terms of your Red Hat subscription or cloud provider. +|Engineered, operated, and supported by Red Hat with a 99.95% uptime SLA and link:https://access.redhat.com/support/offerings/openshift/sla[24x7] coverage. + +|==== diff --git a/modules/policy-change-management.adoc b/modules/policy-change-management.adoc new file mode 100644 index 0000000000..6d6576f03f --- /dev/null +++ b/modules/policy-change-management.adoc @@ -0,0 +1,71 @@ + +// Module included in the following assemblies: +// +// * assemblies/policy-process-security.adoc + +[id="policy-change-management_{context}"] += Change management + + +Cluster changes are initiated in one of two ways: + +* A customer initiates changes through self-service capabilities like cluster deployment, worker node scaling, and cluster deletion. +* An SRE initiates a change through Operator-driven capabilities, such as configuration changes, upgrades, and patching. + +Change history is captured in the *Cluster History* section on the OpenShift Cluster Manager (OCM) *Overview* tab and is available to customers. This includes logs from the following changes: + +* Adding or removing identity providers +* Adding or removing users to/from the dedicated-admins group +* Scaling the cluster compute nodes +* Scaling the cluster load balancer +* Scaling the cluster persistent storage +* Upgrading the cluster + +SRE-initiated changes that require manual intervention generally follow this procedure: + +* Preparing for change +** Change characteristics are identified and a gap analysis against current state is performed. +** Change steps are documented and validated. +** The communication plan and schedule are shared with all stakeholders. +** CI/CD and end-to-end tests are updated to automate change validation. +** A change request capturing the change details is submitted for management approval. +* Managing change +** Automated nightly CI/CD jobs pick up the change and run tests. +** The change is made to integration and stage environments, and manually validated before updating the customer cluster. +** Major change notifications are sent before and after the event. +* Reinforcing the change +** Feedback on the change is collected and analyzed. +** Potential gaps are diagnosed in order to understand resistance and automate similar change requests. +** Corrective actions are implemented. + +[NOTE] +==== +SREs consider manual changes a failure, and manual intervention is used only as a fallback process.
+==== + +[id="config-management_{context}"] +== Configuration management + +The infrastructure and configuration of the {product-title} environment is managed as code. Red Hat SRE manages changes to the {product-title} environment using a GitOps workflow and automated CI/CD pipeline. + +Each proposed change undergoes a series of automated verifications immediately upon check-in. Changes are then deployed to a staging environment where they undergo automated integration testing. Finally, changes are deployed to the production environment. Each step is fully automated. + +An authorized SRE reviewer must approve advancement to each step. The reviewer might not be the same individual who proposed the change. All changes and approvals are fully auditable as part of the GitOps workflow. + +[id="patch-management_{context}"] +== Patch management + +OpenShift Container Platform software and the underlying immutable Red Hat Enterprise Linux CoreOS (RHCOS) operating system image are patched for bugs and vulnerabilities as a side effect of regular z-stream upgrades. Read more about link:https://access.redhat.com/documentation/en-us/openshift_container_platform/4.6/html/architecture/architecture-rhcos[RHCOS architecture] in the OpenShift Container Platform documentation. + +// TODO: checking whether the OCP reference above should be dedicated? Either way, the attribute version should probably be used throughout the above paragraph + +[id="release-management_{context}"] +== Release management + +{product-title} clusters are upgraded as frequently as weekly to ensure that the latest security patches and bug fixes are applied to {product-title} clusters. + +Patch-level upgrades, also referred to as z-stream upgrades (for example, 4.3.18 to 4.3.19), are automatically deployed on Tuesdays. New z-stream releases are tested nightly with automated {product-title} integration testing and released only once validated in the OSD environment. + +Minor version upgrades, also referred to as y-stream upgrades (for example, 4.3 to 4.4), are coordinated with customers by email notification. + +Customers can review the history of all cluster upgrade events in their OCM web console. diff --git a/modules/policy-customer-responsibility.adoc b/modules/policy-customer-responsibility.adoc new file mode 100644 index 0000000000..675bd83327 --- /dev/null +++ b/modules/policy-customer-responsibility.adoc @@ -0,0 +1,41 @@ + +// Module included in the following assemblies: +// +// * assemblies/policy-responsibility-matrix.adoc + +[id="policy-customer-responsibility_{context}"] += Customer responsibilities for data and applications + + +The customer is responsible for the applications, workloads, and data that they deploy to {product-title}. However, Red Hat provides various tools to help the customer manage data and applications on the platform. + +[cols="2a,3a,3a",options="header"] +|=== + +|Resource +|Red Hat responsibilities +|Customer responsibilities + +|Customer data +|* Maintain platform-level standards for data encryption. +* Provide OpenShift components to help manage application data, such as secrets. +* Enable integration with third-party data services (such as AWS RDS or Google Cloud SQL) to store and manage data outside of the cluster and/or cloud provider. +|Maintain responsibility for all customer data stored on the platform and how customer applications consume and expose this data. 
+ +|Customer applications +|* Provision clusters with OpenShift components installed so that customers can access the OpenShift and Kubernetes APIs to deploy and manage containerized applications. +* Create clusters with image pull secrets so that customer deployments can pull images from the Red Hat Container Catalog registry. +* Provide access to OpenShift APIs that a customer can use to set up Operators to add community, third-party, and Red Hat services to the cluster. +* Provide storage classes and plug-ins to support persistent volumes for use with customer applications. +|* Maintain responsibility for customer and third-party applications, data, and their complete lifecycle. +* If a customer adds Red Hat, community, third-party, their own, or other services to the cluster by using Operators or external images, the customer is responsible for these services and for working with the appropriate provider (including Red Hat) to troubleshoot any issues. +* Use the provided tools and features to configure and deploy; keep up to date; set up resource requests and limits; size the cluster to have enough resources to run apps; set up permissions; integrate with other services; manage any image streams or templates that the customer deploys; externally serve; save, back up, and restore data; and otherwise manage their highly available and resilient workloads. +* Maintain responsibility for monitoring the applications running on OpenShift Dedicated, including installing and operating software to gather metrics and create alerts. + +|Developer services (CodeReady) +|Make CodeReady Workspaces available as an add-on through OpenShift Cluster Manager (OCM). +|Install, secure, and operate CodeReady Workspaces and the Developer CLI. + +|=== + +// TODO: Should "Red Hat Container Catalog" be "Red Hat Ecosystem Catalog" now? diff --git a/modules/policy-disaster-recovery.adoc b/modules/policy-disaster-recovery.adoc new file mode 100644 index 0000000000..073e103bc4 --- /dev/null +++ b/modules/policy-disaster-recovery.adoc @@ -0,0 +1,16 @@ + +// Module included in the following assemblies: +// +// * assemblies/policy-process-security.adoc + +[id="policy-disaster-recovery_{context}"] += Disaster recovery + + +{product-title} provides disaster recovery for failures that occur at the pod, worker node, infrastructure node, master node, and availability zone levels. + +All disaster recovery requires that the customer use best practices for deploying highly available applications, storage, and cluster architecture (for example, single-zone deployment vs. multi-zone deployment) to account for the level of desired availability. + +One single-zone cluster will not provide disaster avoidance or recovery in the event of an availability zone or region outage. Multiple single-zone clusters with customer-maintained failover can account for outages at the zone or region levels. + +One multi-zone cluster will not provide disaster avoidance or recovery in the event of a full region outage. Multiple multi-zone clusters with customer-maintained failover can account for outages at the region level.
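+
+As an illustration of these best practices, the following is a minimal sketch, not a Red Hat-provided configuration, of how a customer application might be spread across availability zones on a multi-zone cluster so that a single zone outage does not stop every replica. The deployment name, namespace, and image are hypothetical placeholders.
+
+[source,yaml]
+----
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: example-ha-app        # hypothetical application name
+  namespace: example-project  # hypothetical customer namespace
+spec:
+  replicas: 3
+  selector:
+    matchLabels:
+      app: example-ha-app
+  template:
+    metadata:
+      labels:
+        app: example-ha-app
+    spec:
+      # Ask the scheduler to keep replicas evenly spread across availability
+      # zones so that the loss of one zone leaves replicas running elsewhere.
+      topologySpreadConstraints:
+      - maxSkew: 1
+        topologyKey: topology.kubernetes.io/zone
+        whenUnsatisfiable: ScheduleAnyway
+        labelSelector:
+          matchLabels:
+            app: example-ha-app
+      containers:
+      - name: app
+        image: registry.example.com/example/app:latest # hypothetical image
+        ports:
+        - containerPort: 8080
+----
+
+Replicated or cluster-external storage and customer-maintained failover between clusters, as described above, are still required to cover full zone or region outages.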
diff --git a/modules/policy-failure-points.adoc b/modules/policy-failure-points.adoc new file mode 100644 index 0000000000..fb2bde078d --- /dev/null +++ b/modules/policy-failure-points.adoc @@ -0,0 +1,50 @@ + +// Module included in the following assemblies: +// +// * assemblies/policy-understand-availability.adoc + +[id="policy-failure-points_{context}"] += Potential points of failure + + +{OCP} provides many features and options for protecting your workloads against downtime, but applications must be architected appropriately to take advantage of these features. + +{product-title} can help further protect you against many common Kubernetes issues by adding Red Hat Site Reliability Engineer (SRE) support and the option to deploy a multi-zone cluster, but there are a number of ways in which a container or infrastructure can still fail. By understanding potential points of failure, you can understand risks and appropriately architect both your applications and your clusters to be as resilient as necessary at each specific level. + +[NOTE] +==== +An outage can occur at several different levels of infrastructure and cluster components. +==== + +[id="container-pod-failure_{context}"] +== Container or pod failure +By design, pods are meant to exist for a short time. Appropriately scaling services so that multiple instances of your application pods are running protects against issues with any individual pod or container. The node scheduler can also ensure that these workloads are distributed across different worker nodes to further improve resiliency. + +When accounting for possible pod failures, it is also important to understand how storage is attached to your applications. Single persistent volumes attached to single pods cannot leverage the full benefits of pod scaling, whereas replicated databases, database services, or shared storage can. + +To avoid disruption to your applications during planned maintenance, such as upgrades, it is important to define a pod disruption budget. These are part of the Kubernetes API and can be managed with the OpenShift CLI (`oc`) like other object types. They allow the specification of safety constraints on pods during operations, such as draining a node for maintenance. + +[id="worker-node-failure_{context}"] +== Worker node failure +Worker nodes are the virtual machines that contain your application pods. By default, an {product-title} cluster has a minimum of four worker nodes for a single availability-zone cluster. In the event of a worker node failure, pods are relocated to functioning worker nodes, as long as there is enough capacity, until any issue with an existing node is resolved or the node is replaced. More worker nodes means more protection against single node outages, and ensures proper cluster capacity for rescheduled pods in the event of a node failure. + +[NOTE] +==== +When accounting for possible node failures, it is also important to understand how storage is affected. +==== + +[id="cluster-failure_{context}"] +== Cluster failure +{product-title} clusters have at least three master nodes and three infrastructure nodes that are preconfigured for high availability, either in a single zone or across multiple zones depending on the type of cluster you have selected. This means that master and infrastructure nodes have the same resiliency of worker nodes, with the added benefit of being managed completely by Red Hat. 
+ +In the event of a complete master outage, the OpenShift APIs will not function, and existing worker node pods will be unaffected. However, if there is also a pod or node outage at the same time, the masters will have to recover before new pods or nodes can be added or scheduled. + +All services running on infrastructure nodes are configured by Red Hat to be highly available and distributed across infrastructure nodes. In the event of a complete infrastructure outage, these services will be unavailable until these nodes have been recovered. + +[id="zone-failure_{context}"] +== Zone failure +A zone failure from a public cloud provider affects all virtual components, such as worker nodes, block or shared storage, and load balancers that are specific to a single availability zone. To protect against a zone failure, {product-title} provides the option for clusters that are distributed across three availability zones, called multi-availability zone clusters. Existing stateless workloads are redistributed to unaffected zones in the event of an outage, as long as there is enough capacity. + +[id="storage-failure_{context}"] +== Storage failure +If you have deployed a stateful application, then storage is a critical component and must be accounted for when thinking about high availability. A single block storage PV is unable to withstand outages even at the pod level. The best ways to maintain availability of storage are to use replicated storage solutions, shared storage that is unaffected by outages, or a database service that is independent of the cluster. diff --git a/modules/policy-identity-access-management.adoc b/modules/policy-identity-access-management.adoc new file mode 100644 index 0000000000..8761fb6517 --- /dev/null +++ b/modules/policy-identity-access-management.adoc @@ -0,0 +1,169 @@ + +// Module included in the following assemblies: +// +// * assemblies/policy-process-security.adoc + +[id="policy-identity-access-management_{context}"] += Identity and access management +Most access by Red Hat site reliability engineering (SRE) teams is done by using cluster Operators through automated configuration management. + +[id="subprocessors_{context}"] +== Subprocessors +For a list of the available subprocessors, see the link:https://access.redhat.com/articles/5528091[Red Hat Subprocessor List] on the Red Hat Customer Portal. + +[id="sre-access-all_{context}"] +== SRE access to all {product-title} clusters +SREs access {product-title} clusters through a proxy. The proxy mints a service account in an {product-title} cluster for the SREs when they log in. As no identity provider is configured for {product-title} clusters, SREs access the proxy by running a local web console container. SREs do not access the cluster web console directly. SREs must authenticate as individual users to ensure auditability. All authentication attempts are logged to a Security Information and Event Management (SIEM) system. + +[id="privileged-access_{context}"] +== Privileged access controls in {product-title} +Red Hat SRE adheres to the principle of least privilege when accessing {product-title} and public cloud provider components. There are four basic categories of manual SRE access: + +* SRE admin access through the Red Hat Customer Portal with normal two-factor authentication and no privileged elevation. + +* SRE admin access through the Red Hat corporate SSO with normal two-factor authentication and no privileged elevation. + +* OpenShift elevation, which is a manual elevation using Red Hat SSO. 
It is fully audited and management approval is required for every operation SREs make. + +* Cloud provider access or elevation, which is a manual elevation for cloud provider console or CLI access. Access is limited to 60 minutes and is fully audited. + +Each of these access types has different levels of access to components: + +[cols= "3a,3a,3a,3a,3a",options="header"] + +|=== + +| Component | Typical SRE admin access (Red Hat Customer Portal) | Typical SRE admin access (Red Hat SSO) |OpenShift elevation | Cloud provider access + +| OpenShift Cluster Manager (OCM) | R/W | No access | No access | No access +| OpenShift web console | No access | R/W | R/W | No access +| Node operating system | No access | A specific list of elevated OS and network permissions. | A specific list of elevated OS and network permissions. | No access +| AWS Console | No access | No access, but this is the account used to request cloud provider access. | No access | All cloud provider permissions using the SRE identity. + +|=== + +[id="sre-access-cloud-infra_{context}"] +== SRE access to cloud infrastructure accounts +Red Hat personnel do not access cloud infrastructure accounts in the course of routine {product-title} operations. For emergency troubleshooting purposes, Red Hat SRE have well-defined and auditable procedures to access cloud infrastructure accounts. + +In AWS, SREs generate a short-lived AWS access token for the `BYOCAdminAccess` user using the AWS Security Token Service (STS). Access to the STS token is audit logged and traceable back to individual users. The `BYOCAdminAccess` has the `AdministratorAccess` IAM policy attached. + +In Google Cloud, SREs access resources after being authenticated against a Red Hat SAML identity provider (IDP). The IDP authorizes tokens that have time-to-live expirations. The issuance of the token is auditable by corporate Red Hat IT and linked back to an individual user. + +[id="support-access_{context}"] +== Red Hat support access +Members of the Red Hat CEE team typically have read-only access to parts of the cluster. Specifically, CEE has limited access to the core and product namespaces and does not have access to the customer namespaces. + +[cols= "3,2a,2a,2a,2a",options="header"] + +|=== + +| Role | Core namespace | Layered product namespace | Customer namespace | Cloud infrastructure account^*^ + +|OpenShift SRE| Read: All + +Write: Very + +Limited ^[1]^ +| Read: All + +Write: None +| Read: None^[2]^ + +Write: None +|Read: All ^[3]^ + +Write: All ^[3]^ + + +|CEE +|Read: All + +Write: None + +|Read: All + +Write: None + +|Read: None^[2]^ + +Write: None + +|Read: None + +Write: None + + +|Customer administrator +|Read: None + +Write: None + +|Read: None + +Write: None + +| Read: All + +Write: All + +|Read: Limited^[4]^ + +Write: Limited^[4]^ + + +|Customer user +|Read: None + +Write: None + +|Read: None + +Write: None + +|Read: Limited^[5]^ + +Write: Limited^[5]^ + +|Read: None + +Write: None + + +|Everybody else +|Read: None + +Write: None +|Read: None + +Write: None +|Read: None + +Write: None +|Read: None + +Write: None + +|=== +[.small] +-- +Cloud Infrastructure Account refers to the underlying AWS or Google Cloud account + +1. Limited to addressing common use cases such as failing deployments, upgrading a cluster, and replacing bad worker nodes. +2. Red Hat associates have no access to customer data by default. +3. SRE access to the cloud infrastructure account is a "break-glass" procedure for exceptional troubleshooting during a documented incident. +4. 
Customer administrator has limited access to the cloud infrastructure account console through Cloud Infrastructure Access. +5. Limited to what is granted through RBAC by the customer administrator, as well as namespaces created by the user. +-- + +// TODO: The above uses an asterisk as a footnote I think for the first sentence (though it doesn't show it as a reference below the table), then numbers for the rest of the footnote items. I'd suggest bumping all the numbers and using a number for the first header asterisk as well. + +[id="customer-access_{context}"] +== Customer access +Customer access is limited to namespaces created by the customer and permissions that are granted using RBAC by the customer administrator role. Access to the underlying infrastructure or product namespaces is generally not permitted without `cluster-admin` access. More information on customer access and authentication can be found in the Understanding Authentication section of the documentation. + +// TODO: I don't think there is this "Understanding Authentication" section in the OSD docs + +[id="access-approval_{context}"] +== Access approval and review +New SRE user access requires management approval. Separated or transferred SRE accounts are removed as authorized users through an automated process. Additionally, SRE performs periodic access review including management sign-off of authorized user lists. diff --git a/modules/policy-incident.adoc b/modules/policy-incident.adoc new file mode 100644 index 0000000000..f841942e1b --- /dev/null +++ b/modules/policy-incident.adoc @@ -0,0 +1,91 @@ + +// Module included in the following assemblies: +// +// * assemblies/policy-process-security.adoc + +[id="policy-incident_{context}"] += Incident and operations management + + +This documentation details the Red Hat responsibilities for the {product-title} managed service. + +[id="platform-monitoring_{context}"] +== Platform monitoring +A Red Hat Site Reliability Engineer (SRE) maintains a centralized monitoring and alerting system for all {product-title} cluster components, SRE services, and underlying cloud provider accounts. Platform audit logs are securely forwarded to a centralized SIEM (Security Information and Event Monitoring) system, where they might trigger configured alerts to the SRE team and are also subject to manual review. Audit logs are retained in the SIEM for one year. Audit logs for a given cluster are not deleted at the time the cluster is deleted. + +[id="incident-management_{context}"] +== Incident management +An incident is an event that results in a degradation or outage of one or more Red Hat services. An incident can be raised by a customer or Customer Experience and Engagement (CEE) member through a support case, directly by the centralized monitoring and alerting system, or directly by a member of the SRE team. + +Depending on the impact on the service and customer, the incident is categorized in terms of link:https://access.redhat.com/support/offerings/production/sla[severity]. + +The general workflow of how a new incident is managed by Red Hat: + +. An SRE first responder is alerted to a new incident, and begins an initial investigation. +. After the initial investigation, the incident is assigned an incident lead, who coordinates the recovery efforts. +. The incident lead manages all communication and coordination around recovery, including any relevant notifications or support case updates. +. The incident is recovered. +. 
The incident is documented and a root cause analysis is performed within 3 business days of the incident. +. A root cause analysis (RCA) draft document is shared with the customer within 7 business days of the incident. + +[id="notifications_{context}"] +== Notifications +Platform notifications are configured using email. Any customer notification is also sent to the corresponding Red Hat account team and if applicable, the Red Hat Technical Account Manager. + +The following activities can trigger notifications: + +* Platform incident +* Performance degradation +* Cluster capacity warnings +* Critical vulnerabilities and resolution +* Upgrade scheduling + +[id="backup-recovery_{context}"] +== Backup and recovery +All {product-title} clusters are backed up using cloud provider snapshots. Notably, this does not include customer data stored on persistent volumes. All snapshots are taken using the appropriate cloud provider snapshot APIs and are uploaded to a secure object storage bucket (S3 in AWS, and GCS in Google Cloud) in the same account as the cluster. + +[cols= "3a,2a,2a,3a",options="header"] + +|=== +|Component +|Snapshot frequency +|Retention +|Notes + +.2+|Full object store backup, all SRE-managed cluster persistent volumes (PVs) +|Daily +|7 days +.2+|This is a full backup of all Kubernetes objects like etcd, as well as all SRE-managed PVs in the cluster. + +|Weekly +|30 days + + +|Full object store backup +|Hourly +|24 hour +|This is a full backup of all Kubernetes objects like etcd. No PVs are backed up in this backup schedule. + +|Node root volume +|Never +|N/A +|Nodes are considered to be short-term. Nothing critical should be stored on a node's root volume. + +|=== + +* Red Hat SRE rehearses recovery processes quarterly. +* Red Hat does not commit to any Recovery Point Objective (RPO) or Recovery Time Objective (RTO). +* Customers should take regular backups of their data. +* Backups performed by SRE are taken as a precautionary measure only. They are stored in the same region as the cluster. +* Customers can access SRE backup data on request by opening a support case. +* Red Hat highly encourages customers to deploy multi-AZ clusters with workloads that follow Kubernetes best practices to ensure high availability within a region. +* In the event an entire cloud region is unavailable, customers must install a new cluster in a different region and restore their apps using their backup data. + + +[id="cluster-capacity_{context}"] +== Cluster capacity +Evaluating and managing cluster capacity is a responsibility that is shared between Red Hat and the customer. Red Hat SRE is responsible for the capacity of all master and infrastructure nodes on the cluster. + +Red Hat SRE also evaluates cluster capacity during upgrades and in response to cluster alerts. The impact of a cluster upgrade on capacity is evaluated as part of the upgrade testing process to ensure that capacity is not negatively impacted by new additions to the cluster. During a cluster upgrade, additional worker nodes are added to make sure that total cluster capacity is maintained during the upgrade process. + +Capacity evaluations by SRE staff also happen in response to alerts from the cluster, once usage thresholds are exceeded for a certain period of time. Such alerts can also result in a notification to the customer. 
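+
+As a brief sketch of the customer side of this shared responsibility, the following commands show one way to review current worker node utilization with the OpenShift CLI (`oc`) before responding to a capacity notification. The output depends on your cluster; no specific values are implied here.
+
+[source,terminal]
+----
+# List the worker nodes in the cluster.
+$ oc get nodes -l node-role.kubernetes.io/worker
+
+# Show current CPU and memory consumption per node, using the cluster
+# monitoring stack that is installed by default.
+$ oc adm top nodes
+----
+
+If additional capacity is required, worker nodes can be added through the OpenShift Cluster Manager controls for the cluster's machine pools.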
diff --git a/modules/policy-responsibilities.adoc b/modules/policy-responsibilities.adoc new file mode 100644 index 0000000000..b0b2a355ac --- /dev/null +++ b/modules/policy-responsibilities.adoc @@ -0,0 +1,55 @@ + +// Module included in the following assemblies: +// +// * assemblies/policy-responsibility-matrix.adoc + +[id="policy-responsibilities_{context}"] += Overview of responsibilities for {product-title} + + +While Red Hat manages the {product-title} service, the customer shares responsibility with respect to certain aspects. The {product-title} services are accessed remotely, hosted on public cloud resources, created in either Red Hat or customer-owned cloud service provider accounts, and have underlying platform and data security that is owned by Red Hat. + +[IMPORTANT] +==== +If the `cluster-admin` role is enabled on a cluster, see the responsibilities and exclusion notes in the link:https://www.redhat.com/en/about/agreements[Red Hat Enterprise Agreement Appendix 4 (Online Subscription Services)]. +==== + +[cols="3,2a,2a,2a,2a,2a",options="header"] +|=== + +|Resource +|Incident and operations management +|Change management +|Identity and access management +|Security and regulation compliance +|Disaster recovery + +|Customer data |Customer |Customer |Customer |Customer |Customer + +|Customer applications |Customer |Customer |Customer |Customer |Customer + +|Developer services |Customer |Customer |Customer |Customer |Customer + +|Platform monitoring |Red Hat |Red Hat |Red Hat |Red Hat |Red Hat + +|Logging |Red Hat |Shared |Shared |Shared |Red Hat + +|Application networking |Shared |Shared |Shared |Red Hat |Red Hat + +|Cluster networking |Red Hat |Shared |Shared |Red Hat |Red Hat + +|Virtual networking |Shared |Shared |Shared |Shared |Shared + +|Master and infrastructure nodes |Red Hat |Red Hat |Red Hat |Red Hat |Red Hat + +|Worker nodes |Red Hat |Red Hat |Red Hat |Red Hat |Red Hat + +|Cluster version |Red Hat |Shared |Red Hat |Red Hat |Red Hat + +|Capacity management |Red Hat |Shared |Red Hat |Red Hat |Red Hat + +|Virtual storage |Red Hat and cloud provider |Red Hat and cloud provider |Red Hat and cloud provider |Red Hat and cloud provider |Red Hat and cloud provider + +|Physical infrastructure and security |Cloud provider |Cloud provider |Cloud provider |Cloud provider |Cloud provider + +|=== diff --git a/modules/policy-security-regulation-compliance.adoc b/modules/policy-security-regulation-compliance.adoc new file mode 100644 index 0000000000..e950e081b0 --- /dev/null +++ b/modules/policy-security-regulation-compliance.adoc @@ -0,0 +1,72 @@ + +// Module included in the following assemblies: +// +// * assemblies/policy-process-security.adoc + +[id="policy-security-regulation-compliance_{context}"] += Security and regulation compliance + +Security and regulation compliance includes tasks, such as the implementation of security controls and compliance certification. + +[id="data-classification_{context}"] +== Data classification +Red Hat defines and follows a data classification standard to determine the sensitivity of data and highlight inherent risk to the confidentiality and integrity of that data while it is collected, used, transmitted stored, and processed. Customer-owned data is classified at the highest level of sensitivity and handling requirements. + +[id="data-management_{context}"] +== Data management +{product-title} uses cloud provider services to help securely manage keys for encrypted data (AWS KMS and Google Cloud KMS). 
These keys are used for control plane data volumes which are encrypted by default. Persistent volumes for customer applications also use these cloud services for key management. + +When a customer deletes their {product-title} cluster, all cluster data is permanently deleted, including control plane data volumes, customer application data volumes (PVs), and backup data. + +[id="vulnerability-management_{context}"] +== Vulnerability management +Red Hat performs periodic vulnerability scanning of {product-title} using industry standard tools. Identified vulnerabilities are tracked to their remediation according to timelines based on severity. Vulnerability scanning and remediation activities are documented for verification by third-party assessors in the course of compliance certification audits. + +[id="network-security_{context}"] +== Network security + +[id="firewall_{context}"] +=== Firewall and DDoS protection +Each {product-title} cluster is protected by a secure network configuration at the cloud infrastructure level using firewall rules (AWS Security Groups or Google Cloud Compute Engine firewall rules). {product-title} customers on AWS are also protected against DDoS attacks with link:https://docs.aws.amazon.com/waf/latest/developerguide/ddos-overview.html[AWS Shield Standard]. + +[id="private-clusters_{context}"] +=== Private clusters and network connectivity +Customers can optionally configure their {product-title} cluster endpoints (web console, API, and application router) to be made private so that the cluster control plane or applications are not accessible from the Internet. + +For AWS, customers can configure a private network connection to their {product-title} cluster through AWS VPC peering, AWS VPN, or AWS Direct Connect. + +[NOTE] +==== +At this time, private clusters are not supported for {product-title} clusters on Google Cloud. +==== + +[id="network-access-controls_{context}"] +=== Cluster network access controls +Fine-grained network access control rules can be configured by customers per project by using `NetworkPolicy` objects and the OpenShift SDN. + +[id="penetration-testing_{context}"] +== Penetration testing +Red Hat performs periodic penetration tests against {product-title}. Tests are performed by an independent internal team using industry standard tools and best practices. + +Any issues that are discovered are prioritized based on severity. Any issues found belonging to open source projects are shared with the community for resolution. + +[id="compliance_{context}"] +== Compliance +{product-title} follows common industry best practices for security and controls. The certifications are outlined in the following table. + +.Security and control certifications for {product-title} +[cols= "3,3,3",options="header"] +|=== +| Certification | {product-title} on AWS | {product-title} on GCP + +| ISO 27001 | Yes | Yes + +| PCI DSS | Yes | Yes + +| SOC 1 | Yes | Yes + +| SOC 2 Type 1 | Yes | Yes + +| SOC 2 Type 2 | Yes | Yes + +|=== diff --git a/modules/policy-shared-responsibility.adoc b/modules/policy-shared-responsibility.adoc new file mode 100644 index 0000000000..9dc24f3083 --- /dev/null +++ b/modules/policy-shared-responsibility.adoc @@ -0,0 +1,168 @@ + +// Module included in the following assemblies: +// +// * assemblies/policy-responsibility-matrix.adoc + +[id="policy-shared-responsibility_{context}"] += Shared responsibility matrix + + +The customer and Red Hat share responsibility for the monitoring and maintenance of an {product-title} cluster. 
This documentation illustrates the delineation of responsibilities by area and task. + +[id="incident-operations-management_{context}"] +== Incident and operations management +The customer is responsible for incident and operations management of customer application data and any custom networking the customer might have configured for the cluster network or virtual network. + +[cols= "2a,3a,3a",options="header"] +|=== + +|Resource +|Red Hat responsibilities +|Customer responsibilities + +|Application networking +|Monitor cloud load balancers and native OpenShift router service, and respond to alerts. +|* Monitor health of service load balancer endpoints +* Monitor health of application routes, and the endpoints behind them. +* Report outages to Red Hat. + +|Virtual networking +|Monitor cloud load balancers, subnets, and public cloud components necessary for default platform networking, and respond to alerts. +|Monitor network traffic that is optionally configured through VPC to VPC connection, VPN connection, or Direct connection for potential issues or security threats. + +|=== + +[id="change-management_{context}"] +== Change management +Red Hat is responsible for enabling changes to the cluster infrastructure and services that the customer will control, as well as maintaining versions for the master nodes, infrastructure nodes and services, and worker nodes. The customer is responsible for initiating infrastructure change requests and installing and maintaining optional services and networking configurations on the cluster, as well as all changes to customer data and customer applications. + +[cols="2a,3a,3a",options="header"] +|=== + +|Resource +|Red Hat responsibilities +|Customer responsibilities + + +|Logging +|* Centrally aggregate and monitor platform audit logs. +* Provide and maintain a logging operator to enable the customer to deploy a logging stack for default application logging. +* Provide audit logs upon customer request. +|* Install the optional default application logging operator on the cluster. +* Install, configure, and maintain any optional app logging solutions, such as logging sidecar containers or third-party logging applications. +* Tune size and frequency of application logs being produced by customer applications if they are affecting the stability of the logging stack or the cluster. +* Request platform audit logs through a support case for researching specific incidents. + +|Application networking +|* Set up public cloud load balancers. Provide the ability to set up private load balancers and up to one additional load balancer when required. +* Set up native OpenShift router service. Provide the ability to set the router as private and add up to one additional router shard. +* Install, configure, and maintain OpenShift SDN components for default internal pod traffic. +* Provide the ability for the customer to manage `NetworkPolicy` and `EgressNetworkPolicy` (firewall) objects. +|* Configure non-default pod network permissions for project and pod networks, pod ingress, and pod egress using `NetworkPolicy` objects. +* Use OpenShift Cluster Manager to request a private load balancer for default application routes. +* Use OpenShift Cluster Manager to configure up to one additional public or private router shard and corresponding load balancer. +* Request and configure any additional service load balancers for specific services. +* Configure any necessary DNS forwarding rules. 
+ +|Cluster networking +|* Set up cluster management components, such as public or private service endpoints and necessary integration with virtual networking components. +* Set up internal networking components required for internal cluster communication between worker, infrastructure, and master nodes. +|* Provide optional non-default IP address ranges for machine CIDR, service CIDR, and pod CIDR if needed through OpenShift Cluster Manager when the cluster is provisioned. +* Request that the API service endpoint be made public or private on cluster creation or after cluster creation through OpenShift Cluster Manager. + +|Virtual networking +|* Set up and configure virtual networking components required to provision the cluster, including virtual private cloud, subnets, load balancers, internet gateways, NAT gateways, etc. +* Provide the ability for the customer to manage VPN connectivity with on-premises resources, VPC to VPC connectivity, and Direct connectivity as required through OpenShift Cluster Manager. +* Enable customers to create and deploy public cloud load balancers for use with service load balancers. +|* Set up and maintain optional public cloud networking components, such as VPC to VPC connection, VPN connection, or Direct connection. +* Request and configure any additional service load balancers for specific services. + +|Cluster version +|* Communicate schedule and status of upgrades for minor and maintenance versions. +* Publish changelogs and release notes for minor and maintenance upgrades. +|* Work with Red Hat to establish maintenance start times for upgrades. +* Test customer applications on minor and maintenance versions to ensure compatibility. + +|Capacity management +|* Monitor utilization of control plane (master nodes and infrastructure nodes). +* Scale or resize control plane nodes to maintain quality of service. +* Monitor utilization of customer resources including Network, Storage and Compute capacity. Where autoscaling features are not enabled alert customer for any changes required to cluster resources (for example, new compute nodes to scale, additional storage, etc). +|* Use the provided OpenShift Cluster Manager controls to add or remove additional worker nodes as required. +* Respond to Red Hat notifications regarding cluster resource requirements. + +|=== + +[id="identity-access-management_{context}"] +== Identity and access management +The Identity and Access Management matrix includes responsibilities for managing authorized access to clusters, applications, and infrastructure resources. This includes tasks such as providing access control mechanisms, authentication, authorization, and managing access to resources. + +[cols="2a,3a,3a",options="header"] +|=== +|Resource +|Red Hat responsibilities +|Customer responsibilities + +|Logging +|* Adhere to an industry standards-based tiered internal access process for platform audit logs. +* Provide native OpenShift RBAC capabilities. +|* Configure OpenShift RBAC to control access to projects and by extension a project’s application logs. +* For third-party or custom application logging solutions, the customer is responsible for access management. + +|Application networking +|Provide native OpenShift RBAC and `dedicated-admin` capabilities. +|* Configure OpenShift dedicated-admins and RBAC to control access to route configuration as required. +* Manage Org Admins for Red Hat organization to grant access to OpenShift Cluster Manager. 
OCM is used to configure router options and provide service load balancer quota. + +|Cluster networking +|* Provide customer access controls through OpenShift Cluster Manager. +* Provide native OpenShift RBAC and `dedicated-admin` capabilities. +|* Manage Red Hat organization membership of Red Hat accounts. +* Manage Org Admins for Red Hat organization to grant access to OpenShift Cluster Manager. +* Configure OpenShift dedicated-admins and RBAC to control access to route configuration as required. + +|Virtual networking +|Provide customer access controls through OpenShift Cluster Manager. +|Manage optional user access to public cloud components through OpenShift Cluster Manager. + +|=== + +[id="security-regulation-compliance_{context}"] +== Security and regulation compliance +The following are the responsibilities and controls related to compliance: + +[cols="2a,3a,3a",options="header"] +|=== + +|Resource +|Red Hat responsibilities +|Customer responsibilities + +|Logging +|Send cluster audit logs to a Red Hat SIEM to analyze for security events. Retain audit logs for a defined period of time to support forensic analysis. +|Analyze application logs for security events. Send application logs to an external endpoint through logging sidecar containers or third-party logging applications if longer retention is required than is offered by the default logging stack. + +|Virtual networking +|* Monitor virtual networking components for potential issues and security threats. +* Leverage additional public cloud provider tools for additional monitoring and protection. +|* Monitor optionally-configured virtual networking components for potential issues and security threats. +* Configure any necessary firewall rules or data center protections as required. + +|=== + +[id="disaster-recovery_{context}"] +== Disaster recovery +Disaster recovery includes data and configuration backup, replicating data and configuration to the disaster recovery environment, and failover on disaster events. + + +[cols="2a,3a,3a" ,options="header"] +|=== +|Resource +|Red Hat responsibilities +|Customer responsibilities + +|Virtual networking +|Restore or recreate affected virtual network components that are necessary for the platform to function. +|* Configure virtual networking connections with more than one tunnel where possible for protection against outages as recommended by the public cloud provider. +* Maintain failover DNS and load balancing if using a global load balancer with multiple clusters. + +|=== diff --git a/modules/rosa-about.adoc b/modules/rosa-about.adoc new file mode 100644 index 0000000000..0e6f0d3758 --- /dev/null +++ b/modules/rosa-about.adoc @@ -0,0 +1,11 @@ + + +// Module included in the following assemblies: +// +// * cli_reference/rosa_cli/rosa-get-started-cli.adoc + +[id="rosa-about_{context}"] += About the rosa CLI + + +Use the `rosa` command-line utility for {product-title} (ROSA) to create, update, manage, and delete {product-title} clusters and resources. diff --git a/modules/rosa-accessing-your-cluster-quick.adoc b/modules/rosa-accessing-your-cluster-quick.adoc new file mode 100644 index 0000000000..a5d94edf0a --- /dev/null +++ b/modules/rosa-accessing-your-cluster-quick.adoc @@ -0,0 +1,60 @@ + +// Module included in the following assemblies: +// +// getting_started_rosa/rosa-accessing-cluster.adoc + + +[id="rosa-accessing-your-cluster-quick_{context}"] += Accessing your cluster quickly + +You can use this quick access procedure to log in to your cluster. 
+ +[NOTE] +==== +As a best practice, access your cluster with an IDP account instead. +==== + +.Procedure + +. Enter the following command: ++ +[source,terminal] +---- +$ rosa create admin --cluster=<cluster_name> +---- ++ +.Example output +[source,terminal] +---- +W: It is recommended to add an identity provider to login to this cluster. See 'rosa create idp --help' for more information. +I: Admin account has been added to cluster 'cluster_name'. It may take up to a minute for the account to become active. +I: To login, run the following command: +oc login https://api.cluster-name.t6k4.i1.organization.org:6443 \ +--username cluster-admin \ +--password FWGYL-2mkJI-3ZTTZ-rINns +---- + +. Enter the `oc login` command, username, and password from the output of the previous command: + ++ +.Example output +[source,terminal] +---- +$ oc login https://api.cluster_name.t6k4.i1.organization.org:6443 \ +> --username cluster-admin \ +> --password FWGYL-2mkJI-3ZTTZ-rINns +Login successful. You have access to 77 projects, the list has been suppressed. You can list all projects with 'oc projects' +---- + +. Using the default project, enter this `oc` command to verify that the cluster administrator access is created: ++ +[source,terminal] +---- +$ oc whoami +---- ++ +.Example output +[source,terminal] +---- +cluster-admin +---- diff --git a/modules/rosa-accessing-your-cluster.adoc b/modules/rosa-accessing-your-cluster.adoc new file mode 100644 index 0000000000..37b4ba1f9e --- /dev/null +++ b/modules/rosa-accessing-your-cluster.adoc @@ -0,0 +1,144 @@ + +// Module included in the following assemblies: +// +// getting_started_rosa/rosa-accessing-cluster.adoc + + +[id="rosa-accessing-your-cluster_{context}"] += Accessing your cluster with an IDP account + +To log in to your cluster, you can configure an identity provider (IDP). This procedure uses GitHub as an example IDP. To view other supported IDPs, run the `rosa create idp --help` command. + +[NOTE] +==== +Alternatively, as the user who created the cluster, you can use the quick access procedure. +==== + +.Procedure + +To access your cluster using an IDP account: + +. Add an IDP. +.. The following command creates an IDP backed by GitHub. After running the command, follow the interactive prompts from the output to access your link:https://github.com/settings/developers[GitHub developer settings] and configure a new OAuth application. ++ +[source, terminal] +---- +$ rosa create idp --cluster=<cluster_name> --interactive +---- ++ +.. Enter the following values: ++ +-- +* Type of identity provider: `github` +* Restrict to members of: `organizations` (if you do not have a GitHub Organization, you can create one now) +* GitHub organizations: `rh-test-org` (enter the name of your organization) +-- ++ +.Example output +[source,terminal] +---- +I: Interactive mode enabled. +Any optional fields can be left empty and a default will be selected. +? Type of identity provider: github +? Restrict to members of: organizations +? GitHub organizations: rh-test-org +? To use GitHub as an identity provider, you must first register the application: + - Open the following URL: + https://github.com/organizations/rh-rosa-test-cluster/settings/applications/new?oauth_application%5Bcallback_url%5D=https%3A%2F%2Foauth-openshift.apps.rh-rosa-test-cluster.z7v0.s1.devshift.org%2Foauth2callback%2Fgithub-1&oauth_application%5Bname%5D=rh-rosa-test-cluster-stage&oauth_application%5Burl%5D=https%3A%2F%2Fconsole-openshift-console.apps.rh-rosa-test-cluster.z7v0.s1.devshift.org + - Click on 'Register application' +...
+---- ++ +.. Follow the URL from the output. This creates a new OAuth application in the GitHub organization you specified. +.. Click *Register application* to access your client ID and client secret. +.. Use the information from the GitHub application you created and continue the prompts. Enter the following values: ++ +-- +* Client ID: `<my_github_client_id>` +* Client Secret: [? for help] `<my_github_client_secret>` +* Hostname: (optional, you can leave it blank for now) +* Mapping method: `claim` +-- ++ +.Continued example output +[source,terminal] +---- +... +? Client ID: +? Client Secret: [? for help] +? Hostname: +? Mapping method: claim +I: Configuring IDP for cluster 'rh_rosa_test_cluster' +I: Identity Provider 'github-1' has been created. You need to ensure that there is a list of cluster administrators defined. See 'rosa create user --help' for more information. To login into the console, open https://console-openshift-console.apps.rh-test-org.z7v0.s1.devshift.org and click on github-1 +---- ++ +The IDP can take 1-2 minutes to be configured within your cluster. +.. Enter the following command to verify that your IDP has been configured correctly: ++ +[source,terminal] +---- +$ rosa list idps --cluster=<cluster_name> +---- ++ +.Example output +[source,terminal] +---- +NAME TYPE AUTH URL +github-1 GitHub https://oauth-openshift.apps.rh-rosa-test-cluster1.j9n4.s1.devshift.org/oauth2callback/github-1 +---- ++ +. Log in to your cluster. +.. Enter the following command to get the `Console URL` of your cluster: ++ +[source,terminal] +---- +$ rosa describe cluster --cluster=<cluster_name> +---- ++ +.Example output +[source,terminal] +---- +Name: rh-rosa-test-cluster1 +ID: 1de87g7c30g75qechgh7l5b2bha6r04e +External ID: 34322be7-b2a7-45c2-af39-2c684ce624e1 +API URL: https://api.rh-rosa-test-cluster1.j9n4.s1.devshift.org:6443 +Console URL: https://console-openshift-console.apps.rh-rosa-test-cluster1.j9n4.s1.devshift.org +Nodes: Master: 3, Infra: 3, Compute: 4 +Region: us-east-2 +State: ready +Created: May 27, 2020 +---- ++ +.. Navigate to the `Console URL`, and log in using your GitHub credentials. +.. In the top right of the OpenShift console, click your name and click **Copy Login Command**. +.. Select the name of the IDP you added (in this case, **github-1**), and click **Display Token**. +.. Copy and paste the `oc` login command into your terminal. ++ +[source,terminal] +---- +$ oc login --token=z3sgOGVDk0k4vbqo_wFqBQQTnT-nA-nQLb8XEmWnw4X --server=https://api.rh-rosa-test-cluster1.j9n4.s1.devshift.org:6443 +---- ++ +.Example output +[source,terminal] +---- +Logged into "https://api.rh-rosa-cluster1.j9n4.s1.devshift.org:6443" as "rh-rosa-test-user" using the token provided. + +You have access to 67 projects, the list has been suppressed. You can list all projects with 'oc projects' + +Using project "default". +---- +.. Enter a simple `oc` command to verify everything is set up properly and that you are logged in.
++ +[source,terminal] +---- +$ oc version +---- ++ +.Example output +[source,terminal] +---- +Client Version: 4.4.0-202005231254-4a4cd75 +Server Version: 4.3.18 +Kubernetes Version: v1.16.2 +---- diff --git a/modules/rosa-adding-instance-types.adoc b/modules/rosa-adding-instance-types.adoc new file mode 100644 index 0000000000..5a3e4d0ec0 --- /dev/null +++ b/modules/rosa-adding-instance-types.adoc @@ -0,0 +1,49 @@ + + + +// Module included in the following assemblies: +// +// * nodes/nodes/rosa-managing-worker-nodes.adoc + +[id="rosa-adding-instance-types_{context}"] += Adding instance types + + +After a machine pool is created, the instance type cannot be changed. To add a different instance type for worker nodes, you must create a new machine pool for the additional instance type. + +.Procedure + +. To add an instance type with a new machine pool, enter the following command: ++ +[source,terminal] +---- +$ rosa create machinepool --cluster=<cluster_name> --instance-type=<instance_type> +---- ++ +This example creates a new machine pool with the instance type `m5.2xlarge`, 2 replicas, and labels on a cluster named `mycluster`: ++ +[source,terminal] +---- +$ rosa create machinepool --cluster=mycluster db-nodes-large-mp --replicas=2 --labels=app=db,tier=backend --instance-type=m5.2xlarge +---- ++ +[NOTE] +==== +For a complete list of supported instance types, see the _Policies and Service Definition_. +==== + +. To verify that the machine pool was created with the instance type, enter the following command: ++ +[source,terminal] +---- +$ rosa list machinepools --cluster=<cluster_name> +---- ++ +.Example output +[source,terminal] +---- +ID AUTOSCALING REPLICAS INSTANCE TYPE LABELS TAINTS AVAILABILITY ZONES +default No 2 m5.xlarge us-east-1a +db-nodes-mp No 2 m5.xlarge app=db, tier=backend us-east-1a +db-nodes-large-mp No 2 m5.2xlarge app=db, tier=backend us-east-1a +---- diff --git a/modules/rosa-adding-node-labels.adoc b/modules/rosa-adding-node-labels.adoc new file mode 100644 index 0000000000..6bf728d6e8 --- /dev/null +++ b/modules/rosa-adding-node-labels.adoc @@ -0,0 +1,55 @@ +// Module included in the following assemblies: +// +// * nodes/nodes/rosa-managing-worker-nodes.adoc + +[id="rosa-adding-node-labels_{context}"] += Adding node labels + + +Add or edit labels for worker nodes at any time to manage the nodes in a manner that is relevant to you. For example, you can assign types of workloads to specific nodes. + +Labels are assigned as key=value pairs. Each key must be unique to the object it is assigned to. Labels do not change or impact the core system values, such as a machine pool ID. + +[NOTE] +==== +Currently, adding node labels on an existing machine pool adds the labels to only new nodes that are created in that pool. The existing nodes will not have the node label applied. If you need nodes with the node label, you can either scale down the nodes in the machine pool to zero (this will not work with the 'default' machine pool), add the node label, and then scale back up to the desired number of nodes. You can also create a new machine pool and define the node labels at creation time. +==== + +.Procedure + +. 
To create a new machine pool, add the node labels, and create replica worker nodes, enter the following command: ++ +[source,terminal] +---- +$ rosa create machinepool --cluster=<cluster_name> --name=<machinepool_name> --replicas=<replica_count> --labels=<key>=<value> +---- ++ +This example shows how to use labels to assign a database workload to a group of worker nodes, and creates 2 replica worker nodes that you can manage as one unit: ++ +[source,terminal] +---- +$ rosa create machinepool --cluster=mycluster --name=db-nodes-mp --replicas=2 --labels=app=db,tier=backend +---- ++ +.Example output +[source,terminal] +---- +I: Machine pool 'db-nodes-mp' created successfully on cluster 'mycluster' +---- + +.Verification + +. To verify that the machine pool, labels, and replicas were created, enter the following command: ++ +[source,terminal] +---- +$ rosa list machinepools --cluster=<cluster_name> +---- ++ +.Example output +[source,terminal] +---- +ID AUTOSCALING REPLICAS INSTANCE TYPE LABELS TAINTS AVAILABILITY ZONES +default No 2 m5.xlarge us-east-1a +db-nodes-mp No 2 m5.xlarge app=db, tier=backend us-east-1a +---- diff --git a/modules/rosa-architecture.adoc b/modules/rosa-architecture.adoc new file mode 100644 index 0000000000..14165055ea --- /dev/null +++ b/modules/rosa-architecture.adoc @@ -0,0 +1,10 @@ +[id="rosa-architecture_{context}"] += ROSA architecture on public and private networks + +You can install ROSA using either a public or private network. Configure a private cluster and private network connection during or after the cluster creation process. +Red Hat manages the cluster with limited access through a public network. For more information, see the Service Definition. + +.ROSA deployed on public and private networks +image::156_OpenShift_ROSA_Arch_0621_arch.svg[ROSA deployed on public and private networks] + +Alternatively, install a cluster using AWS PrivateLink, which is hosted on private subnets only. diff --git a/modules/rosa-aws-iam.adoc b/modules/rosa-aws-iam.adoc new file mode 100644 index 0000000000..85ff44c79d --- /dev/null +++ b/modules/rosa-aws-iam.adoc @@ -0,0 +1,40 @@ + + + +// Module included in the following assemblies: +// +// * assemblies/rosa-aws-prereqs.adoc + +[id="rosa-policy-iam_{context}"] += Red Hat managed IAM references for AWS + + +Red Hat is responsible for creating and managing the following Amazon Web Services (AWS) resources: IAM policies, IAM users, and IAM roles. + +[id="rosa-iam-policies_{context}"] +== IAM policies + +[NOTE] +==== +IAM policies are subject to modification as the capabilities of {product-title} change. +==== + +* The `AdministratorAccess` policy is used by the administration role. This policy provides Red Hat the access necessary to administer the {product-title} (ROSA) cluster in the customer's AWS account. ++ +[source,json] +---- +{ + "Version": "2012-10-17", + "Statement": [ + { + "Action": "*", + "Resource": "*", + "Effect": "Allow" + } + ] +} +---- + +[id="rosa-iam-users_{context}"] +== IAM users + +The `osdManagedAdmin` user is created immediately after installing ROSA into the customer's AWS account. diff --git a/modules/rosa-aws-privatelink-create-cluster.adoc b/modules/rosa-aws-privatelink-create-cluster.adoc new file mode 100644 index 0000000000..74b4797215 --- /dev/null +++ b/modules/rosa-aws-privatelink-create-cluster.adoc @@ -0,0 +1,59 @@ +[id="rosa-aws-privatelink-create-cluster_{context}"] += Creating an AWS PrivateLink cluster + +You can create an AWS PrivateLink cluster using the `rosa` CLI. + +[NOTE] +==== +AWS PrivateLink is supported on existing VPCs only. 
+==== + +.Prerequisites + +You have installed {product-title}. + +.Procedure + +Creating a cluster can take up to 40 minutes. + +. With AWS PrivateLink, you can create a cluster with a single availability zone (Single-AZ) or multiple availability zones (Multi-AZ). In either case, the machine classless inter-domain routing (CIDR) range of your cluster must match the CIDR range of your virtual private cloud (VPC). See link:https://docs.openshift.com/container-platform/4.7/installing/installing_aws/installing-aws-vpc.html#installation-custom-aws-vpc-requirements_installing-aws-vpc[Requirements for using your own VPC] and link:https://docs.openshift.com/container-platform/4.7/installing/installing_aws/installing-aws-vpc.html#installation-custom-aws-vpc-validation_installing-aws-vpc[VPC Validation] for more information. ++ +[IMPORTANT] +==== +If you use a firewall, you must configure it so that {product-title} can access the sites that it requires to function. + +For more information, see the AWS PrivateLink firewall prerequisites section. +==== + + +** To create a Single-AZ cluster: ++ +[source,terminal] +---- +$ rosa create cluster --private-link --cluster-name=<cluster_name> [--machine-cidr=<machine_cidr>/16] --subnet-ids=<private_subnet_id> +---- +** To create a Multi-AZ cluster: ++ +[source,terminal] +---- +$ rosa create cluster --private-link --multi-az --cluster-name=<cluster_name> [--machine-cidr=<machine_cidr>/16] --subnet-ids=<private_subnet_id1>,<private_subnet_id2>,<private_subnet_id3> +---- + +. Enter the following command to check the status of your cluster. During cluster creation, the `State` field from the output will transition from `pending` to `installing`, and finally to `ready`. ++ +[source, terminal] +---- +$ rosa describe cluster --cluster=<cluster_name> +---- ++ +[NOTE] +==== +If installation fails or the `State` field does not change to `ready` after 40 minutes, check the installation troubleshooting documentation for more details. +==== + +. Enter the following command to follow the OpenShift installer logs to track the progress of your cluster: ++ +[source, terminal] +---- +$ rosa logs install --cluster=<cluster_name> --watch +---- diff --git a/modules/rosa-aws-procedure.adoc b/modules/rosa-aws-procedure.adoc new file mode 100644 index 0000000000..6b84c6794a --- /dev/null +++ b/modules/rosa-aws-procedure.adoc @@ -0,0 +1,18 @@ + + + +// Module included in the following assemblies: +// +// * assemblies/rosa-aws-prereqs.adoc + +[id="rosa-required-procedure_{context}"] += Required customer procedure + + +Complete these steps before deploying {product-title} (ROSA). + +.Procedure +. If you, as the customer, are using AWS Organizations, you must use an AWS account within your organization or link:https://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_create.html#orgs_manage_accounts_create-new[create a new one]. +. To ensure that Red Hat can perform necessary actions, you must either create a Service Control Policy (SCP) or ensure that none is applied to the AWS account. +. link:https://docs.aws.amazon.com/organizations/latest/userguide/orgs_introduction.html[Attach] the SCP to the AWS account. +. Follow the ROSA procedures for setting up the environment, as sketched below. 
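+
+As a minimal sketch of what setting up the environment typically involves, assuming the `rosa` CLI and the AWS CLI are already installed and an AWS profile for the target account is configured, the verification and initialization steps look similar to the following. Refer to the ROSA getting started documentation and `rosa --help` for the authoritative steps and current command set.
+
+[source,terminal]
+----
+# Log in to your Red Hat account with the rosa CLI.
+$ rosa login
+
+# Check that the AWS account has the permissions and service quota that ROSA requires.
+$ rosa verify permissions
+$ rosa verify quota
+
+# Prepare the AWS account for cluster installation.
+$ rosa init
+----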
diff --git a/modules/rosa-aws-provisioned.adoc b/modules/rosa-aws-provisioned.adoc new file mode 100644 index 0000000000..4e9ce9eff7 --- /dev/null +++ b/modules/rosa-aws-provisioned.adoc @@ -0,0 +1,118 @@ +// Module included in the following assemblies: +// +// * rosa_getting_started/rosa-aws-prereqs.adoc + +[id="rosa-aws-policy-provisioned_{context}"] += Provisioned AWS Infrastructure + + +This is an overview of the provisioned Amazon Web Services (AWS) components on a deployed {product-title} (ROSA) cluster. For a more detailed listing of all provisioned AWS components, see the link:https://access.redhat.com/documentation/en-us/openshift_container_platform/[OpenShift Container Platform documentation]. + +[id="rosa-ec2-instances_{context}"] +== EC2 instances + +AWS EC2 instances are required for deploying the control plane and data plane functions of ROSA in the AWS public cloud. + +Instance types can vary for control plane and infrastructure nodes, depending on the worker node count. At a minimum, the following EC2 instances will be deployed: + +- Three `m5.2xlarge` control plane nodes +- Two `r5.xlarge` infrastructure nodes +- Two `m5.xlarge` customizable worker nodes + +For further guidance on worker node counts, see the link to "Initial Planning Considerations" in the "Additional resources" section of this page. + +[id="rosa-ebs-storage_{context}"] +== Elastic Block Storage storage + +Amazon EBS block storage is used for both local node storage and persistent volume storage. + +Volume requirements for each EC2 instance: + +- Control Plane Volume +* Size: 350GB +* Type: io1 +* Input/Output Operations Per Second: 1000 + +- Infrastructure Volume +* Size: 300GB +* Type: gp2 +* Input/Output Operations Per Second: 100 + +- Worker Volume +* Size: 300GB +* Type: gp2 +* Input/Output Operations Per Second: 100 + +[id="rosa-elastic-load-balancers_{context}"] +== Elastic load balancers + +Up to two Network Elastic Load Balancers (ELBs) for API and up to two Classic ELBs for application router. For more information, see the link:https://aws.amazon.com/elasticloadbalancing/features/#Details_for_Elastic_Load_Balancing_Products[ELB documentation for AWS]. + +[id="rosa-s3-storage_{context}"] +== S3 storage +The image registry and Elastic Block Store (EBS) volume snapshots are backed by AWS S3 storage. Pruning of resources is performed regularly to optimize S3 usage and cluster performance. + +[NOTE] +==== +Two buckets are required with a typical size of 2TB each. +==== + +[id="rosa-vpc_{context}"] +== VPC +Customers should expect to see one VPC per cluster. Additionally, the VPC will need the following configurations: + +* *Subnets*: Two subnets for a cluster with a single availability zone, or six subnets for a cluster with multiple availability zones. + +* *Router tables*: One router table per private subnet, and one additional table per cluster. + +* *Internet gateways*: One Internet Gateway per cluster. + +* *NAT gateways*: One NAT Gateway per public subnet. + +[id="rosa-security-groups_{context}"] +== Security groups + +AWS security groups provide security at the protocol and port access level; they are associated with EC2 instances and Elastic Load Balancers. Each security group contains a set of rules that filter traffic coming in and out of an EC2 instance. You must ensure the ports required for the OpenShift installation are open on your network and configured to allow access between hosts. 
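+
+The following is a minimal sketch of one way to review the ingress rules on a provisioned security group with the AWS CLI after the cluster is deployed; the group-name pattern is a hypothetical placeholder, so substitute the names of the groups created for your cluster. The table that follows lists the groups and port ranges that are required:
+
+[source,terminal]
+----
+# List protocol and port ranges for security groups whose names match a placeholder pattern.
+$ aws ec2 describe-security-groups \
+    --filters "Name=group-name,Values=*master*" \
+    --query "SecurityGroups[].IpPermissions[].{Protocol:IpProtocol,FromPort:FromPort,ToPort:ToPort}" \
+    --output table
+----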
+ +[cols="2a,2a,2a,2a",options="header"] +|=== + +|Group +|Type +|IP Protocol +|Port range + + +.4+|MasterSecurityGroup +.4+|`AWS::EC2::SecurityGroup` +|`icmp` +|`0` + +|`tcp` +|`22` + +|`tcp` +|`6443` + +|`tcp` +|`22623` + +.2+|WorkerSecurityGroup +.2+|`AWS::EC2::SecurityGroup` +|`icmp` +|`0` + +|`tcp` +|`22` + + +.2+|BootstrapSecurityGroup +.2+|`AWS::EC2::SecurityGroup` + +|`tcp` +|`22` + +|`tcp` +|`19531` + +|=== diff --git a/modules/rosa-aws-requirements.adoc b/modules/rosa-aws-requirements.adoc new file mode 100644 index 0000000000..00fc55e0fa --- /dev/null +++ b/modules/rosa-aws-requirements.adoc @@ -0,0 +1,56 @@ +// Module included in the following assemblies: +// +// * rosa_getting_started/rosa-aws-prereqs.adoc + +[id="rosa-customer-requirements_{context}"] += Customer Requirements + +{product-title} (ROSA) clusters must meet several prerequisites before they can be deployed. +[NOTE] +==== +In order to create the cluster, the user must be logged in as an IAM user and not an assumed role or STS user. +==== + +[id="rosa-account_{context}"] +== Account +* The customer ensures that the link:https://docs.aws.amazon.com/general/latest/gr/aws_service_limits.html[AWS limits] are sufficient to support {product-title} provisioned within the customer's AWS account. +* The customer's AWS account should be in the customer’s AWS Organizations with the applicable Service Control Policy (SCP) applied. ++ +[NOTE] +==== +It is not a requirement that the customer's account be within the AWS Organizations or for the SCP to be applied, however Red Hat must be able to perform all the actions listed in the SCP without restriction. +==== + +* The customer's AWS account should not be transferable to Red Hat. +* The customer may not impose AWS usage restrictions on Red Hat activities. Imposing restrictions will severely hinder Red Hat’s ability to respond to incidents. +* The customer may deploy native AWS services within the same AWS account. ++ +[NOTE] +==== +Customers are encouraged, but not mandated, to deploy resources in a Virtual Private Cloud (VPC) separate from the VPC hosting {product-title} and other Red Hat supported services. +==== + +[id="rosa-access-requirements_{context}"] +== Access requirements +* To appropriately manage the {product-title} service, Red Hat must have the `AdministratorAccess` policy applied to the administrator role at all times. This requirement does *not* apply if you are using AWS Security Token Service (STS). ++ +[NOTE] +==== +This policy only provides Red Hat with permissions and capabilities to change resources in the customer-provided AWS account. +==== +* Red Hat must have AWS console access to the customer-provided AWS account. This access is protected and managed by Red Hat. +* The customer must not utilize the AWS account to elevate their permissions within the {product-title} cluster. +* Actions available in the `rosa` CLI utility or link:https://cloud.redhat.com/openshift[OpenShift Cluster Manager (OCM)] console must not be directly performed in the customer's AWS account. + +[id="rosa-support-requirements_{context}"] +== Support requirements +* Red Hat recommends that the customer have at least link:https://aws.amazon.com/premiumsupport/plans/[Business Support] from AWS. +* Red Hat has authority from the customer to request AWS support on their behalf. +* Red Hat has authority from the customer to request AWS resource limit increases on the customer's account. 
+* Red Hat manages the restrictions, limitations, expectations, and defaults for all {product-title} clusters in the same manner, unless otherwise specified in this requirements section. + +[id="rosa-security-requirements_{context}"] +== Security requirements +* Volume snapshots will remain within the customer's AWS account and customer-specified region. +* Red Hat must have ingress access to EC2 hosts and the API server from allow-listed IP addresses. +* Red Hat must have egress allowed to forward system and audit logs to a Red Hat managed central logging stack. diff --git a/modules/rosa-aws-scp.adoc b/modules/rosa-aws-scp.adoc new file mode 100644 index 0000000000..bc8401d121 --- /dev/null +++ b/modules/rosa-aws-scp.adoc @@ -0,0 +1,208 @@ +// Module included in the following assemblies: +// +// * rosa_getting_started/rosa-aws-prereqs.adoc + +[id="rosa-minimum-scp_{context}"] +== Minimum required Service Control Policy (SCP) + +Service Control Policy (SCP) management is the responsibility of the customer. These policies are maintained in the AWS Organizations and control what services are available within the attached AWS accounts. + +[NOTE] +==== +The minimum SCP requirement does not apply when using AWS security token service (STS). For more information about STS, see link:https://docs.openshift.com/rosa/rosa_getting_started_sts/rosa-sts-aws-prereqs.html[AWS prerequisites for ROSA with STS]. +==== + +[cols="2a,2a,2a,2a",options="header"] + +|=== +| +| Service +| Actions +| Effect + +.15+| Required +|Amazon EC2 | All |Allow +|Amazon EC2 Auto Scaling | All |Allow +|Amazon S3| All |Allow +|Identity And Access Management | All |Allow +|Elastic Load Balancing | All |Allow +|Elastic Load Balancing V2| All |Allow +|Amazon CloudWatch | All |Allow +|Amazon CloudWatch Events | All |Allow +|Amazon CloudWatch Logs | All |Allow +|AWS Support | All |Allow +|AWS Key Management Service | All |Allow +|AWS Security Token Service | All |Allow +|AWS Resource Tagging | All |Allow +|AWS Route53 DNS | All |Allow +|AWS Service Quotas | ListServices + +GetRequestedServiceQuotaChange + +GetServiceQuota + +RequestServiceQuotaIncrease + +ListServiceQuotas +| Allow + + +.3+|Optional + +| AWS Billing +| ViewAccount + +Viewbilling + +ViewUsage +| Allow + +|AWS Cost and Usage Report +|All +|Allow + +|AWS Cost Explorer Services +|All +|Allow + + +|=== + +---- +{ + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Action": [ + "ec2:*" + ], + "Resource": [ + "*" + ] + }, + { + "Effect": "Allow", + "Action": [ + "autoscaling:*" + ], + "Resource": [ + "*" + ] + }, + { + "Effect": "Allow", + "Action": [ + "s3:*" + ], + "Resource": [ + "*" + ] + }, + { + "Effect": "Allow", + "Action": [ + "iam:*" + ], + "Resource": [ + "*" + ] + }, + { + "Effect": "Allow", + "Action": [ + "elasticloadbalancing:*" + ], + "Resource": [ + "*" + ] + }, + { + "Effect": "Allow", + "Action": [ + "cloudwatch:*" + ], + "Resource": [ + "*" + ] + }, + { + "Effect": "Allow", + "Action": [ + "events:*" + ], + "Resource": [ + "*" + ] + }, + { + "Effect": "Allow", + "Action": [ + "logs:*" + ], + "Resource": [ + "*" + ] + }, + { + "Effect": "Allow", + "Action": [ + "support:*" + ], + "Resource": [ + "*" + ] + }, + { + "Effect": "Allow", + "Action": [ + "kms:*" + ], + "Resource": [ + "*" + ] + }, + { + "Effect": "Allow", + "Action": [ + "sts:*" + ], + "Resource": [ + "*" + ] + }, + { + "Effect": "Allow", + "Action": [ + "tag:*" + ], + "Resource": [ + "*" + ] + }, + { + "Effect": "Allow", + "Action": [ + "route53:*" + ], + "Resource": [ + 
"*" + ] + }, + { + "Effect": "Allow", + "Action": [ + "servicequotas:ListServices", + "servicequotas:GetRequestedServiceQuotaChange", + "servicequotas:GetServiceQuota", + "servicequotas:RequestServiceQuotaIncrease", + "servicequotas:ListServiceQuotas" + ], + "Resource": [ + "*" + ] + } + ] +} + +---- diff --git a/modules/rosa-aws-understand.adoc b/modules/rosa-aws-understand.adoc new file mode 100644 index 0000000000..45df1bfe0f --- /dev/null +++ b/modules/rosa-aws-understand.adoc @@ -0,0 +1,14 @@ + + + +// Module included in the following assemblies: +// +// * assemblies/rosa-aws-prereqs.adoc + +[id="rosa-aws-prereqs_{context}"] += Deployment Prerequisites +To deploy {product-title} (ROSA) into your existing Amazon Web Services (AWS) account, Red Hat requires that several prerequisites are met. + +Red Hat recommends the usage of AWS Organizations to manage multiple AWS accounts. The AWS Organizations, managed by the customer, host multiple AWS accounts. There is a root account in the organization that all accounts will refer to in the account hierarchy. + +It is a best practice for the ROSA cluster to be hosted in an AWS account within an AWS Organizational Unit. A Service Control Policy (SCP) is created and applied to the AWS Organizational Unit that manages what services the AWS sub-accounts are permitted to access. The SCP applies only to available permissions within a single AWS account for all AWS sub-accounts within the Organizational Unit. It is also possible to apply a SCP to a single AWS account. All other accounts in the customer’s AWS Organizations are managed in whatever manner the customer requires. Red Hat Site Reliability Engineers (SRE) will not have any control over SCPs within AWS Organizations. diff --git a/modules/rosa-checking-account-version-info-cli.adoc b/modules/rosa-checking-account-version-info-cli.adoc new file mode 100644 index 0000000000..ca98bebf07 --- /dev/null +++ b/modules/rosa-checking-account-version-info-cli.adoc @@ -0,0 +1,78 @@ +// Module included in the following assemblies: +// +// * cli_reference/rosa_cli/rosa-checking-acct-version-cli.adoc + +[id="rosa-checking-account-version-information_{context}"] += Checking account and version information with the rosa CLI + +Use the following commands to check your account and version information. + +[id="rosa-whoami_{context}"] +== whoami + +Display information about your AWS and Red Hat accounts. + +.Syntax +[source,terminal] +---- +$ rosa whoami [arguments] +---- + +.Optional arguments inherited from parent commands +[cols="30,70"] +|=== +|Option |Definition + +|--help +|Shows help for this command. + +|--debug +|Enables debug mode. + +|--profile +|Specifies an AWS profile (string) from your credentials file. + +|--v level +|Log level for V logs. +|=== + +.Example +[source,terminal] +---- +$ rosa whoami +---- + +[id="rosa-version_{context}"] +== version + +Display the version of your `rosa` CLI. + +.Syntax +[source,terminal] +---- +$ rosa version [arguments] +---- + +.Optional arguments inherited from parent commands +[cols="30,70"] +|=== +|Option |Definition + +|--help +|Shows help for this command. + +|--debug +|Enables debug mode. + +|--profile +|Specifies an AWS profile (string) from your credentials file. + +|--v level +|Log level for V logs. 
+|=== + +.Example +[source,terminal] +---- +$ rosa version +---- diff --git a/modules/rosa-common-commands.adoc b/modules/rosa-common-commands.adoc new file mode 100644 index 0000000000..0dd2e8fbf1 --- /dev/null +++ b/modules/rosa-common-commands.adoc @@ -0,0 +1,83 @@ + +// Module included in the following assemblies: +// +// * cli_reference/rosa_cli/rosa-manage-objects-cli.adoc + +[id="rosa-common-commands_{context}"] += Common commands and arguments + + +These common commands and arguments are available for the `rosa` CLI. + +[id="rosa-debug_{context}"] +== debug + +Enables debug mode for the parent command. + +.Example +[source,terminal] +---- +$ rosa create cluster --cluster= --debug +---- + +[id="rosa-help_{context}"] +== help + +Displays general help information for the `rosa` CLI and a list of available commands. This option can also be used as an argument to display help information for a parent command, such as `version` or `create`. + +.Examples +Displays general help for the `rosa` CLI: +[source,terminal] +---- +$ rosa --help +---- + +Displays general help for `version`: +[source,terminal] +---- +$ rosa version --help +---- + +[id="rosa-interactive_{context}"] +== interactive + +Enables interactive mode. + +.Example +[source,terminal] +---- +$ rosa create cluster --cluster= --interactive +---- + +[id="rosa-profile-string_{context}"] +== profile + +Specifies an AWS profile from your credential file. + +.Example +[source,terminal] +---- +$ rosa create cluster --cluster= --profile=myAWSprofile +---- + +[id="rosa-vlevel_{context}"] +== v level + +Specifies the log level for V logs. + +.Example +[source,terminal] +---- +$ rosa create cluster --cluster= --v +---- + +[id="rosa-version_{context}"] +== version + +Displays the `rosa` version. + +.Example +[source,terminal] +---- +$ rosa version [arguments] +---- diff --git a/modules/rosa-configure.adoc b/modules/rosa-configure.adoc new file mode 100644 index 0000000000..cdf4158ec8 --- /dev/null +++ b/modules/rosa-configure.adoc @@ -0,0 +1,271 @@ + +// Module included in the following assemblies: +// +// * cli_reference/rosa_cli/rosa-get-started-cli.adoc + +[id="rosa-configure_{context}"] += Configuring the rosa CLI + + +Use the following commands to configure the `rosa` CLI. + +[id="rosa-login_{context}"] +== login + +Log in to your Red Hat account, saving the credentials to the `rosa` configuration file. You must provide a token when logging in. You can copy your token from link:https://cloud.redhat.com/openshift/token/rosa[the {product-title} token page]. + +The `rosa` CLI looks for a token in the following priority order: + +. Command-line arguments +. The `ROSA_TOKEN` environment variable +. The `rosa` configuration file +. Interactively from a command-line prompt + +.Syntax +[source,terminal] +---- +$ rosa login [arguments] +---- + +.Arguments +[cols="30,70"] +|=== +|Option |Definition + +|--client-id +|The OpenID client identifier (string). Default: `cloud-services` + +|--client-secret +|The OpenID client secret (string). + +|--insecure +|Enables insecure communication with the server. This disables verification of TLS certificates and host names. + +|--scope +|The OpenID scope (string). If this option is used, it replaces the default scopes. This can be repeated multiple times to specify multiple scopes. Default: `openid` + +|--token +|Accesses or refreshes the token (string). + +|--token-url +|The OpenID token URL (string). 
Default: `\https://sso.redhat.com/auth/realms/redhat-external/protocol/openid-connect/token` +|=== + +.Optional arguments inherited from parent commands +[cols="30,70"] +|=== +|Option |Definition + +|--help +|Shows help for this command. + +|--debug +|Enables debug mode. + +|--profile +|Specifies an AWS profile (string) from your credentials file. + +|--v +|The log level for V logs. +|=== + +[id="rosa-logout_{context}"] +== logout + +Log out of `rosa`. Logging out also removes the `rosa` configuration file. + +.Syntax +[source,terminal] +---- +$ rosa logout [arguments] +---- + +.Optional arguments inherited from parent commands +[cols="30,70"] +|=== +|Option |Definition + +|--help +|Shows help for this command. + +|--debug +|Enables debug mode. + +|--profile +|Specifies an AWS profile (string) from your credentials file. + +|--v +|The log level for V logs. +|=== + +[id="rosa-verify-permissions_{context}"] +== verify permissions + +Verify that the AWS permissions required to create a `rosa` cluster are configured correctly. + +.Syntax +[source,terminal] +---- +$ rosa verify permissions [arguments] +---- + +.Optional arguments inherited from parent commands +[cols="30,70"] +|=== +|Option |Definition + +|--help +|Shows help for this command. + +|--debug +|Enables debug mode. + +|--region +|The AWS region (string) in which to run the command. This value overrides the `AWS_REGION` environment variable. + +|--profile +|Specifies an AWS profile (string) from your credentials file. + +|--v +|The log level for V logs. +|=== + +.Examples +Verify that the AWS permissions are configured correctly: +[source,terminal] +---- +$ rosa verify permissions +---- + +Verify that the AWS permissions are configured correctly in a specific region: + +[source,terminal] +---- +$ rosa verify permissions --region=us-west-2 +---- + +[id="rosa-verify-quota_{context}"] +== verify quota + +Verifies that AWS quotas are configured correctly for your default region. + +.Syntax +[source,terminal] +---- +$ rosa verify quota [arguments] +---- + +.Optional arguments inherited from parent commands +[cols="30,70"] +|=== +|Option |Definition + +|--help +|Shows help for this command. + +|--debug +|Enables debug mode. + +|--region +|The AWS region (string) in which to run the command. This value overrides the `AWS_REGION` environment variable. + +|--profile +|Specifies an AWS profile (string) from your credentials file. + +|--v +|The log level for V logs. +|=== + +.Examples +Verify that the AWS quotas are configured correctly for the default region: + +[source,terminal] +---- +$ rosa verify quota +---- + +Verify that the AWS quotas are configured correctly in a specific region: + +[source,terminal] +---- +$ rosa verify quota --region=us-west-2 +---- + +[id="rosa-download-ocp-client_{context}"] +== download oc + +Download the latest compatible version of the OpenShift Container Platform CLI (`oc`). + +After downloading `oc`, you must unzip the archive and add it to your path. + +.Syntax +[source,terminal] +---- +$ rosa download oc [arguments] +---- + +.Optional arguments inherited from parent commands +[cols="30,70"] +|=== +|Option |Definition + +|--help +|Shows help for this command. + +|--debug +|Enables debug mode. + +|--profile +|Specifies an AWS profile (string) from your credentials file. + +|--v +|The log level for V logs. 
+|=== + +.Example +Download `oc` client tools: + +[source,terminal] +---- +$ rosa download oc +---- + +[id="rosa-verify-ocp-client_{context}"] +== verify oc + +Verifies that the OpenShift Container Platform CLI (`oc`) is installed correctly. + +.Syntax +[source,terminal] +---- +$ rosa verify oc [arguments] +---- + +.Optional arguments inherited from parent commands +[cols="30,70"] +|=== +|Option |Definition + +|--help +|Shows help for this command. + +|--debug +|Enables debug mode. + +|--region +|The AWS region (string) in which to run the command. This value overrides the AWS_REGION environment variable. + +|--profile +|Specifies an AWS profile (string) from your credentials file. + +|--v +|The log level for V logs. +|=== + +.Example +Verify `oc` client tools: + +[source,terminal] +---- +$ rosa verify oc +---- diff --git a/modules/rosa-configuring-aws-account.adoc b/modules/rosa-configuring-aws-account.adoc new file mode 100644 index 0000000000..1db94cdcf2 --- /dev/null +++ b/modules/rosa-configuring-aws-account.adoc @@ -0,0 +1,76 @@ + +// Module included in the following assemblies: +// +// * rosa-getting-started/rosa-config-aws-account.adoc + + +[id="rosa-configuring-aws-account_{context}"] += Configuring your AWS account + +To configure your AWS account to use the ROSA service, complete the following steps. + +.Prerequisites + +* Review and complete the deployment prerequisites and policies. +* Create a link:https://cloud.redhat.com[Red Hat account], if you do not already have one. Then, check your email for a verification link. You will need these credentials to install ROSA. + +.Procedure + +. Log in to the Amazon Web Services (AWS) account that you want to use. ++ +A dedicated AWS account is recommended to run production clusters. If you are using AWS Organizations, you can use an AWS account within your organization or link:https://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_create.html#orgs_manage_accounts_create-new[create a new one]. ++ +If you are using AWS Organizations and you need to have a Service Control Policy (SCP) applied to the AWS account you plan to use, see AWS Prerequisites for details on the minimum required SCP. ++ +As part of the cluster creation process, `rosa` establishes an `osdCcsAdmin` IAM user. This user uses the IAM credentials you provide when configuring the AWS CLI. ++ +[NOTE] +==== +This user has `Programmatic` access enabled and the `AdministratorAccess` policy attached to it. +==== ++ +. Enable the ROSA service in the AWS Console. +.. Sign in to your link:https://console.aws.amazon.com/rosa/home[AWS account]. +.. To enable ROSA, go to the link:https://console.aws.amazon.com/rosa/[ROSA service] and select *Enable OpenShift*. + +. Install and configure the AWS CLI. +.. Follow the AWS command-line interface documentation to link:https://docs.aws.amazon.com/cli/latest/userguide/cli-chap-install.html[install] and link:https://docs.aws.amazon.com/cli/latest/userguide/cli-chap-configure.html[configure] the AWS CLI for your operating system. ++ +Specify the correct `aws_access_key_id` and `aws_secret_access_key` in the `.aws/credentials` file. See link:https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-quickstart.html[AWS Configuration basics] in the AWS documentation. + +.. Set a default AWS region. ++ +[NOTE] +==== +It is recommended to set the default AWS region by using the environment variable. +==== ++ +The ROSA service evaluates regions in the following priority order: ++ +... 
The region specified when running a `rosa` command with the `--region` flag. +... The region set in the `AWS_DEFAULT_REGION` environment variable. See link:https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-envvars.html[Environment variables to configure the AWS CLI] in the AWS documentation. +... The default region set in your AWS configuration file. See link:https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-quickstart.html#cli-configure-quickstart-config[Quick configuration with aws configure] in the AWS documentation. +.. Optional: Configure your AWS CLI settings and credentials by using an AWS named profile. `rosa` evaluates AWS named profiles in the following priority order: +... The profile specified when running a `rosa` command with the `--profile` flag. +... The profile set in the `AWS_PROFILE` environment variable. See link:https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-profiles.html[Named profiles] in the AWS documentation. +.. Verify the AWS CLI is installed and configured correctly by running the following command to query the AWS API: ++ +[source,terminal] +---- +$ aws sts get-caller-identity +---- ++ +.Example output +[source,terminal] +---- +--------------------------------------------------------------------------------- +| GetCallerIdentity | ++-------------------------------------------------------------------------------+ +|+-----------------------------------+-----------------------+-----------------+| +|| Account | Arn | UserID || +|+-----------------------------------+-----------------------+-----------------+| +|| | arn:aws:iam:user:name | || +|+-----------------------------------+-----------------------+-----------------+| +---- ++ +After completing these steps, install ROSA. diff --git a/modules/rosa-containers-concept.adoc b/modules/rosa-containers-concept.adoc new file mode 100644 index 0000000000..e48d180164 --- /dev/null +++ b/modules/rosa-containers-concept.adoc @@ -0,0 +1,18 @@ + +// Module included in the following assemblies: +// +// understanding-rosa/rosa-understanding.adoc + + +[id="rosa-containers-concept_{context}"] += Containers + +Containers provide a standard way to package your application code, configurations, and dependencies into a single unit. Containers run as isolated processes on compute hosts and share the host operating system and its hardware resources. A container can be moved between environments and run without changes. Unlike virtual machines, containers do not virtualize a device, its operating system, and the underlying hardware. Only the app code, run time, system tools, libraries, and settings are packaged inside the container. This approach makes a container more lightweight, portable, and efficient than a virtual machine. + +Built on existing Linux container technology (LXC), the OCI-compliant container images define templates for how to package software into standardized units that include all of the elements that an app needs to run. {product-title} (ROSA) uses CRI-O as the container runtime to deploy containers to your cluster. + +To run your app in Kubernetes on ROSA, you must first containerize your app by creating a container image that you store in a container registry. + +Image:: A container image is the base for every container that you want to run. Container images are built from a Dockerfile, a text file that defines how to build the image and which build artifacts to include in it, such as the app, the app configuration, and its dependencies. 
Images are always built from other images, making them quick to configure. + +Registry:: An image registry is a place to store, retrieve, and share container images. Images that are stored in a registry can either be publicly available (public registry) or accessible by a small group of users (private registry). ROSA offers public images that you can use to create your first containerized app. For enterprise applications, use a private registry to protect your images from being used by unauthorized users. diff --git a/modules/rosa-create-cluster-admins.adoc b/modules/rosa-create-cluster-admins.adoc new file mode 100644 index 0000000000..7fabfc65ed --- /dev/null +++ b/modules/rosa-create-cluster-admins.adoc @@ -0,0 +1,62 @@ + +// Module included in the following assemblies: +// +// getting_started_rosa/rosa-creating-cluster.adoc + + +[id="rosa-create-cluster-admins"] += Granting `cluster-admin` access + +As the user who created the cluster, add the `cluster-admin` user role to your account to have the maximum administrator privileges. These privileges are not automatically assigned to your user account when you create the cluster. + +Additionally, only the user who created the cluster can grant cluster access to other `cluster-admin` or `dedicated-admin` users. Users with `dedicated-admin` access have fewer privileges. As a best practice, limit the number of `cluster-admin` users to as few as possible. + +.Prerequisites + +* You have added an identity provider (IDP) to your cluster. +* You have the IDP user name for the user you are creating. +* You are logged in to the cluster. + +.Procedure + +. Give your user `cluster-admin` privileges: ++ +[source,terminal] +---- +$ rosa grant user cluster-admin --user= --cluster= +---- ++ +. Verify your user is listed as a cluster administrator: ++ +[source,terminal] +---- +$ rosa list users --cluster= +---- ++ +.Example output +[source,terminal] +---- +GROUP NAME +cluster-admins rh-rosa-test-user +dedicated-admins rh-rosa-test-user +---- ++ +. Enter the following command to verify that your user now has `cluster-admin` access. A cluster administrator can run this command without errors, but a dedicated administrator cannot. ++ +[source,terminal] +---- +$ oc get all -n openshift-apiserver +---- ++ +.Example output +[source,terminal] +---- +NAME READY STATUS RESTARTS AGE +pod/apiserver-6ndg2 1/1 Running 0 17h +pod/apiserver-lrmxs 1/1 Running 0 17h +pod/apiserver-tsqhz 1/1 Running 0 17h +NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE +service/api ClusterIP 172.30.23.241 443/TCP 18h +NAME DESIRED CURRENT READY UP-TO-DATE AVAILABLE NODE SELECTOR AGE +daemonset.apps/apiserver 3 3 3 3 3 node-role.kubernetes.io/master= 18h +---- diff --git a/modules/rosa-create-dedicated-cluster-admins.adoc b/modules/rosa-create-dedicated-cluster-admins.adoc new file mode 100644 index 0000000000..21000381e5 --- /dev/null +++ b/modules/rosa-create-dedicated-cluster-admins.adoc @@ -0,0 +1,43 @@ + +// Module included in the following assemblies: +// +// getting_started_rosa/rosa-creating-cluster.adoc + + +[id="rosa-create-dedicated-cluster-admins"] += Granting `dedicated-admin` access +Only the user who created the cluster can grant cluster access to other `cluster-admin` or `dedicated-admin` users. Users with `dedicated-admin` access have fewer privileges. As a best practice, grant `dedicated-admin` access to most of your administrators. + +.Prerequisites + +* You have added an identity provider (IDP) to your cluster. +* You have the IDP user name for the user you are creating. 
+* You are logged in to the cluster. + +.Procedure + +. Enter the following command to promote your user to a `dedicated-admin`: ++ +[source,terminal] +---- +$ rosa grant user dedicated-admin --user= --cluster= +---- ++ +. Enter the following command to verify that your user now has `dedicated-admin` access: ++ +[source,terminal] +---- +$ oc get groups dedicated-admins +---- ++ +.Example output +[source,terminal] +---- +NAME USERS +dedicated-admins rh-rosa-test-user +---- ++ +[NOTE] +==== +A `Forbidden` error displays if user without `dedicated-admin` privileges runs this command. +==== diff --git a/modules/rosa-create-objects.adoc b/modules/rosa-create-objects.adoc new file mode 100644 index 0000000000..08639367e8 --- /dev/null +++ b/modules/rosa-create-objects.adoc @@ -0,0 +1,500 @@ + +// Module included in the following assemblies: +// +// * cli_reference/rosa_cli/rosa-manage-objects-cli.adoc + +[id="rosa-create-objects_{context}"] += Create objects + + +This section describes the `create` commands for clusters and resources. + +[id="rosa-create-admin_{context}"] +== create admin + +Create a cluster administrator with an automatically generated password that can log in to a cluster. + +.Syntax +[source,terminal] +---- +$ rosa create admin --cluster= | +---- + +.Arguments +[cols="30,70"] +|=== +|Option |Definition + +|--cluster +|Required: The name or ID (string) of the cluster to add to the identity provider (IDP). +|=== + +.Optional arguments inherited from parent commands +[cols="30,70"] +|=== +|Option |Definition + +|--help +|Shows help for this command. + +|--debug +|Enables debug mode. + +|--interactive +|Enables interactive mode. + +|--profile +|Specifies an AWS profile (string) from your credentials file. + +|--v +|The log level for V logs. +|=== + +.Example +Create a cluster administrator that can log in to a cluster named `mycluster`: + +[source,terminal] +---- +$ rosa create admin --cluster=mycluster +---- + +[id="rosa-create-cluster_{context}"] +== create cluster + +Create a new cluster. + +.Syntax +[source,terminal] +---- +$ rosa create cluster --cluster= | [arguments] +---- + +.Arguments +[cols="30,70"] +|=== +|Option |Definition + +|--cluster +|Required: The name or ID (string) of the cluster. When used with the `create cluster` command, this argument is used to generate a sub-domain for your cluster on `openshiftapps.com`. + +|--channel-group +|The channel group (string) is the name of the group where this image belongs, for example `stable` or `fast`. Default: `stable` + +|--compute-machine-type +|The instance type (string) for the compute nodes. Determines the amount of memory and vCPU that are allocated to each compute node. + +|--compute-nodes +|The number (integer) of worker nodes to provision per zone. Single-zone clusters require at least 2 nodes. Multi-zone clusters require at least 3 nodes. Default: `2` for single-az; `3` for multi-az + +|--disable-scp-checks +|Indicates whether cloud permission checks are disabled when attempting to install a cluster. + +|--dry-run +|Simulates creating the cluster. + +|--enable-autoscaling +|Enables autoscaling of compute nodes. By default, autoscaling is set to `2` nodes. To set non-default node limits, use this argument with the `--min-replicas` and `--max-replicas` arguments. + +|--host-prefix +|The subnet prefix length (integer) to assign to each individual node. For example, if host prefix is set to `23`, then each node is assigned a `/23` subnet out of the given CIDR. 
+ +|--machine-cidr +|Block of IP addresses (ipNet) used by OpenShift Container Platform while installing the cluster. Example: `10.0.0.0/16` + +|--max-replicas +|Specifies the maximum number of compute nodes when enabling autoscaling. Default: `2` + +|--min-replicas +|Specifies the minimum number of compute nodes when enabling autoscaling. Default: `2` + +|--multi-az +|Deploys to multiple data centers. + +|--pod-cidr +|Block of IP addresses (ipNet) from which pod IP addresses are allocated. Example: `10.128.0.0/14` + +|--private +|Restricts master API endpoint and application routes to direct, private connectivity. + +|--private-link +| Specifies to use AWS PrivateLink to provide private connectivity between VPCs and services. The `--subnet-ids` argument is required when using `--private-link`. + +|--region +|The AWS region (string) where your worker pool will be located. This argument overrides the `AWS_REGION` environment variable. + +|--service-cidr +|Block of IP addresses (ipNet) for services. Example: `172.30.0.0/16` + +|--subnet-ids +|The subnet IDs (string) to use when installing the cluster. Subnet IDs must be in pairs with one private subnet ID and one public subnet ID per availability zone. Subnets are comma-delimited. Example: `--subnet-ids=subnet-1,subnet-2`. Leave the value empty for installer-provisioned subnet IDs. + + +When using `--private-link`, the `--subnet-ids` argument is required and only one private subnet is allowed per zone. + +|--version +|The version (string) of OpenShift Container Platform that will be used to install the cluster. Example: `4.3.10` +|=== + +.Optional arguments inherited from parent commands +[cols="30,70"] +|=== +|Option |Definition + +|--help +|Shows help for this command. + +|--debug +|Enables debug mode. + +|--interactive +|Enables interactive mode. + +|--profile +|Specifies an AWS profile (string) from your credentials file. + +|--v +|The log level for V logs. +|=== + +.Examples +Create a cluster named `mycluster`: + +[source,terminal] +---- +$ rosa create cluster --cluster=mycluster +---- + +Create a cluster with a specific AWS region: + +[source,terminal] +---- +$ rosa create cluster --cluster=mycluster --region=us-east-2 +---- + +Create a cluster with autoscaling enabled on the default worker machine pool: + +[source,terminal] +---- +$ rosa create cluster --cluster=mycluster -region=us-east-1 --enable-autoscaling --min-replicas=2 --max-replicas=5 +---- + +[id="rosa-create-idp_{context}"] +== create idp + +Add an identity provider (IDP) to define how users log in to a cluster. + +.Syntax +[source,terminal] +---- +$ rosa create idp --cluster= | [arguments] +---- + +.Arguments +[cols="30,70"] +|=== +|Option |Definition + +|--cluster +|Required: The name or ID (string) of the cluster to which the IDP will be added. + +|--ca +|The path (string) to the PEM-encoded certificate file to use when making requests to the server. + +|--client-id +|The client ID (string) from the registered application. + +|--client-secret +|The client secret (string) from the registered application. + +|--mapping-method +|Specifies how new identities (string) are mapped to users when they log in. Default: `claim` + +|--name +|The name (string) for the identity provider. + +|--type +|The type (string) of identity provider. Options: `github`, `gitlab`, `google`, `ldap`, `openid` +|=== + +.GitHub arguments +[cols="30,70"] +|=== +|Option |Definition + +|--hostname +|The optional domain (string) to use with a hosted instance of GitHub Enterprise. 
+ +|--organizations +|Specifies the organizations for login access. Only users that are members of at least one of the listed organizations (string) are allowed to log in. + +|--teams +|Specifies the teams for login access. Only users that are members of at least one of the listed teams (string) are allowed to log in. The format is `/`. +|=== + +.GitLab arguments +[cols="30,70"] +|=== +|Option |Definition + +|--host-url +|The host URL (string) of a GitLab provider. Default: `https://gitlab.com` +|=== + +.Google arguments +[cols="30,70"] +|=== +|Option |Definition + +|--hosted-domain +|Restricts users to a Google Apps domain (string). +|=== + +.LDAP arguments +[cols="30,70"] +|=== +|Option |Definition + +|--bind-dn +|The domain name (string) to bind with during the search phase. + +|--bind-password +|The password (string) to bind with during the search phase. + +|--email-attributes +|The list (string) of attributes whose values should be used as the email address. + +|--id-attributes +|The list (string) of attributes whose values should be used as the user ID. Default: `dn` + +|--insecure +|Does not make TLS connections to the server. + +|--name-attributes +|The list (string) of attributes whose values should be used as the display name. Default: `cn` + +|--url +|An RFC 2255 URL (string) which specifies the LDAP search parameters to use. + +|--username-attributes +|The list (string) of attributes whose values should be used as the preferred username. Default: `uid` +|=== + +.OpenID arguments +[cols="30,70"] +|=== +|Option |Definition + +|--email-claims +|The list (string) of claims to use as the email address. + +|--extra-scopes +|The list (string) of scopes to request, in addition to the `openid` scope, during the authorization token request. + +|--issuer-url +|The URL (string) that the OpenID provider asserts as the issuer identifier. It must use the HTTPS scheme with no URL query parameters or fragment. + +|--name-claims +|The list (string) of claims to use as the display name. + +|--username-claims +|The list (string) of claims to use as the preferred username when provisioning a user. +|=== + +.Optional arguments inherited from parent commands +[cols="30,70"] +|=== +|Option |Definition + +|--help +|Shows help for this command. + +|--debug +|Enables debug mode. + +|--interactive +|Enables interactive mode. + +|--profile +|Specifies an AWS profile (string) from your credentials file. + +|--v +|The log level for V logs. +|=== + +.Examples +Add a GitHub identity provider to a cluster named `mycluster`: + +[source,terminal] +---- +$ rosa create idp --type=github --cluster=mycluster +---- + +Add an identity provider following interactive prompts: + +[source,terminal] +---- +$ rosa create idp --cluster=mycluster --interactive +---- + +[id="rosa-create-ingress_{context}"] +== create ingress + +Add an ingress endpoint to enable API access to the cluster. + +.Syntax +[source,terminal] +---- +$ rosa create ingress --cluster= | [arguments] +---- + +.Arguments +[cols="30,70"] +|=== +|Option |Definition + +|--cluster +|Required: The name or ID (string) of the cluster to which the ingress will be added. + +|--label-match +|The label match (string) for ingress. The format must be a comma-delimited list of key=value pairs. If no label is specified, all routes are exposed on both routers. + +|--private +|Restricts application route to direct, private connectivity. 
+|=== + +.Optional arguments inherited from parent commands +[cols="30,70"] +|=== +|Option |Definition + +|--help +|Shows help for this command. + +|--debug +|Enables debug mode. + +|--interactive +|Enables interactive mode. + +|--profile +|Specifies an AWS profile (string) from your credentials file. + +|--v +|The log level for V logs. +|=== + +.Examples +Add an internal ingress to a cluster named `mycluster`: + +[source,terminal] +---- +$ rosa create ingress --private --cluster=mycluster +---- + +Add a public ingress to a cluster named `mycluster`: + +[source,terminal] +---- +$ rosa create ingress --cluster=mycluster +---- + +Add an ingress with a route selector label match:s + +[source,terminal] +---- +$ rosa create ingress --cluster=mycluster --label-match=foo=bar,bar=baz +---- + +[id="rosa-create-machinepool_{context}"] +== create machinepool + +Add a machine pool to an existing cluster. + +.Syntax +[source,terminal] +---- +$ rosa create machinepool --cluster= | --replicas= --name= [arguments] +---- + +.Arguments +[cols="30,70"] +|=== +|Option |Definition + +|--cluster +|Required: The name or ID (string) of the cluster to which the machine pool will be added. + +|--enable-autoscaling +|Enable or disable autoscaling of compute nodes. To enable autoscaling, use this argument with the `--min-replicas` and `--max-replicas` arguments. To disable autoscaling, use `--enable-autoscaling=false` with the `--replicas` argument. + +|--instance-type +|The instance type (string) that should be used. Default: `m5.xlarge` + +|--labels +|The labels (string) for the machine pool. The format must be a comma-delimited list of key=value pairs. This list overwrites any modifications made to node labels on an ongoing basis. + +|--max-replicas +|Specifies the maximum number of compute nodes when enabling autoscaling. + +|--min-replicas +|Specifies the minimum number of compute nodes when enabling autoscaling. + +|--name +|Required: The name (string) for the machine pool. + +|--replicas +|Required when autoscaling is not configured. The number (integer) of machines for this machine pool. + +|--taints +|Taints for the machine pool. This string value should be formatted as a comma-separated list of `key=value:ScheduleType`. This list will overwrite any modifications made to Node taints on an ongoing basis. +|=== + +.Optional arguments inherited from parent commands +[cols="30,70"] +|=== +|Option |Definition + +|--help +|Shows help for this command. + +|--debug +|Enables debug mode. + +|--interactive +|Enables interactive mode. + +|--profile +|Specifies an AWS profile (string) from your credentials file. + +|--v +|The log level for V logs. 
+|=== + +.Examples +Interactively add a machine pool to a cluster named `mycluster`: + +[source,terminal] +---- +$ rosa create machinepool --cluster=mycluster --interactive +---- + +Add a machine pool that is named `mp-1` to a cluster with autoscaling enabled: + +[source,terminal] +---- +$ rosa create machinepool --cluster=mycluster --enable-autoscaling --min-replicas=2 --max-replicas=5 --name=mp-1 +---- + +Add a machine pool that is named `mp-1` with 3 replicas of `m5.xlarge` to a cluster: + +[source,terminal] +---- +$ rosa create machinepool --cluster=mycluster --replicas=3 --instance-type=m5.xlarge --name=mp-1 +---- + +Add a machine pool with labels to a cluster: + +[source,terminal] +---- +$ rosa create machinepool --cluster=mycluster --replicas=2 --instance-type=r5.2xlarge --labels=foo=bar,bar=baz --name=mp-1 +---- diff --git a/modules/rosa-creating-cluster.adoc b/modules/rosa-creating-cluster.adoc new file mode 100644 index 0000000000..3f24eadd95 --- /dev/null +++ b/modules/rosa-creating-cluster.adoc @@ -0,0 +1,96 @@ +// Module included in the following assemblies: +// +// getting_started_rosa/rosa-creating-cluster.adoc + +[id="rosa-creating-cluster_{context}"] += Creating your cluster + +You can create an {product-title} cluster using the `rosa` CLI. + +.Prerequisites + +You have installed {product-title}. + +[NOTE] +==== +link:https://docs.aws.amazon.com/vpc/latest/userguide/vpc-sharing.html[AWS Shared VPCs] are not currently supported for ROSA installs. +==== + +.Procedure + +. You can create a cluster using the default settings or by specifying custom settings using the interactive mode. To view other options when creating a cluster, enter `rosa create cluster --help`. ++ +Creating a cluster can take up to 40 minutes. ++ +[NOTE] +==== +Multiple availability zones (AZ) are recommended for production workloads. The default is a single availability zone. Use `--help` for an example of how to set this option manually or use interactive mode to be prompted for this setting. +==== ++ +* To create your cluster with the default cluster settings: ++ +[source, terminal] +---- +$ rosa create cluster --cluster-name= +---- ++ +.Example output +[source,terminal] +---- +I: Creating cluster with identifier '1de87g7c30g75qechgh7l5b2bha6r04e' and name 'rh-rosa-test-cluster1' +I: To view list of clusters and their status, run `rosa list clusters` +I: Cluster 'rh-rosa-test-cluster1' has been created. +I: Once the cluster is 'Ready' you will need to add an Identity Provider and define the list of cluster administrators. See `rosa create idp --help` and `rosa create user --help` for more information. +I: To determine when your cluster is Ready, run `rosa describe cluster rh-rosa-test-cluster1`. +---- +* To create a cluster using interactive prompts: ++ +[source, terminal] +---- +$ rosa create cluster --interactive +---- +* To configure your networking IP ranges, you can use the following default ranges. For more information when using manual mode, use `rosa create cluster --help | grep cidr`. In interactive mode, you are prompted for the settings. ++ +** Node CIDR: 10.0.0.0/16 +** Service CIDR: 172.30.0.0/16 +** Pod CIDR: 10.128.0.0/14 + +. Enter the following command to check the status of your cluster. During cluster creation, the `State` field from the output will transition from `pending` to `installing`, and finally to `ready`. 
++ +[source, terminal] +---- +$ rosa describe cluster --cluster= +---- ++ +.Example output +[source,terminal] +---- +Name: rh-rosa-test-cluster1 +OpenShift Version: 4.6.8 +DNS: *.example.com +ID: uniqueidnumber +External ID: uniqueexternalidnumber +AWS Account: 123456789101 +API URL: https://api.rh-rosa-test-cluster1.example.org:6443 +Console URL: https://console-openshift-console.apps.rh-rosa-test-cluster1.example.or +Nodes: Master: 3, Infra: 2, Compute: 2 +Region: us-west-2 +Multi-AZ: false +State: ready +Channel Group: stable +Private: No +Created: Jan 15 2021 16:30:55 UTC +Details Page: https://cloud.redhat.com/examplename/details/idnumber +---- ++ +[NOTE] +==== +If installation fails or the `State` field does not change to `ready` after 40 minutes, check the installation troubleshooting documentation for more details. +==== + +. Track the progress of the cluster creation by watching the OpenShift installer logs: ++ +[source, terminal] +---- +$ rosa logs install --cluster= --watch +---- diff --git a/modules/rosa-delete-cluster-admins.adoc b/modules/rosa-delete-cluster-admins.adoc new file mode 100644 index 0000000000..086d535369 --- /dev/null +++ b/modules/rosa-delete-cluster-admins.adoc @@ -0,0 +1,31 @@ + +// Module included in the following assemblies: +// +// getting_started_rosa/rosa-creating-cluster.adoc + + +[id="rosa-delete-cluster-admins"] += Revoking `cluster-admin` access +Only the user who created the cluster can revoke access for `cluster-admin` users. + +.Prerequisites + +* You have added an Identity Provider (IDP) to your cluster. +* You have the IDP user name for the user whose privileges you are revoking. +* You are logged in to the cluster. + +.Procedure + +. Revoke the user `cluster-admin` privileges: ++ +[source,terminal] +---- +$ rosa revoke user --cluster= --cluster-admins= +---- ++ +. Verify your user is no longer listed as a `cluster-admin`: ++ +[source,terminal] +---- +$ rosa list users --cluster= +---- diff --git a/modules/rosa-delete-dedicated-admins.adoc b/modules/rosa-delete-dedicated-admins.adoc new file mode 100644 index 0000000000..78b357b8f3 --- /dev/null +++ b/modules/rosa-delete-dedicated-admins.adoc @@ -0,0 +1,36 @@ + +// Module included in the following assemblies: +// +// getting_started_rosa/rosa-creating-cluster.adoc + + +[id="rosa-delete-dedicated-admins"] += Revoking `dedicated-admin` access +Only the user who created the cluster can revoke access for a `dedicated-admin` users. + +.Prerequisites + +* You have added an Identity Provider (IDP) to your cluster. +* You have the IDP user name for the user whose privileges you are revoking. +* You are logged in to the cluster. + +.Procedure + +. Enter the following command to revoke access for a `dedicated-admin`: ++ +[source,terminal] +---- +$ rosa revoke user dedicated-admin --user= --cluster= +---- ++ +. Enter the following command to verify that your user no longer has `dedicated-admin` access. The user will not be listed in the output. ++ +[source,terminal] +---- +$ oc get groups dedicated-admins +---- ++ +[NOTE] +==== +A `Forbidden` error displays if user without `dedicated-admin` privileges runs this command. 
+==== diff --git a/modules/rosa-delete-objects.adoc b/modules/rosa-delete-objects.adoc new file mode 100644 index 0000000000..b6f5028f5c --- /dev/null +++ b/modules/rosa-delete-objects.adoc @@ -0,0 +1,276 @@ +// Module included in the following assemblies: +// +// * cli_reference/rosa_cli/rosa-manage-objects-cli.adoc + +[id="rosa-delete-objects_{context}"] += Delete objects + + +This section describes the `delete` commands for clusters and resources. + +[id="rosa-delete-admin_{context}"] +== delete admin + +Deletes a cluster administrator from a specified cluster. + +.Syntax +[source,terminal] +---- +$ rosa delete admin --cluster= | +---- + +.Arguments +[cols="30,70"] +|=== +|Option |Definition + +|--cluster +|Required: The name or ID (string) of the cluster to add to the identity provider (IDP). +|=== + +.Optional arguments inherited from parent commands +[cols="30,70"] +|=== +|Option |Definition + +|--help +|Shows help for this command. + +|--debug +|Enables debug mode. + +|--interactive +|Enables interactive mode. + +|--profile +|Specifies an AWS profile (string) from your credentials file. + +|--v +|The log level for V logs. +|=== + +.Example +Delete a cluster administrator from a cluster named `mycluster`. + +[source,terminal] +---- +$ rosa delete admin --cluster=mycluster +---- + +[id="rosa-delete-cluster_{context}"] +== delete cluster + +Deletes a cluster. + +.Syntax +[source,terminal] +---- +$ rosa delete cluster --cluster= | [arguments] +---- + +.Arguments +[cols="30,70"] +|=== +|Option |Definition + +|--cluster +|Required: The name or ID (string) of the cluster to delete. + +|--watch +|Watches the cluster uninstallation logs. +|=== + +.Optional arguments inherited from parent commands +[cols="30,70"] +|=== +|Option |Definition + +|--help +|Shows help for this command. + +|--debug +|Enables debug mode. + +|--interactive +|Enables interactive mode. + +|--profile +|Specifies an AWS profile (string) from your credentials file. + +|--v +|The log level for V logs. + +|--yes +|Automatically answers `yes` to confirm the operation. +|=== + +.Examples +Delete a cluster named `mycluster`. + +[source,terminal] +---- +$ rosa delete cluster --cluster=mycluster +---- + +[id="rosa-delete-idp_{context}"] +== delete idp + +Deletes a specific identity provider (IDP) from a cluster. + +.Syntax +[source,terminal] +---- +$ rosa delete idp --cluster= | [arguments] +---- + +.Arguments +[cols="30,70"] +|=== +|Option |Definition + +|--cluster +|Required: The name or ID (string) of the cluster from which the IDP will be deleted. +|=== + +.Optional arguments inherited from parent commands +[cols="30,70"] +|=== +|Option |Definition + +|--help +|Shows help for this command. + +|--debug +|Enables debug mode. + +|--interactive +|Enables interactive mode. + +|--profile +|Specifies an AWS profile (string) from your credentials file. + +|--v +|The log level for V logs. + +|--yes +|Automatically answers `yes` to confirm the operation. +|=== + +.Example +Delete an identity provider named `github` from a cluster named `mycluster`. + +[source,terminal] +---- +$ rosa delete idp github --cluster=mycluster +---- + +[id="rosa-delete-ingress_{context}"] +== delete ingress + +Deletes a non-default application router (ingress) from a cluster. + +.Syntax +[source,terminal] +---- +$ rosa delete ingress --cluster= | [arguments] +---- + +.Arguments +[cols="30,70"] +|=== +|Option |Definition + +|--cluster +|Required: The name or ID (string) of the cluster from which the ingress will be deleted. 
+|=== + +.Optional arguments inherited from parent commands +[cols="30,70"] +|=== +|Option |Definition + +|--help +|Shows help for this command. + +|--debug +|Enables debug mode. + +|--interactive +|Enables interactive mode. + +|--profile +|Specifies an AWS profile (string) from your credentials file. + +|--v +|The log level for V logs. + +|--yes +|Automatically answers `yes` to confirm the operation. +|=== + +.Examples +Delete an ingress with the ID `a1b2` from a cluster named `mycluster`. + +[source,terminal] +---- +$ rosa delete ingress --cluster=mycluster a1b2 +---- + +Delete a secondary ingress with the subdomain name `apps2` from a cluster named `mycluster`. + +[source,terminal] +---- +$ rosa delete ingress --cluster=mycluster apps2 +---- + +[id="rosa-delete-machinepool_{context}"] +== delete machinepool + +Deletes a machine pool from a cluster. + +.Syntax +[source,terminal] +---- +$ rosa delete machinepool --cluster= | +---- + +.Arguments +[cols="30,70"] +|=== +|Option |Definition + +|--cluster +|Required: The name or ID (string) of the cluster that the machine pool will be deleted from. +|=== + +.Optional arguments inherited from parent commands +[cols="30,70"] +|=== +|Option |Definition + +|--help +|Shows help for this command. + +|--debug +|Enables debug mode. + +|--interactive +|Enables interactive mode. + +|--profile +|Specifies an AWS profile (string) from your credentials file. + +|--v +|The log level for V logs. + +|--yes +|Automatically answers `yes` to confirm the operation. +|=== + +.Example +Delete the machine pool with the ID `mp-1` from a cluster named `mycluster`. + +[source,terminal] +---- +$ rosa delete machinepool --cluster=mycluster mp-1 +---- diff --git a/modules/rosa-deleting-aws-resources-aws-console.adoc b/modules/rosa-deleting-aws-resources-aws-console.adoc new file mode 100644 index 0000000000..efe22ab67f --- /dev/null +++ b/modules/rosa-deleting-aws-resources-aws-console.adoc @@ -0,0 +1,51 @@ +// Module included in the following assemblies: +// +// * rosa_getting_started_sts/rosa-sts-deleting-cluster.adoc + +[id="rosa-deleting-aws-resources-aws-console_{context}"] += Deleting the AWS resources by using the AWS IAM Console + +After deleting a {product-title} (ROSA) cluster, you can delete the AWS Security Token Service (STS) resources by using the AWS Identity and Access Management (IAM) Console. + +[IMPORTANT] +==== +Account-wide IAM roles and policies might be used by other ROSA clusters in the same AWS account. You must only remove the resources if they are not required by other clusters. +==== + +.Prerequisites + +* You have deleted your ROSA cluster. For more information, see the _Deleting a cluster_ section. ++ +[IMPORTANT] +==== +You must delete the cluster before you remove the IAM roles and policies. The account-wide roles are required to delete the resources created by the installer. The cluster-specific Operator roles are required to clean-up the resources created by the OpenShift Operators. +==== + +.Procedure + +. Log in to the link:https://console.aws.amazon.com/iamv2/home#/home[AWS IAM Console]. + +. Delete the OpenID Connect (OIDC) provider that you created for Operator authentication in your cluster: +.. Navigate to *Access management* -> *Identity providers* and click on the OIDC resource that you created to authenticate the cluster Operators. +.. In the dialog page for the resource, select *Delete* to delete the OIDC provider. + +. 
Delete the cluster-specific Operator IAM roles: ++ +[TIP] +==== +The IAM role and policy names include the role prefix that is specified when the STS resources are created. The default prefix is `ManagedOpenShift`. +==== ++ +.. Navigate to *Access management* -> *Roles* and click on one of the cluster-specific Operator roles that you created for your cluster. +.. In the dialog page for the resource, select *Delete role* to delete the role. Select *Yes, delete* to confirm the role deletion. +.. Repeat this step to delete each of the cluster-specific Operator roles for the cluster. + +. Delete the account-wide Operator policies that you created for ROSA deployments that use STS: +.. Navigate to *Access management* -> *Policies* and click on one of the Operator policies. +.. In the dialog page for the resource, select *Delete policy* to delete the policy. Select *Delete* to confirm the policy deletion. +.. Repeat this step to delete each of the Operator policies. + +. Delete the account-wide IAM roles and inline policies that you created for ROSA deployments that use STS: +.. Navigate to *Access management* -> *Roles* and click on one of the account-wide roles. +.. In the dialog page for the resource, select *Delete role* to delete the role. Select *Yes, delete* to confirm the role deletion. +.. Repeat this step to delete each of the account-wide roles for the cluster. diff --git a/modules/rosa-deleting-aws-resources-cli.adoc b/modules/rosa-deleting-aws-resources-cli.adoc new file mode 100644 index 0000000000..ac7c9740e6 --- /dev/null +++ b/modules/rosa-deleting-aws-resources-cli.adoc @@ -0,0 +1,100 @@ +// Module included in the following assemblies: +// +// * rosa_getting_started_sts/rosa-sts-deleting-cluster.adoc + +[id="rosa-deleting-aws-resources-cli_{context}"] += Deleting the AWS resources by using the CLI + +After deleting a {product-title} (ROSA) cluster, you can delete the AWS Security Token Service (STS) resources by using the CLI. + +[IMPORTANT] +==== +Account-wide Identity Access Management (IAM) roles and policies might be used by other ROSA clusters in the same AWS account. You must only remove the resources if they are not required by other clusters. +==== + +.Prerequisites + +* You have installed and configured the latest AWS CLI on your installation host. +* You have deleted your ROSA cluster. For more information, see the _Deleting a cluster_ section. ++ +[IMPORTANT] +==== +You must delete the cluster before you remove the IAM roles and policies. The account-wide roles and policies are required to delete the resources created by the installer. The Operator roles and policies are required to clean-up the resources created by the OpenShift Operators. +==== + +.Procedure + +. Delete the OpenID Connect (OIDC) provider that you created for Operator authentication in your cluster: ++ +[source,terminal] +---- +$ aws iam delete-open-id-connect-provider --open-id-connect-provider-arn <1> +---- +<1> Replace `` with the Amazon Resource Name (ARN) of the OpenID Connect (OIDC) resource that you created to authenticate the cluster Operators. You can run `$ aws iam list-open-id-connect-providers` to list the OIDC providers in your account. + +. Delete the cluster-specific Operator IAM roles: +.. List the account-wide Operator policy that is attached to one of the cluster-specific IAM roles: ++ +[source,terminal] +---- +$ aws iam list-attached-role-policies --role-name <1> +---- +<1> Replace `` with the name of a cluster-specific Operator role that you created for the cluster. 
Specify the role name and not the full ARN. You can run `$ aws iam list-roles` to list the roles in your account. ++ +[TIP] +==== +The IAM role and policy names include the role prefix that is specified when the STS resources are created. The default prefix is `ManagedOpenShift`. +==== ++ +.. Detach the policy from role: ++ +[source,terminal] +---- +$ aws iam detach-role-policy --role-name --policy-arn <1> +---- +<1> Replace `` with the ARN of the attached Operator policy. ++ +.. Delete the role: ++ +[source,terminal] +---- +$ aws iam delete-role --role-name +---- ++ +.. Repeat the steps to delete each of the cluster-specific Operator roles for the cluster. + +. Delete the account-wide Operator policies that you created for ROSA deployments that use STS. The following command deletes a single policy: ++ +[source,terminal] +---- +$ aws iam delete-policy --policy-arn <1> +---- +<1> Replace `` with the ARN of one of the Operator policies. You can list the policies in your account by running `$ aws iam list-policies`. ++ +Repeat this step to delete each of the Operator policies. + +. Delete the account-wide IAM roles and inline policies that you created for ROSA deployments that use STS: +.. List the inline policy for one of the account-wide IAM roles: ++ +[source,terminal] +---- +$ aws iam list-role-policies --role-name <1> +---- +<1> Replace `` with the name of one of the account-wide IAM roles. Specify the role name and not the full ARN. You can run `$ aws iam list-roles` to list the roles in your account. ++ +.. Delete the inline policy: ++ +[source,terminal] +---- +$ aws iam delete-role-policy --role-name --policy-name <1> +---- +<1> Replace `` with the policy name that is included in the output of the preceding command. ++ +.. Delete the role: ++ +[source,terminal] +---- +$ aws iam delete-role --role-name +---- ++ +.. Repeat the steps to delete each of the account-wide roles. diff --git a/modules/rosa-deleting-cluster.adoc b/modules/rosa-deleting-cluster.adoc new file mode 100644 index 0000000000..f0cb8625b1 --- /dev/null +++ b/modules/rosa-deleting-cluster.adoc @@ -0,0 +1,37 @@ +// Module included in the following assemblies: +// +// * rosa_getting_started/rosa-deleting-cluster.adoc +// * rosa_getting_started_sts/rosa-sts-deleting-cluster.adoc + +ifeval::["{context}" == "rosa-sts-deleting-cluster"] +:sts: +endif::[] + +[id="rosa-deleting-cluster_{context}"] += Deleting a cluster + +You can delete an {product-title} cluster using the `rosa` CLI. + +If add-ons are installed, the deletion takes longer because add-ons are uninstalled before the cluster is deleted. The amount of time depends on the number and size of the add-ons. + +.Procedure + +. Enter the following command to delete a cluster and watch the logs, replacing `` with the name or ID of your cluster: ++ +[source, terminal] +---- +$ rosa delete cluster --cluster= --watch +---- + +ifndef::sts[] +. 
To clean up your CloudFormation stack, enter the following command: ++ +[source, terminal] +---- +$ rosa init --delete-stack +---- +endif::sts[] + +ifeval::["{context}" == "rosa-sts-deleting-cluster"] +:!sts: +endif::[] diff --git a/modules/rosa-disabling-autoscaling-nodes.adoc b/modules/rosa-disabling-autoscaling-nodes.adoc new file mode 100644 index 0000000000..7776e47f2d --- /dev/null +++ b/modules/rosa-disabling-autoscaling-nodes.adoc @@ -0,0 +1,27 @@ + +// Module included in the following assemblies: +// +// rosa-nodes/rosa-disabling-autoscaling-nodes.adoc + +[id="rosa-disabling-autoscaling_{context}"] += Disabling autoscaling nodes in an existing cluster using the rosa CLI + +Disable autoscaling for worker nodes in the machine pool definition. + +.Procedure + +. Enter the following command: ++ +[source,terminal] +---- +$ rosa edit machinepool --cluster= --enable-autoscaling=false --replicas= +---- ++ +.Example ++ +Disable autoscaling on the `default` machine pool on a cluster named `mycluster`: ++ +[source,terminal] +---- +$ rosa edit machinepool --cluster=mycluster default --enable-autoscaling=false --replicas=3 +---- diff --git a/modules/rosa-edit-objects.adoc b/modules/rosa-edit-objects.adoc new file mode 100644 index 0000000000..6d5f00dfd9 --- /dev/null +++ b/modules/rosa-edit-objects.adoc @@ -0,0 +1,228 @@ + +// Module included in the following assemblies: +// +// * cli_reference/rosa_cli/rosa-manage-objects-cli.adoc + +[id="rosa-edit-objects_{context}"] += Edit objects + + +This section describes the `edit` commands for clusters and resources. + +[id="rosa-edit-cluster_{context}"] +== edit cluster + +Allows edits to an existing cluster. + +.Syntax +[source,terminal] +---- +$ rosa edit cluster --cluster= | [arguments] +---- + +.Arguments +[cols="30,70"] +|=== +|Option |Definition + +|--cluster +|Required: The name or ID (string) of the cluster to edit. + +|--private +|Restricts a master API endpoint to direct, private connectivity. + +|=== + +.Optional arguments inherited from parent commands +[cols="30,70"] +|=== +|Option |Definition + +|--help +|Shows help for this command. + +|--debug +|Enables debug mode. + +|--interactive +|Enables interactive mode. + +|--profile +|Specifies an AWS profile (string) from your credentials file. + +|--v +|The log level for V logs. +|=== + +.Examples +Edit a cluster named `mycluster` to make it private. + +[source,terminal] +---- +$ rosa edit cluster --cluster=mycluster --private +---- + +Edit all cluster options interactively on a cluster named `mycluster`. + +[source,terminal] +---- +$ rosa edit cluster --cluster=mycluster --interactive +---- + +[id="rosa-edit-ingress_{context}"] +== edit ingress + +Edits the additional non-default application router for a cluster. + +.Syntax +[source,terminal] +---- +$ rosa edit ingress --cluster= | [arguments] +---- + +.Arguments +[cols="30,70"] +|=== +|Option |Definition + +|--cluster +|Required: The name or ID (string) of the cluster to which the ingress will be added. + +|--label-match +|The label match (string) for ingress. The format must be a comma-delimited list of key=value pairs. If no label is specified, all routes are exposed on both routers. + +|--private +|Restricts the application route to direct, private connectivity. +|=== + +.Optional arguments inherited from parent commands +[cols="30,70"] +|=== +|Option |Definition + +|--help +|Shows help for this command. + +|--debug +|Enables debug mode. + +|--interactive +|Enables interactive mode. 
+ +|--profile +|Specifies an AWS profile (string) from your credentials file. + +|--v +|The log level for V logs. +|=== + +.Examples + +Make an additional ingress with the ID `a1b2` as a private connection on a cluster named `mycluster`. +[source,terminal] +---- +$ rosa edit ingress --private --cluster=mycluster a1b2 +---- + +Update the router selectors for the additional ingress with the ID `a1b2` on a cluster named `mycluster`. + +[source,terminal] +---- +$ rosa edit ingress --label-match=foo=bar --cluster=mycluster a1b2 +---- + +Update the default ingress using the sub-domain identifier `apps` on a cluster named `mycluster`. + +[source,terminal] +---- +$ rosa edit ingress --private=false --cluster=mycluster apps +---- + +[id="rosa-edit-machinepool_{context}"] +== edit machinepool + +Allows edits to the machine pool in a cluster. + +.Syntax +[source,terminal] +---- +$ rosa edit machinepool --cluster= | [arguments] +---- + +.Arguments +[cols="30,70"] +|=== +|Option |Definition + +|--cluster +|Required: The name or ID (string) of the cluster to edit on which the additional machine pool will be edited. + +|--enable-autoscaling +|Enable or disable autoscaling of compute nodes. To enable autoscaling, use this argument with the `--min-replicas` and `--max-replicas` arguments. To disable autoscaling, use `--enable-autoscaling=false` with the `--replicas` argument. + +|--labels +|The labels (string) for the machine pool. The format must be a comma-delimited list of key=value pairs. Editing this value only affects newly created nodes of the machine pool, which are created by increasing the node number, and does not affect the existing nodes. This list overwrites any modifications made to node labels on an ongoing basis. + +|--max-replicas +|Specifies the maximum number of compute nodes when enabling autoscaling. + +|--min-replicas +|Specifies the minimum number of compute nodes when enabling autoscaling. + +|--replicas +|Required when autoscaling is not configured. The number (integer) of machines for this machine pool. + +|--taints +|Taints for the machine pool. This string value should be formatted as a comma-separated list of `key=value:ScheduleType`. Editing this value only affect newly created nodes of the machine pool, which are created by increasing the node number, and does not affect the existing nodes. This list overwrites any modifications made to Node taints on an ongoing basis. +|=== + +.Optional arguments inherited from parent commands +[cols="30,70"] +|=== +|Option |Definition + +|--help +|Shows help for this command. + +|--debug +|Enables debug mode. + +|--interactive +|Enables interactive mode. + +|--profile +|Specifies an AWS profile (string) from your credentials file. + +|--v +|The log level for V logs. +|=== + +.Examples + +Set 4 replicas on a machine pool named `mp1` on a cluster named `mycluster`. + +[source,terminal] +---- +$ rosa edit machinepool --cluster=mycluster --replicas=4 --name=mp1 +---- + +Enable autoscaling on a machine pool named `mp1` on a cluster named `mycluster`. + +[source,terminal] +---- +$ rosa edit machinepool --cluster-name=mycluster --enable-autoscaling --min-replicas=3 --max-replicas=5 --name=mp1 +---- + +Disable autoscaling on a machine pool named `mp1` on a cluster named `mycluster`. + +[source,terminal] +---- +$ rosa edit machinepool --cluster-name=mycluster --enable-autoscaling=false --replicas=3 --name=mp1 +---- + +Modify the autoscaling range on a machine pool named `mp1` on a cluster named `mycluster`. 
+ +[source,terminal] +---- +$ rosa edit machinepool --max-replicas=9 --cluster=mycluster --name=mp1 +---- diff --git a/modules/rosa-enable-private-cluster-existing.adoc b/modules/rosa-enable-private-cluster-existing.adoc new file mode 100644 index 0000000000..ec9e25569a --- /dev/null +++ b/modules/rosa-enable-private-cluster-existing.adoc @@ -0,0 +1,33 @@ + +// Module included in the following assemblies: +// +// cloud_infrastructure_access/rosa-private-cluster.adoc + + +[id="rosa-enabling-private-cluster-existing_{context}"] += Enabling private cluster on an existing cluster + +After a cluster has been created, you can later enable the cluster to be private. + +[IMPORTANT] +==== +Private clusters cannot be used with AWS security token service (STS). However, STS supports AWS PrivateLink clusters. +==== + +.Prerequisites + +AWS VPC Peering, VPN, DirectConnect, or link:https://docs.aws.amazon.com/whitepapers/latest/aws-vpc-connectivity-options/aws-transit-gateway.html[TransitGateway] has been configured to allow private access. + +.Procedure + +Enter the following command to enable the `--private` option on an existing cluster. + +[source, terminal] +---- +$ rosa edit cluster --cluster-name= --private +---- + +[NOTE] +==== +Transitioning your cluster between private and public can take several minutes to complete. +==== diff --git a/modules/rosa-enable-private-cluster-new.adoc b/modules/rosa-enable-private-cluster-new.adoc new file mode 100644 index 0000000000..89625aeb16 --- /dev/null +++ b/modules/rosa-enable-private-cluster-new.adoc @@ -0,0 +1,33 @@ + +// Module included in the following assemblies: +// +// cloud_infrastructure_access/rosa-private-cluster.adoc + + +[id="rosa-enabling-private-cluster-new_{context}"] += Enabling private cluster on a new cluster + +You can enable the private cluster setting when creating a new {product-title} cluster. + +[IMPORTANT] +==== +Private clusters cannot be used with AWS security token service (STS). However, STS supports AWS PrivateLink clusters. +==== + +.Prerequisites + +AWS VPC Peering, VPN, DirectConnect, or link:https://docs.aws.amazon.com/whitepapers/latest/aws-vpc-connectivity-options/aws-transit-gateway.html[TransitGateway] has been configured to allow private access. + +.Procedure + +Enter the following command to create a new private cluster. + +[source, terminal] +---- +$ rosa create cluster --cluster-name= --private +---- + +[NOTE] +==== +Alternatively, use `--interactive` to be prompted for each cluster option. +==== diff --git a/modules/rosa-enabling-autoscaling-nodes.adoc b/modules/rosa-enabling-autoscaling-nodes.adoc new file mode 100644 index 0000000000..708e70b024 --- /dev/null +++ b/modules/rosa-enabling-autoscaling-nodes.adoc @@ -0,0 +1,47 @@ + +// Module included in the following assemblies: +// +// rosa-nodes/rosa-enabling-autoscaling-nodes.adoc + +[id="rosa-enabling-autoscaling-nodes_{context}"] += Enabling autoscaling nodes in an existing cluster using the rosa CLI + +Configure autoscaling to dynamically scale the number of worker nodes up or down based on load. + +Successful autoscaling is dependent on having the correct AWS resource quotas in your AWS account. Verify resource quotas and request quota increases from the link:https://aws.amazon.com/console/[AWS console]. + +.Procedure + +. 
To identify the machine pool IDs in a cluster, enter the following command: ++ +[source,terminal] +---- +$ rosa list machinepools --cluster= +---- ++ +.Example output ++ +[source,terminal] +---- +ID AUTOSCALING REPLICAS INSTANCE TYPE LABELS TINTS AVAILABILITY ZONES +default No 2 m5.xlarge us-east-1a +mp1 No 2 m5.xlarge us-east-1a +---- ++ +. Get the ID of the machine pools that you want to configure. + +. To enable autoscaling on a machine pool, enter the following command: ++ +[source,terminal] +---- +$ rosa edit machinepool --cluster= --enable-autoscaling --min-replicas= --max-replicas= +---- ++ +.Example ++ +Enable autoscaling on a machine pool with the ID `mp1` on a cluster named `mycluster`, with the number of replicas set to scale between 2 and 5 worker nodes: ++ +[source,terminal] +---- +$ rosa edit machinepool --cluster=mycluster mp1 --enable-autoscaling --min-replicas=2 --max-replicas=5 +---- diff --git a/modules/rosa-getting-support.adoc b/modules/rosa-getting-support.adoc new file mode 100644 index 0000000000..2266c7b93f --- /dev/null +++ b/modules/rosa-getting-support.adoc @@ -0,0 +1,22 @@ + +// Module included in the following assemblies: +// +// support/rosa-getting-support.adoc + +[id="rosa-getting-support_{context}"] += Getting support + +If you experience difficulty with a procedure described in this documentation, visit the link:https://access.redhat.com/[Red Hat Customer Portal]. Through the Customer Portal, you can: + +* Search or browse through the Red Hat Knowledgebase of technical support articles about Red Hat products. +* Access other product documentation. +* Submit a support case to Red Hat Support: +.. Click *Open a New Case*. +.. Select the reason for the support ticket, such as `Defect/Bug` or `Account/Customer Service Request`. +.. In the `Product` field, enter `OpenShift` to filter the list. Select `{product-title}` and the version from the drop-down menus. +.. Complete the remaining fields. +.. On the _Review_ page, select the correct cluster ID that you are contacting support about, and click `Submit`. + +You can also get support from link:https://aws.amazon.com/premiumsupport/[AWS Support] as long as you have a valid AWS support contract. + +If you have a suggestion for improving this documentation or have found an error, submit a link:https://bugzilla.redhat.com/index.cgi[Bugzilla] report against the OpenShift Container Platform product for the Documentation component. Be sure to provide specific details, such as the section name and {product-title} version. diff --git a/modules/rosa-initialize.adoc b/modules/rosa-initialize.adoc new file mode 100644 index 0000000000..e2e1e46920 --- /dev/null +++ b/modules/rosa-initialize.adoc @@ -0,0 +1,94 @@ + +// Module included in the following assemblies: +// +// * cli_reference/rosa_cli/rosa-get-started-cli.adoc + +[id="rosa-initialize_{context}"] += Initializing {product-title} + + +Use the `init` command to initialize {product-title} (ROSA). + +[id="rosa-init_{context}"] +== init + +Perform a series of checks to verify that you are ready to deploy an {product-title} cluster. 
+ +The list of checks includes the following: + +* Checks to see that you have logged in (see `login`) +* Checks that your AWS credentials are valid +* Checks that your AWS permissions are valid (see `verify permissions`) +* Checks that your AWS quota levels are high enough (see `verify quota`) +* Runs a cluster simulation to ensure cluster creation will perform as expected +* Checks that the `osdCcsAdmin` user has been created in your AWS account +* Checks that the OpenShift Container Platform command-line tool is available on your system + +.Syntax +[source,terminal] +---- +$ rosa init [arguments] +---- + +.Arguments +[cols="30,70"] +|=== +|Option |Definition + +|--region +|The AWS region (string) in which to verify quota and permissions. This value overrides the `AWS_REGION` environment variable only when running the `init` command, but it does not change your AWS CLI configuration. + +|--delete-stack +|Deletes the stack template that is applied to your AWS account during the `init` command. + +|--client-id +|The OpenID client identifier (string). Default: `cloud-services` + +|--client-secret +|The OpenID client secret (string). + +|--insecure +|Enables insecure communication with the server. This disables verification of TLS certificates and host names. + +|--scope +|The OpenID scope (string). If this option is used, it completely replaces the default scopes. This can be repeated multiple times to specify multiple scopes. Default: `openid` + +|--token +|Accesses or refreshes the token (string). + +|--token-url +|The OpenID token URL (string). Default: `\https://sso.redhat.com/auth/realms/redhat-external/protocol/openid-connect/token` +|=== + +.Optional arguments inherited from parent commands +[cols="30,70"] +|=== +|Option |Definition + +|--help +|Shows help for this command. + +|--debug +|Enables debug mode. + +|--profile +|Specifies an AWS profile (string) from your credentials file. + +|--v +|The log level for V logs. +|=== + +.Examples +Configure your AWS account to allow ROSA clusters: + +[source,terminal] +---- +$ rosa init +---- + +Configure a new AWS account using pre-existing OpenShift Cluster Manager (OCM) credentials: + +[source,terminal] +---- +$ rosa init --token=$OFFLINE_ACCESS_TOKEN +---- diff --git a/modules/rosa-install-logging-addon.adoc b/modules/rosa-install-logging-addon.adoc new file mode 100644 index 0000000000..4872819df8 --- /dev/null +++ b/modules/rosa-install-logging-addon.adoc @@ -0,0 +1,106 @@ + +// Module included in the following assemblies: +// +// logging/rosa-install-logging.adoc + +[id="rosa-install-logging-addon_{context}"] += Install the logging add-on service + +{product-title} (ROSA) provides logging through the `cluster-logging-operator` add-on. This add-on service offers an optional application log forwarding solution based on AWS CloudWatch. This logging solution can be installed after the ROSA cluster is provisioned. + +.Procedure + +. Enter the following command: ++ +[source,terminal] +---- +$ rosa install addon cluster-logging-operator --cluster= --interactive +---- ++ +For ``, enter the name of your cluster. + +. When prompted, accept the default `yes` to install the `cluster-logging-operator`. +. When prompted, accept the default `yes` to install the optional Amazon CloudWatch log forwarding add-on or enter `no` to decline the installation of this add-on. ++ +[NOTE] +==== +It is not necessary to install the AWS CloudWatch service when you install the `cluster-logging-operator`. 
You can install the AWS CloudWatch service at any time through the OpenShift Cluster Manager (OCM) console from the cluster's *Add-ons* tab. +==== +. For the collection of applications, infrastructure, and audit logs, accept the default values or change them as needed: ++ +* *Applications logs*: Lets the Operator collect application logs, which includes everything that is _not_ deployed in the openshift-*, kube-*, and default namespaces. Default: `yes` +* *Infrastructure logs*: Lets the Operator collect logs from OpenShift Container Platform, Kubernetes, and some nodes. Default: `yes` +* *Audit logs*: Type `yes` to let the Operator collect node logs related to security audits. By default, Red Hat stores audit logs outside the cluster through a separate mechanism that does not rely on the Cluster Logging Operator. For more information about default audit logging, see the ROSA Service Definition. Default: `no` + +. For the Amazon CloudWatch region, use the default cluster region, leave the `Cloudwatch region` value empty. ++ +.Example output +[source,terminal] +---- +? Are you sure you want to install add-on 'cluster-logging-operator' on cluster ''? Yes +? Use AWS CloudWatch (optional): Yes +? Collect Applications logs (optional): Yes +? Collect Infrastructure logs (optional): Yes +? Collect Audit logs (optional): No +? CloudWatch region (optional): +I: Add-on 'cluster-logging-operator' is now installing. To check the status run 'rosa list addons --cluster=' +---- + +[NOTE] +==== +The installation can take approximately 10 minutes to complete. +==== + +.Verification steps + +. To verify the logging installation status, enter the following command: ++ +[source,terminal] +---- +$ rosa list addons --cluster= +---- + +. To verify which pods are deployed by `cluster-logging-operator` and their state of readiness: + +.. Log in to the `oc` CLI using `cluster-admin` credentials: ++ +[source,terminal] +---- +$ oc login https://api.mycluster.abwp.s1.example.org:6443 \ + --username cluster-admin + --password +---- + +.. Enter the following command to get information about the pods for the default project. Alternatively, you can specify a different project. ++ +[source,terminal] +---- +$ oc get pods -n openshift-logging +---- ++ +.Example output ++ +[source,terminal] +---- +NAME READY STATUS RESTARTS AGE +cluster-logging-operator- 2/2 Running 0 7m1s +fluentd-4mnwp 1/1 Running 0 6m3s +fluentd-6xt25 1/1 Running 0 6m3s +fluentd-fqjhv 1/1 Running 0 6m3s +fluentd-gcvrg 1/1 Running 0 6m3s +fluentd-vpwrt 1/1 Running 0 6m3s +---- + +. Optional: To get information about the `clusterlogging` instance, enter the following command: ++ +[source,terminal] +---- +$ oc get clusterlogging -n openshift-logging +---- + +. Optional: To get information about `clusterlogforwarders` instances, enter the following command: ++ +[source,terminal] +---- +$ oc get clusterlogforwarders -n openshift-logging +---- diff --git a/modules/rosa-install-uninstall-addon.adoc b/modules/rosa-install-uninstall-addon.adoc new file mode 100644 index 0000000000..023e6cb83a --- /dev/null +++ b/modules/rosa-install-uninstall-addon.adoc @@ -0,0 +1,112 @@ +// Module included in the following assemblies: +// +// * cli_reference/rosa_cli/rosa-manage-objects-cli.adoc + +[id="rosa-install-uninstall-addon_{context}"] += Install and uninstall add-ons + + +This section describes how to install and uninstall Red Hat managed service add-ons to a cluster. 
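Before you install or uninstall an add-on, you can check which add-on installations already exist on a cluster. The following example is a quick sketch that uses the `list addons` command documented in the _List and describe objects_ section of this reference, with `mycluster` standing in for your cluster name:

[source,terminal]
----
$ rosa list addons --cluster=mycluster
----
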
+ +[id="rosa-install-addon_{context}"] +== install addon + +Installs a managed service add-on on a cluster. + +.Syntax +[source,terminal] +---- +$ rosa install addon --cluster= | [arguments] +---- + +.Arguments +[cols="30,70"] +|=== +|Option |Definition + +|--cluster +|Required: The name or ID (string) of the cluster where the add-on will be installed. +|=== + +.Optional arguments inherited from parent commands +[cols="30,70"] +|=== +|Option |Definition + +|--help +|Shows help for this command. + +|--debug +|Enables debug mode. + +|--profile +|Uses a specific AWS profile (string) from your credentials file. + +|--v level +|Log level for V logs. + +|--yes +|Automatically answers `yes` to confirm the operation. +|=== + +.Example +Add the `codeready-workspaces` add-on installation to a cluster named `mycluster`. + +[source,terminal] +---- +$ rosa install addon --cluster=mycluster codeready-workspaces +---- + +[NOTE] +==== +After installing Red Hat CodeReady Workspace, it can be deployed to any namespace except `openshift-workspaces`. For more information, see link:https://access.redhat.com/documentation/en-us/red_hat_codeready_workspaces/2.10/html/installation_guide/installing-codeready-workspaces_crw#creating-a-project-in-openshift-web-console_crw[Installing the Red Hat CodeReady Workspaces Operator]. +==== + +[id="rosa-uninstall-addon_{context}"] +== uninstall addon + +Uninstalls a managed service add-on from a cluster. + +.Syntax +[source,terminal] +---- +$ rosa uninstall addon --cluster= | [arguments] +---- + +.Arguments +[cols="30,70"] +|=== +|Option |Definition + +|--cluster +|Required: The name or ID (string) of the cluster that the add-on will be uninstalled from. +|=== + +.Optional arguments inherited from parent commands +[cols="30,70"] +|=== +|Option |Definition + +|--help +|Shows help for this command. + +|--debug +|Enables debug mode. + +|--profile +|Uses a specific AWS profile (string) from your credentials file. + +|--v level +|Log level for V logs. + +|--yes +|Automatically answers `yes` to confirm the operation. +|=== + +.Example +Remove the `codeready-workspaces` add-on installation from a cluster named `mycluster`. + +[source,terminal] +---- +$ rosa uninstall addon --cluster=mycluster codeready-workspaces +---- diff --git a/modules/rosa-installing.adoc b/modules/rosa-installing.adoc new file mode 100644 index 0000000000..c3db33f6b7 --- /dev/null +++ b/modules/rosa-installing.adoc @@ -0,0 +1,208 @@ + +// Module included in the following assemblies: +// +// * rosa-getting-started/rosa-installing-rosa.adoc + + +[id="rosa-installing_{context}"] += Installing ROSA + +Complete the following steps to install ROSA before creating a cluster. + +.Prerequisites + +* Review and complete the AWS prerequisites and ROSA policies. +* Create a link:https://cloud.redhat.com[Red Hat account], if you do not already have one. Then, check your email for a verification link. You will need these credentials to install ROSA. +* Configure your AWS account and enable the ROSA service in your AWS account. + +.Procedure + +. Install `rosa`, the {product-title} command-line interface (CLI). +.. Download the link:https://access.redhat.com/products/red-hat-openshift-service-aws/[latest release] of the `rosa` CLI for your operating system. +.. Optional: Rename the executable file you downloaded to `rosa`. This documentation uses `rosa` to refer to the executable file. +.. Optional: Add `rosa` to your path. ++ +.Example +[source,terminal] +---- +$ mv rosa /usr/local/bin/rosa +---- +.. 
Enter the following command to verify your installation: ++ +[source,terminal] +---- +$ rosa +---- ++ +.Example output +[source,terminal] +---- +Command line tool for ROSA. + +Usage: + rosa [command] + +Available Commands: + completion Generates bash completion scripts + create Create a resource from stdin + delete Delete a specific resource + describe Show details of a specific resource + edit Edit a specific resource + help Help about any command + init Applies templates to support Managed OpenShift on AWS clusters + list List all resources of a specific type + login Log in to your Red Hat account + logout Log out + logs Show logs of a specific resource + verify Verify resources are configured correctly for cluster install + version Prints the version of the tool + +Flags: + --debug Enable debug mode. + -h, --help help for rosa + -v, --v Level log level for V logs + +Use "rosa [command] --help" for more information about a command. +---- ++ +.. Optional: You can run the `rosa completion` command to generate a bash completion file. ++ +[source,terminal] +---- +$ rosa completion > /etc/bash_completion.d/rosa +---- ++ +Add this file to the correct location for your operating system. For example, on a Linux machine, run the following command to enable `rosa` bash completion: ++ +[source,terminal] +---- +$ source /etc/bash_completion.d/rosa +---- + +. Enter the following command to verify that your AWS account has the necessary permissions. ++ +[source,terminal] +---- +$ rosa verify permissions +---- ++ +.Example output +[source,terminal] +---- +I: Validating SCP policies... +I: AWS SCP policies ok +---- + +. Log in to your Red Hat account with `rosa`. ++ +.. Enter the following command. ++ +[source,terminal] +---- +$ rosa login +---- ++ +.. Replace `` with your token. ++ +.Example output +[source,terminal] +---- +To login to your Red Hat account, get an offline access token at https://cloud.redhat.com/openshift/token/rosa +? Copy the token and paste it here: +---- ++ +.Example output continued +[source,terminal] +---- +I: Logged in as 'rh-rosa-user' on 'https://api.openshift.com' +---- + +. Verify that your AWS account has the necessary quota to deploy an {product-title} cluster. ++ +[source,terminal] +---- +$ rosa verify quota --region=us-west-2 +---- ++ +.Example output +[source,terminal] +---- +I: Validating AWS quota... +I: AWS quota ok +---- ++ +[NOTE] +==== +Sometimes your AWS quota varies by region. If you receive any errors, try a different region. +==== ++ +If you need to increase your quota, go to your link:https://aws.amazon.com/console/[AWS console], and request a quota increase for the service that failed. ++ +After both the permissions and quota checks pass, proceed to the next step. ++ +. Prepare your AWS account for cluster deployment: ++ +.. Run the following command to verify your Red Hat and AWS credentials are setup correctly. Check that your AWS Account ID, Default Region and ARN match what you expect. You can safely ignore the rows beginning with OCM for now (OCM stands for OpenShift Cluster Manager). 
++ +[source,terminal] +---- +$ rosa whoami +---- ++ +.Example output +[source,terminal] +---- +AWS Account ID: 000000000000 +AWS Default Region: us-east-2 +AWS ARN: arn:aws:iam::000000000000:user/hello +OCM API: https://api.openshift.com +OCM Account ID: 1DzGIdIhqEWyt8UUXQhSoWaaaaa +OCM Account Name: Your Name +OCM Account Username: you@domain.com +OCM Account Email: you@domain.com +OCM Organization ID: 1HopHfA2hcmhup5gCr2uH5aaaaa +OCM Organization Name: Red Hat +OCM Organization External ID: 0000000 +---- ++ +.. Initialize your AWS account. This step runs a CloudFormation template that prepares your AWS account for cluster deployment and management. This step typically takes 1-2 minutes to complete. ++ +[source,terminal] +---- +$ rosa init +---- ++ +.Example output +[source,terminal] +---- +I: Logged in as 'rh-rosa-user' on 'https://api.openshift.com' +I: Validating AWS credentials... +I: AWS credentials are valid! +I: Validating SCP policies... +I: AWS SCP policies ok +I: Validating AWS quota... +I: AWS quota ok +I: Ensuring cluster administrator user 'osdCcsAdmin'... +I: Admin user 'osdCcsAdmin' created successfully! +I: Verifying whether OpenShift command-line tool is available... +E: OpenShift command-line tool is not installed. +Run 'rosa download oc' to download the latest version, then add it to your PATH. +---- + +. Install the OpenShift CLI (`oc`) from the `rosa` CLI. +.. Enter this command to download the latest version of the `oc` CLI: ++ +[source,terminal] +---- +$ rosa download oc +---- + +.. After downloading the `oc` CLI, unzip it and add it to your path. +.. Enter this command to verify that the `oc` CLI is installed correctly: ++ +[source,terminal] +---- +$ rosa verify oc +---- + +After installing ROSA, you are ready to create a cluster. diff --git a/modules/rosa-kubernetes-concept.adoc b/modules/rosa-kubernetes-concept.adoc new file mode 100644 index 0000000000..9874091b83 --- /dev/null +++ b/modules/rosa-kubernetes-concept.adoc @@ -0,0 +1,28 @@ + +// Module included in the following assemblies: +// +// understanding-rosa/rosa-understanding.adoc + + +[id="rosa-kubernetes-concept_{context}"] += Kubernetes + +{product-title} (ROSA) uses the Red Hat enterprise Kubernetes platform. Kubernetes is an open source platform for managing containerized workloads and services across multiple hosts, and offers management tools for deploying, automating, monitoring, and scaling containerized apps with minimal to no manual intervention. For complete information about Kubernetes, see the link:https://kubernetes.io/docs/home/?path=users&persona=app-developer&level=foundational[Kubernetes documentation]. + +Cluster, compute pool, and compute node:: A Kubernetes cluster consists of a control plane and one or more compute nodes. Compute nodes are organized into compute pools of the type or profile of CPU, memory, operating system, attached disks, and other properties. The compute nodes correspond to the Kubernetes `Node` resource, and are managed by a Kubernetes control plane that centrally controls and monitors all Kubernetes resources in the cluster. ++ +When you deploy the resources for a containerized app, the Kubernetes control plane decides which compute node to deploy those resources on, accounting for the deployment requirements and available capacity in the cluster. Kubernetes resources include services, deployments, and pods. 
+ +Namespace:: Kubernetes namespaces are a way to divide your cluster resources into separate areas that you can deploy apps and restrict access to, such as if you want to share the cluster with multiple teams. For example, system resources that are configured for you are kept in separate namespaces like `kube-system`. If you do not designate a namespace when you create a Kubernetes resource, the resource is automatically created in the `default` namespace. + +Pod:: Every containerized app that is deployed into a cluster is deployed, run, and managed by a Kubernetes resource that is called a pod. Pods represent small deployable units in a Kubernetes cluster and are used to group the containers that must be treated as a single unit. In most cases, each container is deployed in its own pod. However, an app can require a container and other helper containers to be deployed into one pod so that those containers can be addressed by using the same private IP address. + +App:: An app can refer to a complete app or a component of an app. You can deploy components of an app in separate pods or separate compute nodes. + +Service:: A service is a Kubernetes resource that groups a set of pods and provides network connectivity to these pods without exposing the actual private IP address of each pod. You can use a service to make your app available within your cluster or to the public Internet. + +Deployment:: A deployment is a Kubernetes resource where you can specify information about other resources or capabilities that are required to run your app, such as services, persistent storage, or annotations. You configure a deployment in a configuration YAML file, and then apply it to the cluster. The Kubernetes master configures the resources and deploys containers into pods on the compute nodes with available capacity. ++ +Define update strategies for your app, including the number of pods that you want to add during a rolling update and the number of pods that can be unavailable at a time. When you perform a rolling update, the deployment checks whether the update is working and stops the rollout when failures are detected. ++ +A deployment is just one type of workload controller that you can use to manage pods. diff --git a/modules/rosa-list-objects.adoc b/modules/rosa-list-objects.adoc new file mode 100644 index 0000000000..4bfda9779d --- /dev/null +++ b/modules/rosa-list-objects.adoc @@ -0,0 +1,569 @@ +// Module included in the following assemblies: +// +// * cli_reference/rosa_cli/rosa-manage-objects-cli.adoc + +[id="rosa-list-objects_{context}"] += List and describe objects + +This section describes the `list` and `describe` commands for clusters and resources. + +[id="rosa-list-oaddon_{context}"] +== list addon + +List the managed service add-on installations. + +.Syntax +[source,terminal] +---- +$ rosa list addons --cluster= | +---- + +.Arguments +[cols="30,70"] +|=== +|Option |Definition + +|--cluster +|Required: The name or ID (string) of the cluster to list the add-ons for. +|=== + +.Optional arguments inherited from parent commands +[cols="30,70"] +|=== +|Option |Definition + +|--help +|Shows help for this command. + +|--debug +|Enables debug mode. + +|--profile +|Specifies an AWS profile (string) from your credentials file. + +|--v +|The log level for V logs. +|=== + +[id="rosa-list-clusters_{context}"] +== list clusters + +List all of your clusters. 
+ +.Syntax +[source,terminal] +---- +$ rosa list clusters [arguments] +---- + +.Arguments +[cols="30,70"] +|=== +|Option |Definition + +|--count +|The number (integer) of clusters to display. Default: `100` +|=== + +.Optional arguments inherited from parent commands +[cols="30,70"] +|=== +|Option |Definition + +|--help +|Shows help for this command. + +|--debug +|Enables debug mode. + +|--profile +|Specifies an AWS profile (string) from your credentials file. + +|--v +|The log level for V logs. +|=== + +[id="rosa-list-idps_{context}"] +== list idps + +List all of the identity providers (IDPs) for a cluster. + +.Syntax +[source,terminal] +---- +$ rosa list idps --cluster= | [arguments] +---- + +.Arguments +[cols="30,70"] +|=== +|Option |Definition + +|--cluster +|Required: The name or ID (string) of the cluster that the IDPs will be listed for. +|=== + +.Optional arguments inherited from parent commands +[cols="30,70"] +|=== +|Option |Definition + +|--help +|Shows help for this command. + +|--debug +|Enables debug mode. + +|--profile +|Specifies an AWS profile (string) from your credentials file. + +|--v +|The log level for V logs. +|=== + +.Example +List all identity providers (IDPs) for a cluster named `mycluster`: + +[source,terminal] +---- +$ rosa list idps --cluster=mycluster +---- + +[id="rosa-list-ingresses_{context}"] +== list ingresses + +List all of the API and ingress endpoints for a cluster. + +.Syntax +[source,terminal] +---- +$ rosa list ingresses --cluster= | [arguments] +---- + +.Arguments +[cols="30,70"] +|=== +|Option |Definition + +|--cluster +|Required: The name or ID (string) of the cluster that the IDPs will be listed for. +|=== + +.Optional arguments inherited from parent commands +[cols="30,70"] +|=== +|Option |Definition + +|--help +|Shows help for this command. + +|--debug +|Enables debug mode. + +|--profile +|Specifies an AWS profile (string) from your credentials file. + +|--v +|The log level for V logs. +|=== + +.Example +List all API and ingress endpoints for a cluster named `mycluster`: + +[source,terminal] +---- +$ rosa list ingresses --cluster=mycluster +---- + +== list instance-types + +List all of the available instance types for use with {product-title}. Availability is based on the account's AWS quota. + +.Syntax +[source,terminal] +---- +$ rosa list instance-types [arguments] +---- + +.Optional arguments inherited from parent commands +[cols="30,70"] +|=== +|Option |Definition + +|--help +|Shows help for this command. + +|--debug +|Enables debug mode. + +|--output +|The output format. Allowed formats are `json` or `yaml`. + +|--profile +|Specifies an AWS profile (string) from your credentials file. +|=== + +.Example +List all instance types: + +[source,terminal] +---- +$ rosa list instance-types +---- + +[id="rosa-list-machinepools_{context}"] +== list machinepools + +List the machine pools configured on a cluster. + +.Syntax +[source,terminal] +---- +$ rosa list machinepools --cluster= | [arguments] +---- + +.Arguments +[cols="30,70"] +|=== +|Option |Definition + +|--cluster +|Required: The name or ID (string) of the cluster that the machine pools will be listed for. +|=== + +.Optional arguments inherited from parent commands +[cols="30,70"] +|=== +|Option |Definition + +|--help +|Shows help for this command. + +|--debug +|Enables debug mode. + +|--profile +|Specifies an AWS profile (string) from your credentials file. + +|--v +|The log level for V logs. 
+|=== + +.Example +List all of the machine pools on a cluster named `mycluster`: + +[source,terminal] +---- +$ rosa list machinepools --cluster=mycluster +---- + +[id="rosa-list-regions_{context}"] +== list regions + +List all of the available regions for the current AWS account. + +.Syntax +[source,terminal] +---- +$ rosa list regions [arguments] +---- + +.Arguments +[cols="30,70"] +|=== +|Option |Definition + +|--multi-az +|Lists regions that provide support for multiple availability zones. +|=== + +.Optional arguments inherited from parent commands +[cols="30,70"] +|=== +|Option |Definition + +|--help +|Shows help for this command. + +|--debug +|Enables debug mode. + +|--profile +|Specifies an AWS profile (string) from your credentials file. + +|--v +|The log level for V logs. +|=== + +.Example +List all of the available regions: + +[source,terminal] +---- +$ rosa list regions +---- + +[id="rosa-list-upgrades_{context}"] +== list upgrades + +List all available and scheduled cluster version upgrades. + +.Syntax +[source,terminal] +---- +$ rosa list upgrades --cluster= | [arguments] +---- + +.Arguments +[cols="30,70"] +|=== +|Option |Definition + +|--cluster +|Required: The name or ID (string) of the cluster that the available upgrades will be listed for. +|=== + +.Optional arguments inherited from parent commands +[cols="30,70"] +|=== +|Option |Definition + +|--help +|Shows help for this command. + +|--debug +|Enables debug mode. + +|--profile +|Specifies an AWS profile (string) from your credentials file. + +|--v +|The log level for V logs. +|=== + +.Example +List all of the available upgrades for a cluster named `mycluster`: + +[source,terminal] +---- +$ rosa list upgrades --cluster=mycluster +---- + +[id="rosa-list-users_{context}"] +== list users +List the cluster administrator and dedicated administrator users for a specified cluster. + +.Syntax +[source,terminal] +---- +$ rosa list users --cluster= | [arguments] +---- + +.Arguments +[cols="30,70"] +|=== +|Option |Definition + +|--cluster +|Required: The name or ID (string) of the cluster that the cluster administrators will be listed for. +|=== + +.Optional arguments inherited from parent commands +[cols="30,70"] +|=== +|Option |Definition + +|--help +|Shows help for this command. + +|--debug +|Enables debug mode. + +|--profile +|Specifies an AWS profile (string) from your credentials file. + +|--v +|The log level for V logs. +|=== + +.Example +List all of the cluster administrators and dedicated administrators for a cluster named `mycluster`: + +[source,terminal] +---- +$ rosa list users --cluster=mycluster +---- + +[id="rosa-list-versions_{context}"] +== list versions + +List all of the OpenShift versions that are available for creating a cluster. + +.Syntax +[source,terminal] +---- +$ rosa list versions [arguments] +---- + +.Arguments +[cols="30,70"] +|=== +|Option |Definition + +|--channel-group +|Lists only versions from the specified channel group (string). Default: `stable` +|=== + +.Optional arguments inherited from parent commands +[cols="30,70"] +|=== +|Option |Definition + +|--help +|Shows help for this command. + +|--debug +|Enables debug mode. + +|--profile +|Specifies an AWS profile (string) from your credentials file. + +|--v +|The log level for V logs. 
+|=== + +.Example +List all of the OpenShift Container Platform versions: + +[source,terminal] +---- +$ rosa list versions +---- + +[id="rosa-describe-admin_{context}"] +== describe admin + +Show the details of a specified `cluster-admin` user and a command to log in to the cluster. + +.Syntax +[source,terminal] +---- +$ rosa describe admin --cluster= | [arguments] +---- + +.Arguments +[cols="30,70"] +|=== +|Option |Definition + +|--cluster +|Required: The name or ID (string) of the cluster to which the cluster-admin belongs. +|=== + +.Optional arguments inherited from parent commands +[cols="30,70"] +|=== +|Option |Definition + +|--help +|Shows help for this command. + +|--debug +|Enables debug mode. + +|--profile +|Specifies an AWS profile (string) from your credentials file. + +|--v +|The log level for V logs. +|=== + +.Example +Describe the `cluster-admin` user for a cluster named `mycluster`: + +[source,terminal] +---- +$ rosa describe admin --cluster=mycluster +---- + +[id="rosa-describe-addon_{context}"] +== describe addon + +Show the details of a managed service add-on. + +.Syntax +[source,terminal] +---- +$ rosa describe addon | [arguments] +---- + +.Optional arguments inherited from parent commands +[cols="30,70"] +|=== +|Option |Definition + +|--help +|Shows help for this command. + +|--debug +|Enables debug mode. + +|--profile +|Specifies an AWS profile (string) from your credentials file. + +|--v +|The log level for V logs. +|=== + +.Example +Describe an add-on named `codeready-workspaces`: + +[source,terminal] +---- +$ rosa describe addon codeready-workspaces +---- + +[id="rosa-describe-cluster_{context}"] +== describe cluster + +Shows the details for a cluster. + +.Syntax +[source,terminal] +---- +$ rosa describe cluster --cluster= | [arguments] +---- + +.Arguments +[cols="30,70"] +|=== +|Option |Definition + +|--cluster +|Required: The name or ID (string) of the cluster. +|=== + +.Optional arguments inherited from parent commands +[cols="30,70"] +|=== +|Option |Definition + +|--help +|Shows help for this command. + +|--debug +|Enables debug mode. + +|--profile +|Specifies an AWS profile (string) from your credentials file. + +|--v +|The log level for V logs. +|=== + +.Example +Describe a cluster named `mycluster`: +[source,terminal] +---- +$ rosa describe cluster --cluster=mycluster +---- diff --git a/modules/rosa-logs.adoc b/modules/rosa-logs.adoc new file mode 100644 index 0000000000..d10caf805e --- /dev/null +++ b/modules/rosa-logs.adoc @@ -0,0 +1,120 @@ + +// Module included in the following assemblies: +// +// * cli_reference/rosa_cli/rosa-troubleshoot-cli.adoc + +[id="rosa-logs_{context}"] += Checking logs with the rosa CLI + + +Use the following commands to check your install and uninstall logs. + +[id="rosa-logs-install_{context}"] +== logs install + +Show the cluster install logs. + +.Syntax +[source,terminal] +---- +$ rosa logs install --cluster= | [arguments] +---- + +.Arguments +[cols="30,70"] +|=== +|Option |Definition + +|--cluster +|Required: The name or ID (string) of the cluster to get logs for. + +|--tail +|The number (integer) of lines to get from the end of the log. Default: `2000` + +|--watch +|Watches for changes after getting the logs. +|=== + +.Optional arguments inherited from parent commands +[cols="30,70"] +|=== +|Option |Definition + +|--help +|Shows help for this command. + +|--debug +|Enables debug mode. + +|--profile +|Specifies an AWS profile (string) from your credentials file. + +|--v +|The log level for V logs. 
+|=== + +.Examples +Show the last 100 install log lines for a cluster named `mycluster`: + +[source,terminal] +---- +$ rosa logs install mycluster --tail=100 +---- + +Show the install logs for a cluster named `mycluster`: + +[source,terminal] +---- +$ rosa logs install --cluster=mycluster +---- + +[id="rosa-logs-uninstall_{context}"] +== logs uninstall + +Show the cluster uninstall logs. + +.Syntax +[source,terminal] +---- +$ rosa logs uninstall --cluster= | [arguments] +---- + +.Arguments +[cols="30,70"] +|=== +|Option |Definition + +|--cluster +|The name or ID (string) of the cluster to get logs for. + +|--tail +|The number (integer) of lines to get from the end of the log. Default: `2000` + +|--watch +|Watches for changes after getting the logs. +|=== + +.Optional arguments inherited from parent commands +[cols="30,70"] +|=== +|Option |Definition + +|--help +|Shows help for this command. + +|--debug +|Enables debug mode. + +|--profile +|Specifies an AWS profile (string) from your credentials file. + +|--v +|The log level for V logs. +|=== + +.Example +Show the last 100 uninstall logs for a cluster named `mycluster`: +[source,terminal] +---- +$ rosa logs uninstall --cluster=mycluster --tail=100 +---- diff --git a/modules/rosa-openshift-concepts.adoc b/modules/rosa-openshift-concepts.adoc new file mode 100644 index 0000000000..7f6545e2ec --- /dev/null +++ b/modules/rosa-openshift-concepts.adoc @@ -0,0 +1,10 @@ + +// Module included in the following assemblies: +// +// understanding-rosa/rosa-understanding.adoc + + +[id="rosa-openshift-concept_{context}"] += OpenShift + +OpenShift is a Kubernetes container platform that provides a trusted environment to run enterprise workloads. It extends the Kubernetes platform with built-in software to enhance app lifecycle development, operations, and security. With OpenShift, you can consistently deploy your workloads across hybrid cloud providers and environments. diff --git a/modules/rosa-parent-commands.adoc b/modules/rosa-parent-commands.adoc new file mode 100644 index 0000000000..77d1f9ef35 --- /dev/null +++ b/modules/rosa-parent-commands.adoc @@ -0,0 +1,65 @@ + +// Module included in the following assemblies: +// +// * cli_reference/rosa_cli/rosa-manage-objects-cli.adoc + +[id="rosa-parent-commands_{context}"] += Parent commands + + +The `rosa` CLI uses parent commands with child commands to manage objects. The parent commands are `create`, `edit`, `delete`, `list`, and `describe`. Not all parent commands can be used with all child commands. For more information, see the specific reference topics that describes the child commands. + +[id="rosa-create_{context}"] +== create + +Creates an object or resource when paired with a child command. + +.Example +[source,terminal] +---- +$ rosa create cluster --cluster-name=mycluster +---- + +[id="rosa-edit_{context}"] +== edit + +Edits options for an object, such as making a cluster private. + +.Example +[source,terminal] +---- +$ rosa edit cluster --cluster=mycluster --private +---- + +[id="rosa-delete_{context}"] +== delete + +Deletes an object or resource when paired with a child command. + +.Example +[source,terminal] +---- +$ rosa delete ingress --cluster=mycluster +---- + +[id="rosa-list_{context}"] +== list + +Lists clusters or resources for a specific cluster. + +.Example +[source,terminal] +---- +$ rosa list users --cluster=mycluster +---- + +[id="rosa-describe_{context}"] +== describe + +Shows the details for a cluster. 
+ +.Example +[source,terminal] +---- +$ rosa describe --cluster=mycluster +---- diff --git a/modules/rosa-planning-cluster-maximums-environment.adoc b/modules/rosa-planning-cluster-maximums-environment.adoc new file mode 100644 index 0000000000..b62a791865 --- /dev/null +++ b/modules/rosa-planning-cluster-maximums-environment.adoc @@ -0,0 +1,58 @@ + +// Module included in the following assemblies: +// +// rosa_planning/rosa-planning-environment.adoc + +[id="rosa-planning-cluster-maximums-environment_{context}"] += OpenShift Container Platform testing environment and configuration + +The following table lists the OpenShift Container Platform environment and configuration on which the cluster maximums are tested for the AWS cloud platform. + +[options="header",cols="8*"] +|=== +| Node |Type |vCPU |RAM(GiB) |Disk type|Disk size(GiB)/IOS |Count |Region + +|Control plane/etcd ^[1]^ +|m5.4xlarge +|16 +|64 +|io1 +|350 / 1,000 +|3 +|us-west-2 + +|Infrastructure nodes ^[2]^ +|r5.2xlarge +|8 +|64 +|gp2 +|300 / 900 +|3 +|us-west-2 + +|Workload ^[3]^ +|m5.2xlarge +|8 +|32 +|gp2 +|350 / 900 +|3 +|us-west-2 + +|Worker nodes +|m5.2xlarge +|8 +|32 +|gp2 +|350 / 900 +|102 +|us-west-2 +|=== +[.small] +-- +1. io1 disks are used for control plane/etcd nodes because etcd is I/O intensive and latency sensitive. A greater number of IOPS can be required, depending on usage. +2. Infrastructure nodes are used to host monitoring components because Prometheus can claim a large amount of memory, depending on usage patterns. +3. Workload nodes are dedicated to run performance and scalability workload generators. +-- + +Larger cluster sizes and higher object counts might be reachable. However, the sizing of the infrastructure nodes limits the amount of memory that is available to Prometheus. When creating, modifying, or deleting objects, Prometheus stores the metrics in its memory for roughly 3 hours prior to persisting the metrics on disk. If the rate of creation, modification, or deletion of objects is too high, Prometheus can become overwhelmed and fail due to the lack of memory resources. diff --git a/modules/rosa-planning-cluster-maximums.adoc b/modules/rosa-planning-cluster-maximums.adoc new file mode 100644 index 0000000000..f3032fc24f --- /dev/null +++ b/modules/rosa-planning-cluster-maximums.adoc @@ -0,0 +1,54 @@ + +// Module included in the following assemblies: +// +// rosa_planning/rosa-planning-environment.adoc + +[id="tested-cluster-maximums_{context}"] += ROSA tested cluster maximums + +The following table specifies the maximum limits for each tested type in a {product-title} cluster. + +.Tested cluster maximums +[options="header",cols="50,50"] +|=== +|Maximum type |4.8 tested maximum + +|Number of nodes +|102 + +|Number of pods ^[1]^ +|20,400 + +|Number of pods per node +|250 + +|Number of pods per core +|There is no default value + +|Number of namespaces ^[2]^ +|3,400 + +|Number of pods per namespace ^[3]^ +|20,400 + +|Number of services ^[4]^ +|10,000 + +|Number of services per namespace +|10,000 + +|Number of back ends per service +|10,000 + +|Number of deployments per namespace ^[3]^ +|1,000 +|=== +[.small] +-- +1. The pod count displayed here is the number of test pods. The actual number of pods depends on the application’s memory, CPU, and storage requirements. +2. When there are a large number of active projects, etcd can suffer from poor performance if the keyspace grows excessively large and exceeds the space quota. 
Periodic maintenance of etcd, including defragmentation, is highly recommended to make etcd storage available. +3. There are a number of control loops in the system that must iterate over all objects in a given namespace as a reaction to some changes in state. Having a large number of objects of a type, in a single namespace, can make those loops expensive and slow down processing the state changes. The limit assumes that the system has enough CPU, memory, and disk to satisfy the application requirements. +4. Each service port and each service back end has a corresponding entry in iptables. The number of back ends of a given service impacts the size of the endpoints objects, which then impacts the size of data that is sent throughout the system. +-- + +In OpenShift Container Platform 4.8, half of a CPU core (500 millicore) is reserved by the system compared to previous versions of OpenShift Container Platform. diff --git a/modules/rosa-planning-considerations.adoc b/modules/rosa-planning-considerations.adoc new file mode 100644 index 0000000000..5d4502b6b7 --- /dev/null +++ b/modules/rosa-planning-considerations.adoc @@ -0,0 +1,42 @@ + +// Module included in the following assemblies: +// +// rosa_planning/rosa-planning-environment.adoc + +[id="initial-planning-considerations_{context}"] += Initial planning considerations + +Consider the following tested object maximums when you plan your {product-title} cluster. + +These guidelines are based on a cluster of 102 workers in a multi-availability zone configuration. For smaller clusters, the maximums are lower. + +The sizing of the control plane and infrastructure nodes is dynamically calculated during the installation process, based on the number of worker nodes. If you change the number of worker nodes after the installation, control plane and infra nodes must be resized manually. Infra nodes are resized by the Red Hat SRE team, and you can link:https://access.redhat.com/[open a ticket in the Customer Portal] to request the infra node resizing. + +The following table lists the size of control plane and infrastructure nodes that are assigned during installation. + +[options="header",cols="3*"] +|=== +| Number of worker nodes |Control plane size |Infrastructure node size + +|1 to 25 +|m5.2xlarge +|r5.xlarge + +|26 to 100 +|m5.4xlarge +|r5.2xlarge + +|101 to 180 ^[1]^ +|m5.8xlarge +|r5.4xlarge +|=== +[.small] +-- +1. The maximum number of worker nodes on ROSA is 180 +-- + +For larger clusters, infrastructure node sizing can become a large impacting factor to scalability. There are many factors that influence the stated thresholds, including the etcd version or storage data format. + +Exceeding these limits does not necessarily mean that the cluster will fail. In most cases, exceeding these numbers results in lower overall performance. + +The OpenShift Container Platform version used in all of the tests is OCP 4.8.0. diff --git a/modules/rosa-planning-environment-application-reqs.adoc b/modules/rosa-planning-environment-application-reqs.adoc new file mode 100644 index 0000000000..f43a56874a --- /dev/null +++ b/modules/rosa-planning-environment-application-reqs.adoc @@ -0,0 +1,169 @@ + +// Module included in the following assemblies: +// +// rosa_planning/rosa-planning-environment.adoc +[id="planning-environment-application-requirements_{context}"] += Planning your environment based on application requirements + +This document describes how to plan your {product-title} environment based on your application requirements. 
+ +Consider an example application environment: + +[options="header",cols="5"] +|=== +|Pod type |Pod quantity |Max memory |CPU cores |Persistent storage + +|apache +|100 +|500 MB +|0.5 +|1 GB + +|node.js +|200 +|1 GB +|1 +|1 GB + +|postgresql +|100 +|1 GB +|2 +|10 GB + +|JBoss EAP +|100 +|1 GB +|1 +|1 GB +|=== + +Extrapolated requirements: 550 CPU cores, 450 GB RAM, and 1.4 TB storage. + +Instance size for nodes can be modulated up or down, depending on your preference. Nodes are often resource overcommitted. In this deployment scenario, you can choose to run additional smaller nodes or fewer larger nodes to provide the same amount of resources. Factors such as operational agility and cost-per-instance should be considered. + +[options="header",cols="4"] +|=== +|Node type |Quantity |CPUs |RAM (GB) + +|Nodes (option 1) +|100 +|4 +|16 + +|Nodes (option 2) +|50 +|8 +|32 + +|Nodes (option 3) +|25 +|16 +|64 +|=== + +Some applications lend themselves well to overcommitted environments, and some do not. Most Java applications and applications that use huge pages are examples of applications that would not allow for overcommitment. That memory can not be used for other applications. In the example above, the environment would be roughly 30 percent overcommitted, a common ratio. + +The application pods can access a service either by using environment variables or DNS. If using environment variables, for each active service the variables are injected by the kubelet when a pod is run on a node. A cluster-aware DNS server watches the Kubernetes API for new services and creates a set of DNS records for each one. If DNS is enabled throughout your cluster, then all pods should automatically be able to resolve services by their DNS name. Service discovery using DNS can be used in case you must go beyond 5000 services. When using environment variables for service discovery, if the argument list exceeds the allowed length after 5000 services in a namespace, then the pods and deployments will start failing. + +Disable the service links in the deployment’s service specification file to overcome this: + +.Example +[source,yaml] +---- +Kind: Template +apiVersion: v1 +metadata: + name: deploymentConfigTemplate + creationTimestamp: + annotations: + description: This template will create a deploymentConfig with 1 replica, 4 env vars and a service. 
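+    # NOTE: The key setting in this template for this scenario is
+    # enableServiceLinks: false in the pod spec below, which stops the kubelet
+    # from injecting a set of per-service environment variables into each pod.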
+ tags: '' +objects: + - kind: DeploymentConfig + apiVersion: v1 + metadata: + name: deploymentconfig${IDENTIFIER} + spec: + template: + metadata: + labels: + name: replicationcontroller${IDENTIFIER} + spec: + enableServiceLinks: false + containers: + - name: pause${IDENTIFIER} + image: "${IMAGE}" + ports: + - containerPort: 8080 + protocol: TCP + env: + - name: ENVVAR1_${IDENTIFIER} + value: "${ENV_VALUE}" + - name: ENVVAR2_${IDENTIFIER} + value: "${ENV_VALUE}" + - name: ENVVAR3_${IDENTIFIER} + value: "${ENV_VALUE}" + - name: ENVVAR4_${IDENTIFIER} + value: "${ENV_VALUE}" + resources: {} + imagePullPolicy: IfNotPresent + capabilities: {} + securityContext: + capabilities: {} + privileged: false + restartPolicy: Always + serviceAccount: '' + replicas: 1 + selector: + name: replicationcontroller${IDENTIFIER} + triggers: + - type: ConfigChange + strategy: + type: Rolling + - kind: Service + apiVersion: v1 + metadata: + name: service${IDENTIFIER} + spec: + selector: + name: replicationcontroller${IDENTIFIER} + ports: + - name: serviceport${IDENTIFIER} + protocol: TCP + port: 80 + targetPort: 8080 + portalIP: '' + type: ClusterIP + sessionAffinity: None + status: + loadBalancer: {} + parameters: + - name: IDENTIFIER + description: Number to append to the name of resources + value: '1' + required: true + - name: IMAGE + description: Image to use for deploymentConfig + value: gcr.io/google-containers/pause-amd64:3.0 + required: false + - name: ENV_VALUE + description: Value to use for environment variables + generate: expression + from: "[A-Za-z0-9]{255}" + required: false + labels: +template: deploymentConfigTemplate +---- + +The number of application pods that can run in a namespace is dependent on the number of services and the length of the service name when the environment variables are used for service discovery. `ARG_MAX` on the system defines the maximum argument length for a new process and it is set to 2097152 KiB by default. The kubelet injects environment variables in to each pod scheduled to run in the namespace including: + +* `_SERVICE_HOST=` +* `_SERVICE_PORT=` +* `_PORT=tcp://:` +* `_PORT__TCP=tcp://:` +* `_PORT__TCP_PROTO=tcp` +* `_PORT__TCP_PORT=` +* `_PORT__TCP_ADDR=` + +The pods in the namespace start to fail if the argument length exceeds the allowed value and if the number of characters in a service name impacts it. diff --git a/modules/rosa-planning-environment-cluster-max.adoc b/modules/rosa-planning-environment-cluster-max.adoc new file mode 100644 index 0000000000..383e77363f --- /dev/null +++ b/modules/rosa-planning-environment-cluster-max.adoc @@ -0,0 +1,42 @@ + +// Module included in the following assemblies: +// +// rosa_planning/rosa-planning-environment.adoc + +[id="planning-environment-cluster-maximums_{context}"] += Planning your environment based on tested cluster maximums + +This document describes how to plan your {product-title} environment based on the tested cluster maximums. + +Oversubscribing the physical resources on a node affects resource guarantees the Kubernetes scheduler makes during pod placement. Learn what measures you can take to avoid memory swapping. + +Some of the tested maximums are stretched only in a single dimension. They will vary when many objects are running on the cluster. + +The numbers noted in this documentation are based on Red Hat testing methodology, setup, configuration, and tunings. These numbers can vary based on your own individual setup and environments. 
+ +While planning your environment, determine how many pods are expected to fit per node using the following formula: + +---- +required pods per cluster / pods per node = total number of nodes needed +---- + +The current maximum number of pods per node is 250. However, the number of pods that fit on a node is dependent on the application itself. Consider the application’s memory, CPU, and storage requirements, as described in _Planning your environment based on application requirements_. + +.Example scenario +If you want to scope your cluster for 2200 pods per cluster, you would need at least nine nodes, assuming that there are 250 maximum pods per node: + +---- +2200 / 250 = 8.8 +---- + +If you increase the number of nodes to 20, then the pod distribution changes to 110 pods per node: + +---- +2200 / 20 = 110 +---- + +Where: + +---- +required pods per cluster / total number of nodes = expected pods per node +---- diff --git a/modules/rosa-policy-change-management.adoc b/modules/rosa-policy-change-management.adoc new file mode 100644 index 0000000000..0b269a68de --- /dev/null +++ b/modules/rosa-policy-change-management.adoc @@ -0,0 +1,67 @@ + +// Module included in the following assemblies: +// +// * assemblies/rosa-policy-process-security.adoc + +[id="rosa-policy-change-management_{context}"] += Change management + + +This section describes the policies about how cluster changes, configuration changes, patches, and releases are managed. + +Cluster changes are initiated in one of two ways: + +1. A customer initiates changes through self-service capabilities such as cluster deployment, worker node scaling, or cluster deletion. +2. Red Hat site reliability engineering (SRE) initiates a change through Operator-driven capabilities such as configuration, upgrade, patching, or configuration changes. + +Change history is captured in the Cluster History section in the OpenShift Cluster Manager (OCM) Overview tab and is available to customers. The change history includes, but is not limited to, logs from the following changes: + +- Adding or removing identity providers +- Adding or removing users to or from the `dedicated-admins` group +- Scaling the cluster compute nodes +- Scaling the cluster load balancer +- Scaling the cluster persistent storage +- Upgrading the cluster + +The SRE-initiated changes that require manual intervention by SRE generally follow this process: + +- Preparing for change +* Change characteristics are identified and a gap analysis is performed against current state. +* Change steps are documented and validated. +* A communication plan and schedule are shared with all stakeholders. +* CI/CD and end-to-end tests are updated to automate change validation. +* A change request that captures change details is submitted for management approval. +- Managing change +* Automated nightly CI/CD jobs pick up the change and run tests. +* The change is made to integration and stage environments, and manually validated before updating the customer cluster. +* Major change notifications are sent before and after the event. +- Reinforcing the change +* Feedback on the change is collected and analyzed. +* Potential gaps are diagnosed to understand resistance and automate similar change requests. +* Corrective actions are implemented. + +[NOTE] +==== +SRE only uses manual changes as a fallback process because manual intervention is considered to be a failure of change management. 
+==== + +[id="rosa-policy-configuration-management_{context}"] +== Configuration management + +The infrastructure and configuration of the {product-title} environment is managed as code. SRE manages changes to the {product-title} environment using a GitOps workflow and automated CI/CD pipeline. + +Each proposed change undergoes a series of automated verifications immediately upon check-in. Changes are then deployed to a staging environment where they undergo automated integration testing. Finally, changes are deployed to the production environment. Each step is fully automated. + +An authorized SRE reviewer must approve advancement to each step. The reviewer cannot be the same individual who proposed the change. All changes and approvals are fully auditable as part of the GitOps workflow. + +[id="rosa-policy-patch-management_{context}"] +== Patch management + +OpenShift Container Platform software and the underlying immutable Red Hat CoreOS (RHCOS) operating system image are patched for bugs and vulnerabilities in regular z-stream upgrades. Read more about link:https://access.redhat.com/documentation/en-us/openshift_container_platform/4.6/html/architecture/architecture-rhcos[RHCOS architecture] in the OpenShift Container Platform documentation. + +[id="rosa-policy-release-management_{context}"] +== Release management + +ROSA clusters can be configured for automatic upgrades on a schedule. Alternatively, you can perform manual upgrades using the `rosa` CLI. For more details, see the link:https://access.redhat.com/support/policy/updates/openshift/dedicated[Life Cycle policy]. + +Customers can review the history of all cluster upgrade events in their OCM web console on the Events tab. diff --git a/modules/rosa-policy-customer-responsibility.adoc b/modules/rosa-policy-customer-responsibility.adoc new file mode 100644 index 0000000000..925bee61b7 --- /dev/null +++ b/modules/rosa-policy-customer-responsibility.adoc @@ -0,0 +1,39 @@ + +// Module included in the following assemblies: +// +// * assemblies/rosa-policy-responsibility-matrix.adoc + +[id="rosa-policy-customer-responsibility_{context}"] += Customer responsibilities for data and applications + + +The customer is responsible for the applications, workloads, and data that they deploy to {product-title}. However, Red Hat provides various tools to help the customer manage data and applications on the platform. + +[cols="2a,3a,3a",options="header"] +|=== + +|Resource +|Red Hat responsibilities +|Customer responsibilities + +|Customer data +|- Maintain platform-level standards for data encryption. +- Provide OpenShift components to help manage application data, such as secrets. +- Enable integration with third-party data services, AWS RDS, to store and manage data outside of the cluster and cloud provider. +|Maintain responsibility for all customer data stored on the platform and how customer applications consume and expose this data. + +|Customer applications +|- Provision clusters with OpenShift components installed so that customers can access the OpenShift and Kubernetes APIs to deploy and manage containerized applications. +- Create clusters with image pull secrets so that customer deployments can pull images from the Red Hat Container Catalog registry. +- Provide access to OpenShift APIs that a customer can use to set up Operators to add community, third-party, and Red Hat services to the cluster. +- Provide storage classes and plug-ins to support persistent volumes for use with customer applications. 
+|- Maintain responsibility for customer and third-party applications, data, and their complete lifecycle. +- If a customer adds Red Hat, community, third-party, their own, or other services to the cluster by using Operators or external images, the customer is responsible for these services and for working with the appropriate provider, including Red Hat, to troubleshoot any issues. +- Use the provided tools and features to configure and deploy; keep up to date; set up resource requests and limits; size the cluster to have enough resources to run apps; set up permissions; integrate with other services; manage any image streams or templates that the customer deploys; externally serve; save, back up, and restore data; and otherwise manage their highly available and resilient workloads. +- Maintain responsibility for monitoring the applications run on {product-title}, including installing and operating software to gather metrics and create alerts. + +|Developer services (CodeReady) +|Make CodeReady Workspaces available as an add-on through OpenShift Cluster Manager (OCM). +|Install, secure, and operate CodeReady Workspaces and the Developer CLI. + +|=== diff --git a/modules/rosa-policy-disaster-recovery.adoc b/modules/rosa-policy-disaster-recovery.adoc new file mode 100644 index 0000000000..6ab6ddedc5 --- /dev/null +++ b/modules/rosa-policy-disaster-recovery.adoc @@ -0,0 +1,16 @@ + +// Module included in the following assemblies: +// +// * assemblies/rosa-policy-process-security.adoc + +[id="rosa-policy-disaster-recovery_{context}"] += Disaster recovery + + +{product-title} (ROSA) provides disaster recovery for failures that occur at the pod, worker node, infrastructure node, master node, and availability zone levels. + +All disaster recovery requires that the customer use best practices for deploying highly available applications, storage, and cluster architecture, such as single-zone deployment or multi-zone deployment, to account for the level of desired availability. + +One single-zone cluster will not provide disaster avoidance or recovery in the event of an availability zone or region outage. Multiple single-zone clusters with customer-maintained failover can account for outages at the zone or at the regional level. + +One multi-zone cluster will not provide disaster avoidance or recovery in the event of a full region outage. Multiple multi-zone clusters with customer-maintained failover can account for outages at the regional level. diff --git a/modules/rosa-policy-failure-points.adoc b/modules/rosa-policy-failure-points.adoc new file mode 100644 index 0000000000..68505e28c3 --- /dev/null +++ b/modules/rosa-policy-failure-points.adoc @@ -0,0 +1,50 @@ + +// Module included in the following assemblies: +// +// * assemblies/rosa-policy-understand-availability.adoc + +[id="rosa-policy-failure-points_{context}"] += Potential points of failure + + +{product-title} (ROSA) provides many features and options for protecting your workloads against downtime, but applications must be architected appropriately to take advantage of these features. + +ROSA can help further protect you against many common Kubernetes issues by adding Red Hat site reliability engineering (SRE) support and the option to deploy a multiple availability zone cluster, but there are a number of ways in which a container or infrastructure can still fail. 
By understanding potential points of failure, you can understand risks and appropriately architect both your applications and your clusters to be as resilient as necessary at each specific level. + +[NOTE] +==== +An outage can occur at several different levels of infrastructure and cluster components. +==== + +[id="rosa-policy-container-pod-failure_{context}"] +== Container or pod failure +By design, pods are meant to exist for a short time. Appropriately scaling services so that multiple instances of your application pods are running can protect against issues with any individual pod or container. The OpenShift node scheduler can also make sure these workloads are distributed across different worker nodes to further improve resiliency. + +When accounting for possible pod failures, it is also important to understand how storage is attached to your applications. Single persistent volumes attached to single pods cannot leverage the full benefits of pod scaling, whereas replicated databases, database services, or shared storage can. + +To avoid disruption to your applications during planned maintenance, such as upgrades, it is important to define a Pod Disruption Budget. These are part of the Kubernetes API and can be managed with `oc` commands such as other object types. They allow for the specification of safety constraints on pods during operations, such as draining a node for maintenance. + +[id="rosa-policy-worker-node-failure_{context}"] +== Worker node failure +Worker nodes are the virtual machines that contain your application pods. By default, a ROSA cluster has a minimum of two worker nodes for a single availability-zone cluster. In the event of a worker node failure, pods are relocated to functioning worker nodes, as long as there is enough capacity, until any issue with an existing node is resolved or the node is replaced. More worker nodes means more protection against single-node outages, and ensures proper cluster capacity for rescheduled pods in the event of a node failure. + +[NOTE] +==== +When accounting for possible node failures, it is also important to understand how storage is affected. EFS volumes are not affected by node failure. However, EBS volumes are not accessible if they are connected to a node that fails. +==== + +[id="rosa-policy-container-cluster-failure_{context}"] +== Cluster failure +ROSA clusters have at least three control plane nodes and three infrastructure nodes that are preconfigured for high availability, either in a single zone or across multiple zones, depending on the type of cluster you have selected. Control plane and infrastructure nodes have the same resiliency as worker nodes, with the added benefit of being managed completely by Red Hat. + +In the event of a complete control plane outage, the OpenShift APIs will not function, and existing worker node pods are unaffected. However, if there is also a pod or node outage at the same time, the control planes must recover before new pods or nodes can be added or scheduled. + +All services running on infrastructure nodes are configured by Red Hat to be highly available and distributed across infrastructure nodes. In the event of a complete infrastructure outage, these services are unavailable until these nodes have been recovered. + +[id="rosa-policy-container-zone-failure_{context}"] +== Zone failure +A zone failure from AWS affects all virtual components, such as worker nodes, block or shared storage, and load balancers that are specific to a single availability zone. 
To protect against a zone failure, ROSA provides the option for clusters that are distributed across three availability zones, known as multiple availability zone clusters. Existing stateless workloads are redistributed to unaffected zones in the event of an outage, as long as there is enough capacity. + +[id="rosa-policy-container-storage-failure_{context}"] +== Storage failure +If you have deployed a stateful application, then storage is a critical component and must be accounted for when thinking about high availability. A single block storage PV is unable to withstand outages even at the pod level. The best ways to maintain availability of storage are to use replicated storage solutions, shared storage that is unaffected by outages, or a database service that is independent of the cluster. diff --git a/modules/rosa-policy-identity-access-management.adoc b/modules/rosa-policy-identity-access-management.adoc new file mode 100644 index 0000000000..0b745e9f89 --- /dev/null +++ b/modules/rosa-policy-identity-access-management.adoc @@ -0,0 +1,154 @@ + +// Module included in the following assemblies: +// +// * assemblies/rosa-policy-process-security.adoc + +[id="rosa-policy-identity-access-management_{context}"] += Identity and access management +Most access by Red Hat site reliability engineering (SRE) teams is done by using cluster Operators through automated configuration management. + +[id="subprocessors_{context}"] +== Subprocessors +For a list of the available subprocessors, see the link:https://access.redhat.com/articles/5528091[Red Hat Subprocessor List] on the Red Hat Customer Portal. + +[id="rosa-policy-sre-access_{context}"] +== SRE access to all {product-title} clusters +SREs access {product-title} clusters through the web console or command-line tools. Authentication requires multi-factor authentication (MFA) with industry-standard requirements for password complexity and account lockouts. SREs must authenticate as individuals to ensure auditability. All authentication attempts are logged to a Security Information and Event Management (SIEM) system. + +SREs access private clusters using an encrypted tunnel through a hardened SRE support pod running in the cluster. Connections to the SRE support pod are permitted only from a secured Red Hat network using an IP allow-list. In addition to the cluster authentication controls described above, authentication to the SRE support pod is controlled by using SSH keys. SSH key authorization is limited to SRE staff and automatically synchronized with Red Hat corporate directory data. Corporate directory data is secured and controlled by HR systems, including management review, approval, and audits. + +[id="rosa-policy-privileged-access-control_{context}"] +== Privileged access controls in {product-title} +SRE adheres to the principle of least privilege when accessing {product-title} and AWS components. There are four basic categories of manual SRE access: + +- SRE admin access through the Red Hat Portal with normal two-factor authentication and no privileged elevation. +- SRE admin access through the Red Hat corporate SSO with normal two-factor authentication and no privileged elevation. +- OpenShift elevation, which is a manual elevation using Red Hat SSO. Access is limited to 2 hours, is fully audited, and requires management approval. +- AWS access or elevation, which is a manual elevation for AWS console or CLI access. Access is limited to 60 minutes and is fully audited. 
+
+Each of these access types has different levels of access to components:
+
+[cols= "4a,6a,5a,4a,3a",options="header"]
+
+|===
+
+| Component | Typical SRE admin access (Red Hat Portal) | Typical SRE admin access (Red Hat SSO) | OpenShift elevation | Cloud provider access or elevation
+
+| OpenShift Cluster Manager (OCM) | R/W | No access | No access | No access
+| OpenShift console | No access | R/W | R/W | No access
+| Node operating system | No access | A specific list of elevated OS and network permissions. | A specific list of elevated OS and network permissions. | No access
+| AWS Console | No access | No access, but this is the account used to request cloud provider access. | No access | All cloud provider permissions using the SRE identity.
+
+|===
+
+[id="rosa-policy-sre-aws-infra-access_{context}"]
+== SRE access to AWS accounts
+Red Hat personnel do not access AWS accounts in the course of routine {product-title} operations. For emergency troubleshooting purposes, the SREs have well-defined and auditable procedures to access cloud infrastructure accounts.
+
+SREs generate a short-lived AWS access token for a reserved role using the AWS Security Token Service (STS). Access to the STS token is audit-logged and traceable back to individual users. Both STS and non-STS clusters use the AWS STS service for SRE access. For non-STS clusters, the `BYOCAdminAccess` role has the `AdministratorAccess` IAM policy attached, and this role is used for administration. For STS clusters, the `ManagedOpenShift-Support-Role` has the `ManagedOpenShift-Support-Access` policy attached, and this role is used for administration.
+
+[id="rosa-policy-rh-access_{context}"]
+== Red Hat support access
+Members of the Red Hat Customer Experience and Engagement (CEE) team typically have read-only access to parts of the cluster. Specifically, CEE has limited access to the core and product namespaces and does not have access to the customer namespaces.
+
+[cols= "2a,4a,4a,4a,4a",options="header"]
+
+|===
+
+| Role | Core namespace | Layered product namespace | Customer namespace | AWS account^*^
+
+|OpenShift SRE| Read: All
+
+Write: Very limited ^[1]^
+| Read: All
+
+Write: None
+| Read: None^[2]^
+
+Write: None
+|Read: All ^[3]^
+
+Write: All ^[3]^
+
+|CEE
+|Read: All
+
+Write: None
+
+|Read: All
+
+Write: None
+
+|Read: None^[2]^
+
+Write: None
+
+|Read: None
+
+Write: None
+
+|Customer administrator
+|Read: None
+
+Write: None
+
+|Read: None
+
+Write: None
+
+| Read: All
+
+Write: All
+
+|Read: All
+
+Write: All
+
+|Customer user
+|Read: None
+
+Write: None
+
+|Read: None
+
+Write: None
+
+|Read: Limited^[4]^
+
+Write: Limited^[4]^
+
+|Read: None
+
+Write: None
+
+|Everybody else
+|Read: None
+
+Write: None
+|Read: None
+
+Write: None
+|Read: None
+
+Write: None
+|Read: None
+
+Write: None
+
+|===
+--
+1. Limited to addressing common use cases such as failing deployments, upgrading a cluster, and replacing bad worker nodes.
+2. Red Hat associates have no access to customer data by default.
+3. SRE access to the AWS account is an emergency procedure for exceptional troubleshooting during a documented incident.
+4. Limited to what is granted through RBAC by the Customer Administrator, as well as namespaces created by the user.
+--
+
+[id="rosa-policy-customer-access_{context}"]
+== Customer access
+Customer access is limited to namespaces created by the customer and permissions that are granted using RBAC by the Customer Administrator role.
Access to the underlying infrastructure or product namespaces is generally not permitted without `cluster-admin` access. More information on customer access and authentication can be found in the "Understanding Authentication" section of the documentation. + +[id="rosa-policy-access-approval_{context}"] +== Access approval and review +New SRE user access requires management approval. Separated or transferred SRE accounts are removed as authorized users through an automated process. Additionally, the SRE performs periodic access review, including management sign-off of authorized user lists. diff --git a/modules/rosa-policy-incident.adoc b/modules/rosa-policy-incident.adoc new file mode 100644 index 0000000000..d00c31c9cf --- /dev/null +++ b/modules/rosa-policy-incident.adoc @@ -0,0 +1,90 @@ + +// Module included in the following assemblies: +// +// * assemblies/rosa-policy-process-security.adoc + +[id="rosa-policy-incident_{context}"] += Incident and operations management + + +This documentation details the Red Hat responsibilities for the {product-title} (ROSA) managed service. + +[id="rosa-policy-platform-monitoring_{context}"] +== Platform monitoring +Red Hat site reliability engineers (SREs) maintain a centralized monitoring and alerting system for all ROSA cluster components, the SRE services, and underlying AWS accounts. Platform audit logs are securely forwarded to a centralized security information and event monitoring (SIEM) system, where they may trigger configured alerts to the SRE team and are also subject to manual review. Audit logs are retained in the SIEM system for one year. Audit logs for a given cluster are not deleted at the time the cluster is deleted. + +[id="rosa-policy-incident-management_{context}"] +== Incident management +An incident is an event that results in a degradation or outage of one or more Red Hat services. An incident can be raised by a customer or a Customer Experience and Engagement (CEE) member through a support case, directly by the centralized monitoring and alerting system, or directly by a member of the SRE team. + +Depending on the impact on the service and customer, the incident is categorized in terms of link:https://access.redhat.com/support/offerings/production/sla[severity]. + +When managing a new incident, Red Hat uses the following general workflow: + +. An SRE first responder is alerted to a new incident and begins an initial investigation. +. After the initial investigation, the incident is assigned an incident lead, who coordinates the recovery efforts. +. An incident lead manages all communication and coordination around recovery, including any relevant notifications and support case updates. +. The incident is recovered. +. The incident is documented and a root cause analysis (RCA) is performed within 3 business days of the incident. +. An RCA draft document will be shared with the customer within 7 business days of the incident. + +[id="rosa-policy-notifications_{context}"] +== Notifications +Platform notifications are configured using email. Some customer notifications are also sent to an account's corresponding Red Hat account team, including a Technical Account Manager, if applicable. + +The following activities can trigger notifications: + +- Platform incident +- Performance degradation +- Cluster capacity warnings +- Critical vulnerabilities and resolution +- Upgrade scheduling + +[id="rosa-policy-backup-recovery_{context}"] +== Backup and recovery +All {product-title} clusters are backed up using AWS snapshots. 
Notably, this does not include customer data stored on persistent volumes (PVs). All snapshots are taken using the appropriate AWS snapshot APIs and are uploaded to a secure AWS S3 object storage bucket in the same account as the cluster. + +[cols= "3a,2a,2a,3a",options="header"] + +|=== +|Component +|Snapshot frequency +|Retention +|Notes + +.2+|Full object store backup, all SRE-managed cluster PVs +|Daily +|7 days +.2+|This is a full backup of all Kubernetes objects, such as etcd, and all SRE-managed PVs in the cluster. + +|Weekly +|30 days + + +|Full object store backup +|Hourly +|24-hour +|This is a full backup of all Kubernetes objects, such as etcd. No PVs are backed up in this backup schedule. + +|Node root volume +|Never +|N/A +|Nodes are considered to be short-term. Do not store anything critical on a node's root volume. + +|=== + +- Red Hat rehearses recovery processes periodically. +- Red Hat does not commit to any Recovery Point Objective (RPO) or Recovery Time Objective (RTO). +- Customers are responsible for taking regular backups of their data. +- Backups performed by the SRE are taken as a precautionary measure only. They are stored in the same region as the cluster. +- Customers can access the SRE backup data on request through a support case. +- Red Hat encourages customers to deploy multiple availability zone (multi-AZ) clusters with workloads that follow Kubernetes best practices to ensure high availability within a region. +- In the event an entire AWS region is unavailable, customers must install a new cluster in a different region and restore their apps using their backup data. + +[id="rosa-policy-cluster-capacity_{context}"] +== Cluster capacity +Evaluating and managing cluster capacity is a responsibility that is shared between Red Hat and the customer. Red Hat SRE is responsible for the capacity of all control plane and infrastructure nodes on the cluster. + +Red Hat SRE also evaluates cluster capacity during upgrades and in response to cluster alerts. The impact of a cluster upgrade on capacity is evaluated as part of the upgrade testing process to ensure that capacity is not negatively impacted by new additions to the cluster. During a cluster upgrade, additional worker nodes are added to make sure that total cluster capacity is maintained during the upgrade process. + +Capacity evaluations by the Red Hat SRE staff also happen in response to alerts from the cluster, after usage thresholds are exceeded for a certain period of time. Such alerts can also result in a notification to the customer. diff --git a/modules/rosa-policy-responsibilities.adoc b/modules/rosa-policy-responsibilities.adoc new file mode 100644 index 0000000000..adb0e3232c --- /dev/null +++ b/modules/rosa-policy-responsibilities.adoc @@ -0,0 +1,55 @@ + +// Module included in the following assemblies: +// +// * assemblies/rosa-policy-responsibility-matrix.adoc + +[id="rosa-policy-responsibilities_{context}"] += Overview of responsibilities for {product-title} + + +While Red Hat and Amazon Web Services (AWS) manage the {product-title} service, the customer shares certain responsibilities. The {product-title} services are accessed remotely, hosted on public cloud resources, created in customer-owned AWS accounts, and have underlying platform and data security that is owned by Red Hat. 
+
+[IMPORTANT]
+====
+If the `cluster-admin` role is added to a user, see the responsibilities and exclusion notes in the link:https://www.redhat.com/en/about/agreements[Red Hat Enterprise Agreement Appendix 4 (Online Subscription Services)].
+====
+
+[cols="2a,3a,3a,3a,3a,3a",options="header"]
+|===
+
+|Resource
+|Incident and operations management
+|Change management
+|Identity and access management
+|Security and regulation compliance
+|Disaster recovery
+
+|Customer data |Customer |Customer |Customer |Customer |Customer
+
+|Customer applications |Customer |Customer |Customer |Customer |Customer
+
+|Developer services |Customer |Customer |Customer |Customer |Customer
+
+|Platform monitoring |Red Hat |Red Hat |Red Hat |Red Hat |Red Hat
+
+|Logging |Red Hat |Shared |Shared |Shared |Red Hat
+
+|Application networking |Shared |Shared |Shared |Red Hat |Red Hat
+
+|Cluster networking |Red Hat |Shared |Shared |Red Hat |Red Hat
+
+|Virtual networking |Shared |Shared |Shared |Shared |Shared
+
+|Master and infrastructure nodes |Red Hat |Red Hat |Red Hat |Red Hat |Red Hat
+
+|Worker nodes |Red Hat |Red Hat |Red Hat |Red Hat |Red Hat
+
+|Cluster version |Red Hat |Shared |Red Hat |Red Hat |Red Hat
+
+|Capacity management |Red Hat |Shared |Red Hat |Red Hat |Red Hat
+
+|Virtual storage |Red Hat and cloud provider |Red Hat and cloud provider |Red Hat and cloud provider |Red Hat and cloud provider |Red Hat and cloud provider
+
+|Physical infrastructure and security |Cloud provider |Cloud provider |Cloud provider |Cloud provider |Cloud provider
+
+|===
diff --git a/modules/rosa-policy-security-regulation-compliance.adoc b/modules/rosa-policy-security-regulation-compliance.adoc
new file mode 100644
index 0000000000..529b5d09f3
--- /dev/null
+++ b/modules/rosa-policy-security-regulation-compliance.adoc
@@ -0,0 +1,53 @@
+
+// Module included in the following assemblies:
+//
+// * assemblies/rosa-policy-process-security.adoc
+
+[id="rosa-policy-security-regulation-compliance_{context}"]
+= Security and regulation compliance
+
+
+Security and regulation compliance includes tasks such as the implementation of security controls and compliance certification.
+
+[id="rosa-policy-data-classification_{context}"]
+== Data classification
+Red Hat defines and follows a data classification standard to determine the sensitivity of data and highlight inherent risk to the confidentiality and integrity of that data while it is collected, used, transmitted, stored, and processed. Customer-owned data is classified at the highest level of sensitivity and handling requirements.
+
+[id="rosa-policy-data-management_{context}"]
+== Data management
+{product-title} (ROSA) uses AWS KMS to help securely manage keys for encrypted data. These keys are used for control plane data volumes that are encrypted by default. Persistent volumes (PVs) for customer applications also use AWS KMS for key management.
+
+When a customer deletes their ROSA cluster, all cluster data is permanently deleted, including control plane data volumes, customer application data volumes, such as PVs, and backup data.
+
+[id="rosa-policy-vulnerability-management_{context}"]
+== Vulnerability management
+Red Hat performs periodic vulnerability scanning of ROSA using industry standard tools. Identified vulnerabilities are tracked to their remediation according to timelines based on severity. Vulnerability scanning and remediation activities are documented for verification by third-party assessors in the course of compliance certification audits.
+ +[id="rosa-policy-network-security_{context}"] +== Network security + +[id="rosa-policy-firewall-ddos-protection_{context}"] +=== Firewall and DDoS protection +Each ROSA cluster is protected by a secure network configuration using firewall rules for AWS Security Groups. ROSA customers are also protected against DDoS attacks with link:https://docs.aws.amazon.com/waf/latest/developerguide/ddos-overview.html[AWS Shield Standard]. + +[id="rosa-policy-private-clusters-network-connectivity_{context}"] +=== Private clusters and network connectivity +Customers can optionally configure their ROSA cluster endpoints, such as web console, API, and application router, to be made private so that the cluster control plane and applications are not accessible from the Internet. Red Hat SRE still requires Internet-accessible endpoints that are protected with IP allow-lists. + +AWS customers can configure a private network connection to their ROSA cluster through technologies such as AWS VPC peering, AWS VPN, or AWS Direct Connect. + +[id="rosa-policy-cluster-network-access_{context}"] +=== Cluster network access controls +Fine-grained network access control rules can be configured by customers, on a per-project basis, using `NetworkPolicy` objects and the OpenShift SDN. + +[id="rosa-policy-penetration-testing_{context}"] +== Penetration testing +Red Hat performs periodic penetration tests against ROSA. Tests are performed by an independent internal team by using industry standard tools and best practices. + +Any issues that may be discovered are prioritized based on severity. Any issues found belonging to open source projects are shared with the community for resolution. + +[id="rosa-policy-compliance_{context}"] +== Compliance +ROSA follows common industry best practices for security and controls. + +ROSA is certified for PCI-DSS, ISO 27001, and SOC 2 Type 2. diff --git a/modules/rosa-policy-shared-responsibility.adoc b/modules/rosa-policy-shared-responsibility.adoc new file mode 100644 index 0000000000..6c51b112f8 --- /dev/null +++ b/modules/rosa-policy-shared-responsibility.adoc @@ -0,0 +1,167 @@ + +// Module included in the following assemblies: +// +// * assemblies/rosa-policy-responsibility-matrix.adoc + +[id="rosa-policy-shared-responsibility_{context}"] += Shared responsibility matrix + + +The customer, Red Hat, and Amazon Web Services (AWS) share responsibility for the monitoring and maintenance of an {product-title} cluster. This documentation illustrates the delineation of responsibilities by area and task. + +[id="rosa-policy-incident-operations-management_{context}"] +== Incident and operations management +The customer is responsible for incident and operations management of customer application data and any custom networking the customer may have configured for the cluster network or virtual network. + +[cols= "2a,3a,3a",options="header"] +|=== + +|Resource +|Red Hat and AWS responsibilities +|Customer responsibilities + +|Application networking +|Monitor cloud load balancers and native OpenShift router service, and respond to alerts. +|- Monitor health of service load balancer endpoints. +- Monitor health of application routes, and the endpoints behind them. +- Report outages to Red Hat. + +|Virtual networking +|Monitor cloud load balancers, subnets, and public cloud components necessary for default platform networking, and respond to alerts. 
+|Monitor network traffic that is optionally configured through VPC to VPC connection, VPN connection, or Direct connection for potential issues or security threats. + +|=== + +[id="rosa-policy-change-management_{context}"] +== Change management +Red Hat is responsible for enabling changes to the cluster infrastructure and services that the customer will control, as well as maintaining versions for the master nodes, infrastructure nodes and services, and worker nodes. The customer is responsible for initiating infrastructure change requests and installing and maintaining optional services and networking configurations on the cluster, as well as all changes to customer data and customer applications. + +[cols="2a,3a,3a",options="header"] +|=== + +|Resource +|Red Hat responsibilities +|Customer responsibilities + +|Logging +|- Centrally aggregate and monitor platform audit logs. +- Provide and maintain a logging Operator to enable the customer to deploy a logging stack for default application logging. +- Provide audit logs upon customer request. +|- Install the optional default application logging Operator on the cluster. +- Install, configure, and maintain any optional app logging solutions, such as logging sidecar containers or third-party logging applications. +- Tune size and frequency of application logs being produced by customer applications if they are affecting the stability of the logging stack or the cluster. +- Request platform audit logs through a support case for researching specific incidents. + +|Application networking +|- Set up public cloud load balancers. Provide the ability to set up private load balancers and up to one additional load balancer when required. +- Set up native OpenShift router service. Provide the ability to set the router as private and add up to one additional router shard. +- Install, configure, and maintain OpenShift SDN components for default internal pod traffic. +- Provide the ability for the customer to manage `NetworkPolicy` and `EgressNetworkPolicy` (firewall) objects. +|- Configure non-default pod network permissions for project and pod networks, pod ingress, and pod egress using `NetworkPolicy` objects. +- Use OpenShift Cluster Manager (OCM) to request a private load balancer for default application routes. +- Use OCM to configure up to one additional public or private router shard and corresponding load balancer. +- Request and configure any additional service load balancers for specific services. +- Configure any necessary DNS forwarding rules. + +|Cluster networking +|- Set up cluster management components, such as public or private service endpoints and necessary integration with virtual networking components. +- Set up internal networking components required for internal cluster communication between worker, infrastructure, and master nodes. +|- Provide optional non-default IP address ranges for machine CIDR, service CIDR, and pod CIDR if needed through OCM when the cluster is provisioned. +- Request that the API service endpoint be made public or private on cluster creation or after cluster creation through OCM. + +|Virtual networking +|- Set up and configure virtual networking components required to provision the cluster, including virtual private cloud, subnets, load balancers, Internet gateways, NAT gateways, etc. +- Provide the ability for the customer to manage VPN connectivity with on-premises resources, VPC to VPC connectivity, and Direct connectivity as required through OCM. 
+- Enable customers to create and deploy public cloud load balancers for use with service load balancers.
+|- Set up and maintain optional public cloud networking components, such as VPC to VPC connection, VPN connection, or Direct connection.
+- Request and configure any additional service load balancers for specific services.
+
+|Cluster version
+|- Communicate schedule and status of upgrades for minor and maintenance versions.
+- Publish change logs and release notes for minor and maintenance upgrades.
+|- Work with Red Hat to establish maintenance start times for upgrades.
+- Test customer applications on minor and maintenance versions to ensure compatibility.
+
+|Capacity management
+|- Monitor the use of the control plane. The control plane consists of the master nodes and infrastructure nodes.
+- Scale and resize control plane nodes to maintain quality of service.
+- Monitor the use of customer resources, including network, storage, and compute capacity. Where autoscaling features are not enabled, alert the customer about any changes required to cluster resources, such as additional compute nodes for scaling or additional storage.
+|- Use the provided OCM controls to add or remove additional worker nodes as required.
+- Respond to Red Hat notifications regarding cluster resource requirements.
+
+|===
+
+[id="rosa-policy-identity-access-management_{context}"]
+== Identity and access management
+The Identity and Access Management matrix includes responsibilities for managing authorized access to clusters, applications, and infrastructure resources. This includes tasks such as providing access control mechanisms, authentication, authorization, and managing access to resources.
+
+[cols="2a,3a,3a",options="header"]
+|===
+|Resource
+|Red Hat responsibilities
+|Customer responsibilities
+
+|Logging
+|- Adhere to an industry standards-based tiered internal access process for platform audit logs.
+- Provide native OpenShift RBAC capabilities.
+|- Configure OpenShift RBAC to control access to projects and by extension a project’s application logs.
+- For third-party or custom application logging solutions, the customer is responsible for access management.
+
+|Application networking
+|Provide native OpenShift RBAC and `dedicated-admin` capabilities.
+|- Configure OpenShift `dedicated-admin` and RBAC to control access to route configuration as required.
+- Manage organization administrators for Red Hat to grant access to OCM. OCM is used to configure router options and provide service load balancer quota.
+
+|Cluster networking
+|- Provide customer access controls through OCM.
+- Provide native OpenShift RBAC and `dedicated-admin` capabilities.
+|- Manage Red Hat organization membership of Red Hat accounts.
+- Manage organization administrators for Red Hat to grant access to OCM.
+- Configure OpenShift `dedicated-admin` and RBAC to control access to route configuration as required.
+
+|Virtual networking
+|Provide customer access controls through OCM.
+|Manage optional user access to public cloud components through OCM.
+
+|===
+
+[id="rosa-policy-security-regulation-compliance_{context}"]
+== Security and regulation compliance
+The following are the responsibilities and controls related to compliance:
+
+[cols="2a,3a,3a",options="header"]
+|===
+
+|Resource
+|Red Hat responsibilities
+|Customer responsibilities
+
+|Logging
+|Send cluster audit logs to a Red Hat SIEM to analyze for security events. Retain audit logs for a defined period of time to support forensic analysis.
+|Analyze application logs for security events. Send application logs to an external endpoint through logging sidecar containers or third-party logging applications if longer retention is required than is offered by the default logging stack.
+
+|Virtual networking
+|- Monitor virtual networking components for potential issues and security threats.
+- Leverage additional public cloud provider tools for further monitoring and protection.
+|- Monitor optionally configured virtual networking components for potential issues and security threats.
+- Configure any necessary firewall rules or data center protections as required.
+
+|===
+
+[id="rosa-policy-disaster-recovery_{context}"]
+== Disaster recovery
+Disaster recovery includes data and configuration backup, replicating data and configuration to the disaster recovery environment, and failover on disaster events.
+
+
+[cols="2a,3a,3a",options="header"]
+|===
+|Resource
+|Red Hat responsibilities
+|Customer responsibilities
+
+|Virtual networking
+|Restore or recreate affected virtual network components that are necessary for the platform to function.
+|- Configure virtual networking connections with more than one tunnel where possible for protection against outages as recommended by the public cloud provider.
+- Maintain failover DNS and load balancing if using a global load balancer with multiple clusters.
+
+|===
diff --git a/modules/rosa-quickstart-instructions.adoc b/modules/rosa-quickstart-instructions.adoc
new file mode 100644
index 0000000000..f98ceb537b
--- /dev/null
+++ b/modules/rosa-quickstart-instructions.adoc
@@ -0,0 +1,29 @@
+
+// Module included in the following assemblies:
+//
+// getting_started_rosa/rosa-quickstart.adoc
+
+
+[id="rosa-quickstart-instructions"]
+= Command quick reference list
+
+If you have already created your first cluster and users, this list can serve as a command quick reference list when creating additional clusters and users.
+
+[source, terminal]
+----
+## Configures your AWS account and ensures everything is set up correctly
+$ rosa init
+
+## Starts the cluster creation process (~30-40 minutes)
+$ rosa create cluster --cluster-name=
+
+## Connects your IDP to your cluster
+$ rosa create idp --cluster= --interactive
+
+## Promotes a user from your IDP to dedicated-admin level
+$ rosa grant user dedicated-admin --user= --cluster=
+
+## Checks if your install is ready (look for State: Ready),
+## and provides your Console URL to log in to the web console.
+$ rosa describe cluster --cluster=
+----
diff --git a/modules/rosa-required-aws-service-quotas.adoc b/modules/rosa-required-aws-service-quotas.adoc
new file mode 100644
index 0000000000..11609b8daa
--- /dev/null
+++ b/modules/rosa-required-aws-service-quotas.adoc
@@ -0,0 +1,88 @@
+
+// Module included in the following assemblies:
+//
+// getting_started_rosa/rosa-required-aws-service-quotas.adoc
+
+
+[id="rosa-required-aws-service-quotas_{context}"]
+= Required AWS service quotas
+
+The table below describes the AWS service quotas and levels required to create and run an {product-title} cluster.
+
+[NOTE]
+====
+The AWS SDK allows ROSA to check quotas, but the AWS SDK calculation does not include your existing usage. Therefore, it is possible that the quota check can pass in the AWS SDK yet the cluster creation can fail. To fix this issue, increase your quota.
+====
+
+If you need to modify or increase a specific quota, see Amazon's documentation on link:https://docs.aws.amazon.com/servicequotas/latest/userguide/request-quota-increase.html[requesting a quota increase].
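+
+For example, you can check your account's current value for a quota from the command line by using the AWS CLI Service Quotas commands. The following minimal sketch looks up the EC2 On-Demand Standard instances quota, using the service code and quota code listed in the table below:
+
+[source,terminal]
+----
+$ aws service-quotas get-service-quota \
+    --service-code ec2 \
+    --quota-code L-1216C47A
+----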
+ +[options="header"] +|=== +|Quota name |Service code |Quota code| Minimum required value | Recommended value + +|Number of EIPs - VPC EIPs +|ec2 +|L-0263D0A3 +|5 +|5 + +|Running On-Demand Standard (A, C, D, H, I, M, R, T, Z) instances +|ec2 +|L-1216C47A +|100 +|100 + +|VPCs per Region +|vpc +|L-F678F1CE +|5 +|5 + +|Internet gateways per Region +|vpc +|L-A4707A72 +|5 +|5 + +|Network interfaces per Region +|vpc +|L-DF5E4CA3 +|5,000 +|5,000 + +|General Purpose SSD (gp2) volume storage +|ebs +|L-D18FCD1D +|50 +|300 + +|Number of EBS snapshots +|ebs +|L-309BACF6 +|300 +|300 + +|Provisioned IOPS +|ebs +|L-B3A130E6 +|300,000 +|300,000 + +|Provisioned IOPS SSD (io1) volume storage +|ebs +|L-FD252861 +|50 +|300 + +|Application Load Balancers per Region +|elasticloadbalancing +|L-53DA6B97 +|50 +|50 + +|Classic Load Balancers per Region +|elasticloadbalancing +|L-E9E9831D +|20 +|20 +|=== diff --git a/modules/rosa-requirements-deploying-in-opt-in-regions.adoc b/modules/rosa-requirements-deploying-in-opt-in-regions.adoc new file mode 100644 index 0000000000..18fd583374 --- /dev/null +++ b/modules/rosa-requirements-deploying-in-opt-in-regions.adoc @@ -0,0 +1,16 @@ +// Module included in the following assemblies: +// +// * rosa_getting_started_sts/rosa-sts-aws-prereqs.adoc + +[id="rosa-requirements-deploying-in-opt-in-regions_{context}"] += Requirements for deploying a cluster in an opt-in region + +An AWS opt-in region is a region that is not enabled by default. If you want to deploy a {product-title} (ROSA) cluster that uses the AWS Security Token Service (STS) in an opt-in region, you must meet the following requirements: + +* The region must be enabled in your AWS account. For more information about enabling opt-in regions, see link:https://docs.aws.amazon.com/general/latest/gr/rande-manage.html[Managing AWS Regions] in the AWS documentation. +* The security token version in your AWS account must be set to version 2. You cannot use version 1 security tokens for opt-in regions. ++ +[IMPORTANT] +==== +Updating to security token version 2 can impact the systems that store the tokens, due to the increased token length. For more information, see link:https://docs.aws.amazon.com/cli/latest/reference/iam/set-security-token-service-preferences.html[the AWS documentation on setting STS preferences]. +==== diff --git a/modules/rosa-scaling-worker-nodes.adoc b/modules/rosa-scaling-worker-nodes.adoc new file mode 100644 index 0000000000..fa483dea4f --- /dev/null +++ b/modules/rosa-scaling-worker-nodes.adoc @@ -0,0 +1,49 @@ +// Module included in the following assemblies: +// +// * nodes/nodes/rosa-managing-worker-nodes.adoc + +[id="rosa-scaling-worker-nodes_{context}"] += Scaling worker nodes + + +Worker nodes can be scaled manually if you do not want to configure node autoscaling. + +.Procedure + +. To get a list of the machine pools in a cluster, enter the following command. Each cluster has a default machine pool that is created when you create a cluster. ++ +[source,terminal] +---- +$ rosa list machinepools --cluster= +---- ++ +.Example output ++ +[source,terminal] +---- +ID AUTOSCALING REPLICAS INSTANCE TYPE LABELS TAINTS AVAILABILITY ZONES +default No 2 m5.xlarge us-east-1a +mp1 No 2 m5.xlarge us-east-1a +---- + +. Review the output from the `rosa list machinepools` command to find the ID of the machine pool you want to scale and see the current number of replicas. + +. 
To change the scale, enter the following command and increase or decrease the number of replicas:
++
+[source,terminal]
+----
+$ rosa edit machinepool --cluster= --replicas=
+----
+
+. To verify that the change has taken effect, enter the following command:
++
+[source,terminal]
+----
+$ rosa describe cluster --cluster=
+----
++
+The response output shows the number of worker nodes, or replicas, as `Compute` nodes.
+
+. Optional: To view this change in the link:https://cloud.redhat.com/openshift[OCM console]:
+.. Select the cluster.
+.. From the *Overview* tab, in the `Details` pane, review the `Compute` node number.
diff --git a/modules/rosa-sdpolicy-account-management.adoc b/modules/rosa-sdpolicy-account-management.adoc
new file mode 100644
index 0000000000..03d7f65631
--- /dev/null
+++ b/modules/rosa-sdpolicy-account-management.adoc
@@ -0,0 +1,134 @@
+
+// Module included in the following assemblies:
+//
+// * assemblies/rosa-service-definition.adoc
+
+[id="rosa-sdpolicy-account-management_{context}"]
+= Account management
+
+
+This section provides information about the service definition for {product-title} account management.
+
+[id="rosa-sdpolicy-billing_{context}"]
+== Billing
+
+{product-title} is billed through Amazon Web Services (AWS) based on the usage of AWS components used by the service, such as load balancers, storage, EC2 instances, other components, and Red Hat subscriptions for the OpenShift service.
+
+Any additional Red Hat software must be purchased separately.
+
+[id="rosa-sdpolicy-cluster-self-service_{context}"]
+== Cluster self-service
+
+Customers can self-service their clusters, including, but not limited to:
+
+* Create a cluster
+* Delete a cluster
+* Add or remove an identity provider
+* Add or remove a user from an elevated group
+* Configure cluster privacy
+* Add or remove machine pools and configure autoscaling
+* Define upgrade policies
+
+These tasks can be self-serviced using the `rosa` CLI utility.
+
+[id="rosa-sdpolicy-compute_{context}"]
+== Compute
+
+Single availability zone clusters require a minimum of 3 control plane nodes, 2 infrastructure nodes, and 2 worker nodes deployed to a single availability zone.
+
+Multiple availability zone clusters require a minimum of 3 control plane nodes, 3 infrastructure nodes, and 3 worker nodes. Additional nodes must be purchased in multiples of three to maintain proper node distribution.
+
+All {product-title} clusters support a maximum of 180 worker nodes.
+
+[NOTE]
+====
+The `Default` machine pool node type and size cannot be changed after the cluster is created.
+====
+
+Control plane and infrastructure nodes are deployed and managed by Red Hat. Shutting down the underlying infrastructure through the cloud provider console is unsupported and can lead to data loss. There are at least 3 control plane nodes that handle etcd- and API-related workloads. There are at least 2 infrastructure nodes that handle metrics, routing, the web console, and other workloads. Control plane and infrastructure nodes are strictly for Red Hat workloads to operate the service, and customer workloads are not permitted to be deployed on these nodes.
+
+[NOTE]
+====
+1 vCPU core and 1 GiB of memory are reserved on each worker node to run processes required as part of the managed service. This includes, but is not limited to, audit log aggregation, metrics collection, DNS, image registry, and SDN.
+==== + +[id="rosa-sdpolicy-aws-compute-types_{context}"] +== AWS compute types + +{product-title} offers the following worker node types and sizes: + +General purpose + +- M5.xlarge (4 vCPU, 16 GiB) +- M5.2xlarge (8 vCPU, 32 GiB) +- M5.4xlarge (16 vCPU, 64 GiB) +- M5.8xlarge (32 vCPU, 128 GiB) +- M5.12xlarge (48 vCPU, 192 GiB) +- M5.16xlarge (64 vCPU, 256 GiB) +- M5.24xlarge (96 vCPU, 384 GiB) + +Memory-optimized + +- R5.xlarge (4 vCPU, 32 GiB) +- R5.2xlarge (8 vCPU, 64 GiB) +- R5.4xlarge (16 vCPU, 128 GiB) +- R5.8xlarge (32 vCPU, 256 GiB) +- R5.12xlarge (48 vCPU, 384 GiB) +- R5.16xlarge (64 vCPU, 512 GiB) +- R5.24xlarge (96 vCPU, 768 GiB) + +Compute-optimized + +- C5.2xlarge (8 vCPU, 16 GiB) +- C5.4xlarge (16 vCPU, 32 GiB) +- C5.9xlarge (36 vCPU, 72 GiB) +- C5.12xlarge (48 vCPU, 96 GiB) +- C5.18xlarge (72 vCPU, 144 GiB) +- C5.24xlarge (96 vCPU, 192 GiB) + + +[id="rosa-sdpolicy-regions-az_{context}"] +== Regions and availability zones +The following AWS regions are supported by Red Hat OpenShift 4 and are supported for {product-title}. Note: China and GovCloud (US) regions are not supported, regardless of their support on OpenShift 4. + +- af-south-1 (Cape Town, AWS opt-in required) +- ap-east-1 (Hong Kong, AWS opt-in required) +- ap-northeast-1 (Tokyo) +- ap-northeast-2 (Seoul) +- ap-south-1 (Mumbai) +- ap-southeast-1 (Singapore) +- ap-southeast-2 (Sydney) +- ca-central-1 (Central Canada) +- eu-central-1 (Frankfurt) +- eu-north-1 (Stockholm) +- eu-south-1 (Milan, AWS opt-in required) +- eu-west-1 (Ireland) +- eu-west-2 (London) +- eu-west-3 (Paris) +- me-south-1 (Bahrain, AWS opt-in required) +- sa-east-1 (São Paulo) +- us-east-1 (N. Virginia) +- us-east-2 (Ohio) +- us-west-1 (N. California) +- us-west-2 (Oregon) + +Multiple availability zone clusters can only be deployed in regions with at least 3 availability zones. For more information, see the link:https://aws.amazon.com/about-aws/global-infrastructure/regions_az/[Regions and Availability Zones] section in the AWS documentation. + +Each new {product-title} cluster is installed within an installer-created or preexisting Virtual Private Cloud (VPC) in a single region, with the option to deploy into a single availability zone (Single-AZ) or across multiple availability zones (Multi-AZ). This provides cluster-level network and resource isolation, and enables cloud-provider VPC settings, such as VPN connections and VPC Peering. Persistent volumes (PVs) are backed by AWS Elastic Block Storage (EBS), and are specific to the availability zone in which they are provisioned. Persistent volume claims (PVCs) do not bind to a volume until the associated pod resource is assigned into a specific availability zone to prevent unschedulable pods. Availability zone-specific resources are only usable by resources in the same availability zone. + +[WARNING] +==== +The region and the choice of single or multiple availability zone cannot be changed after a cluster has been deployed. +==== + +[id="rosa-sdpolicy-sla_{context}"] +== Service Level Agreement (SLA) +Any SLAs for the service itself are defined in Appendix 4 of the link:https://www.redhat.com/licenses/Appendix_4_Red_Hat_Online_Services_20210503.pdf[Red Hat Enterprise Agreement Appendix 4 (Online Subscription Services)]. + +[id="rosa-sdpolicy-support_{context}"] +== Support +{product-title} includes Red Hat Premium Support, which can be accessed by using the link:https://access.redhat.com/support?extIdCarryOver=true&sc_cid=701f2000001Css5AAC[Red Hat Customer Portal]. 
+ +See {product-title} link:https://access.redhat.com/support/offerings/openshift/sla?extIdCarryOver=true&sc_cid=701f2000001Css5AAC[SLAs] for support response times. + +AWS support is subject to a customer's existing support contract with AWS. diff --git a/modules/rosa-sdpolicy-logging.adoc b/modules/rosa-sdpolicy-logging.adoc new file mode 100644 index 0000000000..12a1bde0d9 --- /dev/null +++ b/modules/rosa-sdpolicy-logging.adoc @@ -0,0 +1,18 @@ + +// Module included in the following assemblies: +// +// * assemblies/rosa-service-definition.adoc + +[id="rosa-sdpolicy-logging_{context}"] += Logging + + +{product-title} provides optional integrated log forwarding to Amazon (AWS) CloudWatch. + +[id="rosa-sdpolicy-cluster-audit-logging_{context}"] +== Cluster audit logging +Cluster audit logs are available through AWS CloudWatch, if the integration is enabled. If the integration is not enabled, you can request the audit logs by opening a support case. + +[id="rosa-sdpolicy-application-logging_{context}"] +== Application logging +Application logs sent to `STDOUT` are collected by Fluentd and forwarded to AWS CloudWatch through the cluster logging stack, if it is installed. diff --git a/modules/rosa-sdpolicy-monitoring.adoc b/modules/rosa-sdpolicy-monitoring.adoc new file mode 100644 index 0000000000..7565933efb --- /dev/null +++ b/modules/rosa-sdpolicy-monitoring.adoc @@ -0,0 +1,21 @@ + +// Module included in the following assemblies: +// +// * assemblies/rosa-service-definition.adoc + +[id="rosa-sdpolicy-monitoring_{context}"] += Monitoring + + +This section provides information about the service definition for {product-title} monitoring. + +[id="rosa-sdpolicy-cluster-metrics_{context}"] +== Cluster metrics + + +{product-title} clusters come with an integrated Prometheus stack for cluster monitoring including CPU, memory, and network-based metrics. This is accessible through the web console. These metrics also allow for horizontal pod autoscaling based on CPU or memory metrics provided by an {product-title} user. + +[id="rosa-sdpolicy-cluster-status-notifications_{context}"] +== Cluster status notification + +Red Hat communicates the health and status of {product-title} clusters through a combination of a cluster dashboard available in the OpenShift Cluster Manager (OCM), and email notifications sent to the email address of the contact that originally deployed the cluster, and any additional contacts specified by the customer. diff --git a/modules/rosa-sdpolicy-networking.adoc b/modules/rosa-sdpolicy-networking.adoc new file mode 100644 index 0000000000..f08ffbaa1e --- /dev/null +++ b/modules/rosa-sdpolicy-networking.adoc @@ -0,0 +1,73 @@ + +// Module included in the following assemblies: +// +// * assemblies/rosa-service-definition.adoc + +[id="rosa-sdpolicy-networking_{context}"] += Networking + + +This section provides information about the service definition for {product-title} networking. + +[id="rosa-sdpolicy-custom-domains_{context}"] +== Custom domains for applications +To use a custom hostname for a route, you must update your DNS provider by creating a canonical name (CNAME) record. Your CNAME record should map the OpenShift canonical router hostname to your custom domain. The OpenShift canonical router hostname is shown on the _Route Details_ page after a route is created. Alternatively, a wildcard CNAME record can be created once to route all subdomains for a given hostname to the cluster's router. 
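+
+For example, the canonical router hostname can typically also be read from the route's status by using the CLI. The following is a minimal sketch; the route name (`example`), project (`my-app`), and custom hostname (`www.example.com`) are placeholders.
+
+[source,terminal]
+----
+## Look up the OpenShift canonical router hostname for an existing route
+$ oc get route example -n my-app \
+    -o jsonpath='{.status.ingress[0].routerCanonicalHostname}'
+
+## After creating the CNAME record with your DNS provider, verify that the
+## custom hostname resolves to the canonical router hostname
+$ dig +short www.example.com CNAME
+----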
+ +[id="rosa-sdpolicy-validated-certificates_{context}"] +== Domain validated certificates +{product-title} includes TLS security certificates needed for both internal and external services on the cluster. For external routes, there are two separate TLS wildcard certificates that are provided and installed on each cluster: one is for the web console and route default hostnames, and the other is for the API endpoint. Let’s Encrypt is the certificate authority used for certificates. Routes within the cluster, such as the internal link:https://kubernetes.io/docs/tasks/access-application-cluster/access-cluster/#accessing-the-api-from-a-pod[API endpoint], use TLS certificates signed by the cluster's built-in certificate authority and require the CA bundle available in every pod for trusting the TLS certificate. + +[id="rosa-sdpolicy-custom-certificates_{context}"] +== Custom certificate authorities for builds +{product-title} supports the use of custom certificate authorities to be trusted by builds when pulling images from an image registry. + +[id="rosa-sdpolicy-load-balancers_{context}"] +== Load Balancers +{product-title} uses up to five different load balancers: + +- An internal control plane load balancer that is internal to the cluster and used to balance traffic for internal cluster communications. +- An external control plane load balancer that is used for accessing the OpenShift and Kubernetes APIs. This load balancer can be disabled in OCM. If this load balancer is disabled, Red Hat reconfigures the API DNS to point to the internal control plane load balancer. +- An external control plane load balancer for Red Hat that is reserved for cluster management by Red Hat. Access is strictly controlled, and communication is only possible from whitelisted bastion hosts. +- A default external router/ingress load balancer that is the default application load balancer, denoted by `apps` in the URL. The default load balancer can be configured in OCM to be either publicly accessible over the Internet or only privately accessible over a pre-existing private connection. All application routes on the cluster are exposed on this default router load balancer, including cluster services such as the logging UI, metrics API, and registry. +- Optional: A secondary router/ingress load balancer that is a secondary application load balancer, denoted by `apps2` in the URL. The secondary load balancer can be configured in OCM to be either publicly accessible over the Internet or only privately accessible over a pre-existing private connection. If a `Label match` is configured for this router load balancer, then only application routes matching this label are exposed on this router load balancer; otherwise, all application routes are also exposed on this router load balancer. +- Optional: Load balancers for services. Enable non-HTTP/SNI traffic and non-standard ports for services. These load balancers can be mapped to a service running on {product-title} to enable advanced ingress features, such as non-HTTP/SNI traffic or the use of non-standard ports. Each AWS account has a quota which link:https://docs.aws.amazon.com/elasticloadbalancing/latest/classic/elb-limits.html[limits the number of Classic Load Balancers] that can be used within each cluster. + +[id="rosa-sdpolicy-cluster-ingress_{context}"] +== Cluster ingress +Project administrators can add route annotations for many different purposes, including ingress control through IP allow-listing. 
+
+Ingress policies can also be changed by using `NetworkPolicy` objects, which leverage the `ovs-networkpolicy` plug-in. This allows for full control over the ingress network policy down to the pod level, including between pods on the same cluster and even in the same namespace.
+
+All cluster ingress traffic goes through the defined load balancers. Direct access to all nodes is blocked by cloud configuration.
+
+[id="rosa-sdpolicy-cluster-egress_{context}"]
+== Cluster egress
+Pod egress traffic control through `EgressNetworkPolicy` objects can be used to prevent or limit outbound traffic in {product-title}.
+
+Public outbound traffic from the control plane and infrastructure nodes is required and necessary to maintain cluster image security and cluster monitoring. This requires that the `0.0.0.0/0` route belongs only to the Internet gateway; it is not possible to route this range over private connections.
+
+OpenShift 4 clusters use NAT gateways to present a public, static IP for any public outbound traffic leaving the cluster. Each availability zone a cluster is deployed into receives a distinct NAT gateway; therefore, up to 3 unique static IP addresses can exist for cluster egress traffic. Any traffic that remains inside the cluster, or that does not go out to the public Internet, does not pass through the NAT gateway and has a source IP address belonging to the node that the traffic originated from. Node IP addresses are dynamic; therefore, a customer must not rely on allow-listing individual IP addresses when accessing private resources.
+
+Customers can determine their public static IP addresses by running a pod on the cluster and then querying an external service. For example:
+[source,terminal]
+----
+$ oc run ip-lookup --image=busybox -i -t --restart=Never --rm -- /bin/sh -c "/bin/nslookup -type=a myip.opendns.com resolver1.opendns.com | grep -E 'Address: [0-9.]+'"
+----
+
+[id="rosa-sdpolicy-cloud-network-config_{context}"]
+== Cloud network configuration
+{product-title} allows for the configuration of a private network connection through AWS-managed technologies:
+
+- VPN connections
+- VPC peering
+- Transit Gateway
+- Direct Connect
+
+[IMPORTANT]
+====
+Red Hat site reliability engineers (SREs) do not monitor private network connections. Monitoring these connections is the responsibility of the customer.
+====
+
+[id="rosa-sdpolicy-dns-forwarding_{context}"]
+== DNS forwarding
+For {product-title} clusters that have a private cloud network configuration, a customer can specify internal DNS servers available on that private connection that should be queried for explicitly provided domains.
diff --git a/modules/rosa-sdpolicy-platform.adoc b/modules/rosa-sdpolicy-platform.adoc
new file mode 100644
index 0000000000..614491cfaf
--- /dev/null
+++ b/modules/rosa-sdpolicy-platform.adoc
@@ -0,0 +1,99 @@
+
+// Module included in the following assemblies:
+//
+// * assemblies/rosa-service-definition.adoc
+
+[id="rosa-sdpolicy-platform_{context}"]
+= Platform
+
+This section provides information about the service definition for the {product-title} (ROSA) platform.
+
+[id="rosa-sdpolicy-backup-policy_{context}"]
+== Cluster backup policy
+
+[IMPORTANT]
+====
+It is critical that customers have a backup plan for their applications and application data.
+====
+
+Application and application data backups are not a part of the {product-title} service.
+All Kubernetes objects and persistent volumes (PVs) in each {product-title} cluster are backed up to facilitate a prompt recovery in the unlikely event that a cluster becomes irreparably inoperable. + +The backups are stored in a secure object storage, or multiple availability zone, bucket in the same account as the cluster. +Node root volumes are not backed up, as Red Hat CoreOS is fully managed by the {product-title} cluster and no stateful data should be stored on a node's root volume. + +The following table shows the frequency of backups: +[cols="4",options="header"] +|=== + +|Component +|Snapshot frequency +|Retention +|Notes + +|Full object store backup, all cluster PVs +|Daily at 0100 UTC +|7 days +|This is a full backup of all Kubernetes objects, as well as all mounted PVs in the cluster. + +|Full object store backup, all cluster PVs +|Weekly on Mondays at 0200 UTC +|30 days +|This is a full backup of all Kubernetes objects, as well as all mounted PVs in the cluster. + +|Full object store backup +|Hourly at 17 minutes past the hour +|24 hours +|This is a full backup of all Kubernetes objects. No PVs are backed up in this backup schedule. + +|=== + +[id="rosa-sdpolicy-autoscaling_{context}"] +== Autoscaling +Node autoscaling is available on {product-title}. + +[id="rosa-sdpolicy-daemonsets_{context}"] +== Daemonsets +Customers can create and run daemonsets on {product-title}. To restrict daemonsets to only running on worker nodes, use the following `nodeSelector`: +[source,yaml] +---- +... +spec: + nodeSelector: + role: worker +... +---- + +[id="rosa-sdpolicy-multiple-availability-zone_{context}"] +== Multiple availability zone +In a multiple availability zone cluster, control plane nodes are distributed across availability zones and at least one worker node is required in each availability zone. + +[id="rosa-sdpolicy-node-labels_{context}"] +== Node labels +Custom node labels are created by Red Hat during node creation and cannot be changed on {product-title} clusters at this time. However, custom labels are supported when creating new machine pools. + +[id="rosa-sdpolicy-openshift-version_{context}"] +== OpenShift version +{product-title} is run as a service and is kept up to date with the latest OpenShift Container Platform version. Upgrade scheduling to the latest version is available. + +[id="rosa-sdpolicy-upgrades_{context}"] +== Upgrades +Upgrades can be scheduled using the `rosa` CLI utility or through OpenShift Cluster Manager (OCM). + +See the link:https://docs.openshift.com/rosa/rosa_policy/rosa-life-cycle.html[{product-title} Life Cycle] for more information on the upgrade policy and procedures. + +[id="rosa-sdpolicy-window-containers_{context}"] +== Windows Containers +Windows Containers are not available on {product-title} at this time. + +[id="rosa-sdpolicy-container-engine_{context}"] +== Container engine +{product-title} runs on OpenShift 4 and uses link:https://www.redhat.com/en/blog/red-hat-openshift-container-platform-4-now-defaults-cri-o-underlying-container-engine[CRI-O] as the only available container engine. + +[id="rosa-sdpolicy-operating-system_{context}"] +== Operating system +{product-title} runs on OpenShift 4 and uses Red Hat CoreOS as the operating system for all control plane and worker nodes. + +[id="rosa-sdpolicy-kubernetes-operator_{context}"] +== Kubernetes Operator support +All Operators listed in the Operator Hub marketplace should be available for installation. 
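+
+For example, the following is a minimal sketch that lists the Operators currently available to the cluster from its configured catalog sources; it assumes that you are logged in with permissions to read package manifests.
+
+[source,terminal]
+----
+## List the Operators available for installation from the configured catalogs
+$ oc get packagemanifests -n openshift-marketplace
+----
+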
These Operators are considered customer workloads, and are not monitored by Red Hat SRE.
diff --git a/modules/rosa-sdpolicy-security.adoc b/modules/rosa-sdpolicy-security.adoc
new file mode 100644
index 0000000000..824b21a7db
--- /dev/null
+++ b/modules/rosa-sdpolicy-security.adoc
@@ -0,0 +1,60 @@
+
+// Module included in the following assemblies:
+//
+// * assemblies/rosa-service-definition.adoc
+
+[id="rosa-sdpolicy-security_{context}"]
+= Security
+
+
+This section provides information about the service definition for {product-title} security.
+
+[id="rosa-sdpolicy-auth-provider_{context}"]
+== Authentication provider
+Authentication for the cluster can be configured by using either the link:https://cloud.redhat.com/openshift[OpenShift Cluster Manager (OCM)] during the cluster creation process or by using the `rosa` CLI. {product-title} is not an identity provider, and all access to the cluster must be managed by the customer as part of their integrated solution. The use of multiple identity providers provisioned at the same time is supported. The following identity providers are supported:
+
+- GitHub or GitHub Enterprise
+- GitLab
+- Google
+- LDAP
+- OpenID Connect
+
+[id="rosa-sdpolicy-privileged-containers_{context}"]
+== Privileged containers
+Privileged containers are available for users with the `cluster-admin` role. Usage of privileged containers as `cluster-admin` is subject to the responsibilities and exclusion notes in the link:https://www.redhat.com/en/about/agreements[Red Hat Enterprise Agreement Appendix 4] (Online Subscription Services).
+
+[id="rosa-sdpolicy-customer-admin-user_{context}"]
+== Customer administrator user
+In addition to normal users, {product-title} provides access to an {product-title}-specific group called `dedicated-admin`. Any users on the cluster that are members of the `dedicated-admin` group:
+
+- Have administrator access to all customer-created projects on the cluster.
+- Can manage resource quotas and limits on the cluster.
+- Can add and manage `NetworkPolicy` objects.
+- Are able to view information about specific nodes and PVs in the cluster, including scheduler information.
+- Can access the reserved `dedicated-admin` project on the cluster, which allows for the creation of service accounts with elevated privileges and also gives the ability to update default limits and quotas for projects on the cluster.
+
+[id="rosa-sdpolicy-cluster-admin-role_{context}"]
+== Cluster administration role
+As an administrator of {product-title}, you have default access to the `cluster-admin` role for your organization's cluster. While logged in to an account with the `cluster-admin` role, users have increased permissions to run privileged security contexts.
+
+[id="rosa-sdpolicy-project-self-service_{context}"]
+== Project self-service
+By default, all users have the ability to create, update, and delete their projects. This can be restricted if a member of the `dedicated-admin` group removes the `self-provisioner` role from authenticated users:
+[source,terminal]
+----
+$ oc adm policy remove-cluster-role-from-group self-provisioner system:authenticated:oauth
+----
+
+Restrictions can be reverted by applying:
+[source,terminal]
+----
+$ oc adm policy add-cluster-role-to-group self-provisioner system:authenticated:oauth
+----
+
+[id="rosa-sdpolicy-regulatory-compliance_{context}"]
+== Regulatory compliance
+See Understanding process and security for ROSA for the latest compliance information.
+ +[id="rosa-sdpolicy-network-security_{context}"] +== Network security +With {product-title}, AWS provides a standard DDoS protection on all load balancers, called AWS Shield. This provides 95% protection against most commonly used level 3 and 4 attacks on all the public facing load balancers used for {product-title}. A 10-second timeout is added for HTTP requests coming to the `haproxy` router to receive a response or the connection is closed to provide additional protection. diff --git a/modules/rosa-sdpolicy-storage.adoc b/modules/rosa-sdpolicy-storage.adoc new file mode 100644 index 0000000000..2a05c7aae0 --- /dev/null +++ b/modules/rosa-sdpolicy-storage.adoc @@ -0,0 +1,30 @@ + +// Module included in the following assemblies: +// +// * assemblies/rosa-service-definition.adoc + +[id="rosa-sdpolicy-storage_{context}"] += Storage + + +This section provides information about the service definition for {product-title} storage. + +[id="rosa-sdpolicy-encrytpted-at-rest-storage_{context}"] +== Encrypted-at-rest OS and node storage +Control plane nodes use encrypted-at-rest AWS Elastic Block Store (EBS) storage. + +[id="rosa-sdpolicy-encrytpted-at-rest-pv_{context}"] +== Encrypted-at-rest PV +EBS volumes that are used for PVs are encrypted-at-rest by default. + +[id="rosa-sdpolicy-block-storage_{context}"] +== Block storage (RWO) +Persistent volumes (PVs) are backed by AWS EBS, which is Read-Write-Once. + +PVs can be attached only to a single node at a time and are specific to the availability zone in which they were provisioned. However, PVs can be attached to any node in the availability zone. + +Each cloud provider has its own limits for how many PVs can be attached to a single node. See link:https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/volume_limits.html#instance-type-volume-limits[AWS instance type limits] for details. + +== Shared Storage (RWX) + +The AWS CSI Driver can be used to provide RWX support for {product-title}. A community Operator is provided to simplify setup. diff --git a/modules/rosa-setting-the-aws-security-token-version.adoc b/modules/rosa-setting-the-aws-security-token-version.adoc new file mode 100644 index 0000000000..96cb2ee9fd --- /dev/null +++ b/modules/rosa-setting-the-aws-security-token-version.adoc @@ -0,0 +1,49 @@ +// Module included in the following assemblies: +// +// * rosa_getting_started_sts/rosa-sts-aws-prereqs.adoc + +[id="rosa-setting-the-aws-security-token-version_{context}"] += Setting the AWS security token version + +If you want to create a {product-title} (ROSA) cluster with the AWS Security Token Service (STS) in an AWS opt-in region, you must set the security token version to version 2 in your AWS account. + +.Prerequisites + +* You have installed and configured the latest AWS CLI on your installation host. + +.Procedure + +. List the ID of the AWS account that is defined in your AWS CLI configuration: ++ +[source,terminal] +---- +$ aws sts get-caller-identity --query Account --output json +---- ++ +Ensure that the output matches the ID of the relevant AWS account. + +. List the security token version that is set in your AWS account: ++ +[source,terminal] +---- +$ aws iam get-account-summary --query SummaryMap.GlobalEndpointTokenVersion --output json +---- ++ +.Example output ++ +[source,terminal] +---- +1 +---- + +. 
To update the security token version to version 2 for all regions in your AWS account, run the following command:
++
+[source,terminal]
+----
+$ aws iam set-security-token-service-preferences --global-endpoint-token-version v2Token
+----
++
+[IMPORTANT]
+====
+Updating to security token version 2 can impact the systems that store the tokens, due to the increased token length. For more information, see link:https://docs.aws.amazon.com/cli/latest/reference/iam/set-security-token-service-preferences.html[the AWS documentation on setting STS preferences].
+====
diff --git a/modules/rosa-setting-up-cli.adoc b/modules/rosa-setting-up-cli.adoc
new file mode 100644
index 0000000000..e5a744b72e
--- /dev/null
+++ b/modules/rosa-setting-up-cli.adoc
@@ -0,0 +1,26 @@
+
+// Module included in the following assemblies:
+//
+// * cli_reference/rosa_cli/rosa-get-started-cli.adoc
+
+
+[id="rosa-setting-up-cli_{context}"]
+= Setting up the rosa CLI
+
+
+To set up the `rosa` CLI, download the latest release, then configure and initialize `rosa`:
+
+.Procedure
+
+. Download the latest release of the `rosa` CLI for your operating system from the link:https://access.redhat.com/products/red-hat-openshift-service-aws/[{product-title}] product page.
++
+. After you download the release, it is recommended that you rename the downloaded executable file to `rosa` and add `rosa` to your path.
++
+. Optional: After downloading `rosa`, enable Bash completion for `rosa`. Bash completion helps to automatically complete commands and suggest options when you press `Tab`. The command generates a Bash completion file for `rosa` and sources it in your current shell session.
++
+To configure your Bash shell to load `rosa` completions for each session, add the following command to your `.bashrc` file (`~/.bashrc` or `~/.profile`).
++
+[source,terminal]
+----
+$ . <(rosa completion)
+----
diff --git a/modules/rosa-sts-about-iam-resources.adoc b/modules/rosa-sts-about-iam-resources.adoc
new file mode 100644
index 0000000000..950216580f
--- /dev/null
+++ b/modules/rosa-sts-about-iam-resources.adoc
@@ -0,0 +1,14 @@
+// Module included in the following assemblies:
+//
+// * rosa_getting_started_sts/rosa_creating_a_cluster_with_sts/rosa-sts-about-iam-resources.adoc
+
+[id="rosa-sts-about-iam-resources_{context}"]
+= About IAM resources for clusters that use STS
+
+To deploy a {product-title} (ROSA) cluster that uses the AWS Security Token Service (STS), you must create the following AWS Identity and Access Management (IAM) resources:
+
+* Specific account-wide IAM roles and policies that provide the STS permissions required for ROSA support, installation, control plane, and compute functionality. This includes account-wide Operator policies.
+* Cluster-specific Operator IAM roles that permit the ROSA cluster Operators to carry out core OpenShift functionality.
+* An OpenID Connect (OIDC) provider that the cluster Operators use to authenticate.
+
+This document provides reference information about the IAM resources that you must deploy when you create a ROSA cluster that uses STS. It also includes the `aws` CLI commands that are generated when you use `manual` mode with the `rosa create` command.
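+
+The following is a minimal sketch of the `rosa` commands that are typically used to create these resources; the cluster name is a placeholder, and specifying `--mode manual` prints the corresponding `aws` commands for review instead of running them.
+
+[source,terminal]
+----
+## Create the account-wide roles and policies, including the Operator policies
+$ rosa create account-roles --mode manual
+
+## Create the cluster-specific Operator IAM roles after the cluster is created
+$ rosa create operator-roles --cluster=<cluster_name> --mode manual
+
+## Create the OIDC provider that the cluster Operators use to authenticate
+$ rosa create oidc-provider --cluster=<cluster_name> --mode manual
+----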
diff --git a/modules/rosa-sts-account-wide-role-and-policy-commands.adoc b/modules/rosa-sts-account-wide-role-and-policy-commands.adoc new file mode 100644 index 0000000000..0ecfaa0ab3 --- /dev/null +++ b/modules/rosa-sts-account-wide-role-and-policy-commands.adoc @@ -0,0 +1,92 @@ +// Module included in the following assemblies: +// +// * rosa_getting_started_sts/rosa_creating_a_cluster_with_sts/rosa-sts-about-iam-resources.adoc + +[id="rosa-sts-account-wide-role-and-policy-aws-cli_{context}"] += Account-wide IAM role and policy AWS CLI reference + +This section lists the `aws` CLI commands that are shown in the terminal when you run the following `rosa` command using `manual` mode: + +[source,terminal] +---- +$ rosa create account-roles --mode manual +---- + +[NOTE] +==== +When using `manual` mode, the `aws` commands are printed to the terminal for your review. After reviewing the `aws` commands, you must run them manually. Alternatively, you can specify `--mode auto` with the `rosa create` command to run the `aws` commands immediately. +==== + +.Command output +[source,terminal] +---- +aws iam create-role \ + --role-name ManagedOpenShift-Installer-Role \ + --assume-role-policy-document file://sts_installer_trust_policy.json \ + --tags Key=rosa_openshift_version,Value=4.8 Key=rosa_role_prefix,Value=ManagedOpenShift Key=rosa_role_type,Value=installer + +aws iam put-role-policy \ + --role-name ManagedOpenShift-Installer-Role \ + --policy-name ManagedOpenShift-Installer-Role-Policy \ + --policy-document file://sts_installer_permission_policy.json + +aws iam create-role \ + --role-name ManagedOpenShift-ControlPlane-Role \ + --assume-role-policy-document file://sts_instance_controlplane_trust_policy.json \ + --tags Key=rosa_openshift_version,Value=4.8 Key=rosa_role_prefix,Value=ManagedOpenShift Key=rosa_role_type,Value=instance_controlplane + +aws iam put-role-policy \ + --role-name ManagedOpenShift-ControlPlane-Role \ + --policy-name ManagedOpenShift-ControlPlane-Role-Policy \ + --policy-document file://sts_instance_controlplane_permission_policy.json + +aws iam create-role \ + --role-name ManagedOpenShift-Worker-Role \ + --assume-role-policy-document file://sts_instance_worker_trust_policy.json \ + --tags Key=rosa_openshift_version,Value=4.8 Key=rosa_role_prefix,Value=ManagedOpenShift Key=rosa_role_type,Value=instance_worker + +aws iam put-role-policy \ + --role-name ManagedOpenShift-Worker-Role \ + --policy-name ManagedOpenShift-Worker-Role-Policy \ + --policy-document file://sts_instance_worker_permission_policy.json + +aws iam create-role \ + --role-name ManagedOpenShift-Support-Role \ + --assume-role-policy-document file://sts_support_trust_policy.json \ + --tags Key=rosa_openshift_version,Value=4.8 Key=rosa_role_prefix,Value=ManagedOpenShift Key=rosa_role_type,Value=support + +aws iam put-role-policy \ + --role-name ManagedOpenShift-Support-Role \ + --policy-name ManagedOpenShift-Support-Role-Policy \ + --policy-document file://sts_support_permission_policy.json + +aws iam create-policy \ + --policy-name ManagedOpenShift-openshift-ingress-operator-cloud-credentials \ + --policy-document file://openshift_ingress_operator_cloud_credentials_policy.json \ + --tags Key=rosa_openshift_version,Value=4.8 Key=rosa_role_prefix,Value=ManagedOpenShift Key=operator_namespace,Value=openshift-ingress-operator Key=operator_name,Value=cloud-credentials + +aws iam create-policy \ + --policy-name ManagedOpenShift-openshift-cluster-csi-drivers-ebs-cloud-credent \ + --policy-document 
file://openshift_cluster_csi_drivers_ebs_cloud_credentials_policy.json \ + --tags Key=rosa_openshift_version,Value=4.8 Key=rosa_role_prefix,Value=ManagedOpenShift Key=operator_namespace,Value=openshift-cluster-csi-drivers Key=operator_name,Value=ebs-cloud-credentials + +aws iam create-policy \ + --policy-name ManagedOpenShift-openshift-machine-api-aws-cloud-credentials \ + --policy-document file://openshift_machine_api_aws_cloud_credentials_policy.json \ + --tags Key=rosa_openshift_version,Value=4.8 Key=rosa_role_prefix,Value=ManagedOpenShift Key=operator_namespace,Value=openshift-machine-api Key=operator_name,Value=aws-cloud-credentials + +aws iam create-policy \ + --policy-name ManagedOpenShift-openshift-cloud-credential-operator-cloud-crede \ + --policy-document file://openshift_cloud_credential_operator_cloud_credential_operator_iam_ro_creds_policy.json \ + --tags Key=rosa_openshift_version,Value=4.8 Key=rosa_role_prefix,Value=ManagedOpenShift Key=operator_namespace,Value=openshift-cloud-credential-operator Key=operator_name,Value=cloud-credential-operator-iam-ro-creds + +aws iam create-policy \ + --policy-name ManagedOpenShift-openshift-image-registry-installer-cloud-creden \ + --policy-document file://openshift_image_registry_installer_cloud_credentials_policy.json \ + --tags Key=rosa_openshift_version,Value=4.8 Key=rosa_role_prefix,Value=ManagedOpenShift Key=operator_namespace,Value=openshift-image-registry Key=operator_name,Value=installer-cloud-credentials +---- + +[NOTE] +==== +The command examples provided in the table include the `ManagedOpenShift` prefix. The prefix is implied if you do not specify a custom prefix by using the `--prefix` option. +==== diff --git a/modules/rosa-sts-account-wide-roles-and-policies.adoc b/modules/rosa-sts-account-wide-roles-and-policies.adoc new file mode 100644 index 0000000000..887c8148a7 --- /dev/null +++ b/modules/rosa-sts-account-wide-roles-and-policies.adoc @@ -0,0 +1,1422 @@ +// Module included in the following assemblies: +// +// * rosa_getting_started_sts/rosa_creating_a_cluster_with_sts/rosa-sts-about-iam-resources.adoc + +[id="rosa-sts-account-wide-roles-and-policies_{context}"] += Account-wide IAM role and policy reference + +This section provides details about the account-wide IAM roles and policies that are required for ROSA deployments that use STS, including the Operator policies. It also includes the JSON files that define the policies. + +The account-wide roles and policies are specific to an OpenShift minor release version, for example OpenShift 4.8, and are backward compatible. You can minimize the required STS resources by reusing the account-wide roles and policies for multiple clusters of the same minor version, regardless of their patch version. + +.ROSA installer role, policy, and policy files +[cols="1,2",options="header"] +|=== + +|Resource|Description + +|`ManagedOpenShift-Installer-Role` +|An IAM role used by the ROSA installer. + +|`ManagedOpenShift-Installer-Role-Policy` +|An inline IAM policy that provides the ROSA installer with the permissions required to complete cluster installation tasks. 
+ +|=== + +.`sts_installer_trust_policy.json` for all versions +[%collapsible] +==== +[source,json] +---- +{ + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Principal": { + "AWS": [ + "arn:aws:iam::710019948333:role/RH-Managed-OpenShift-Installer" + ] + }, + "Action": [ + "sts:AssumeRole" + ] + } + ] +} +---- +==== + +.`sts_installer_permission_policy.json` for 4.7 +[%collapsible] +==== +[source,json] +---- +{ + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Action": [ + "autoscaling:DescribeAutoScalingGroups", + "ec2:AllocateAddress", + "ec2:AssociateAddress", + "ec2:AssociateDhcpOptions", + "ec2:AssociateRouteTable", + "ec2:AttachInternetGateway", + "ec2:AttachNetworkInterface", + "ec2:AuthorizeSecurityGroupEgress", + "ec2:AuthorizeSecurityGroupIngress", + "ec2:CopyImage", + "ec2:CreateDhcpOptions", + "ec2:CreateInternetGateway", + "ec2:CreateNatGateway", + "ec2:CreateNetworkInterface", + "ec2:CreateRoute", + "ec2:CreateRouteTable", + "ec2:CreateSecurityGroup", + "ec2:CreateSubnet", + "ec2:CreateTags", + "ec2:CreateVolume", + "ec2:CreateVpc", + "ec2:CreateVpcEndpoint", + "ec2:DeleteDhcpOptions", + "ec2:DeleteInternetGateway", + "ec2:DeleteNatGateway", + "ec2:DeleteNetworkInterface", + "ec2:DeleteRoute", + "ec2:DeleteRouteTable", + "ec2:DeleteSecurityGroup", + "ec2:DeleteSnapshot", + "ec2:DeleteSubnet", + "ec2:DeleteTags", + "ec2:DeleteVolume", + "ec2:DeleteVpc", + "ec2:DeleteVpcEndpoints", + "ec2:DeregisterImage", + "ec2:DescribeAccountAttributes", + "ec2:DescribeAddresses", + "ec2:DescribeAvailabilityZones", + "ec2:DescribeDhcpOptions", + "ec2:DescribeImages", + "ec2:DescribeInstanceAttribute", + "ec2:DescribeInstanceCreditSpecifications", + "ec2:DescribeInstances", + "ec2:DescribeInstanceStatus", + "ec2:DescribeInstanceTypes", + "ec2:DescribeInternetGateways", + "ec2:DescribeKeyPairs", + "ec2:DescribeNatGateways", + "ec2:DescribeNetworkAcls", + "ec2:DescribeNetworkInterfaces", + "ec2:DescribePrefixLists", + "ec2:DescribeRegions", + "ec2:DescribeReservedInstancesOfferings", + "ec2:DescribeRouteTables", + "ec2:DescribeSecurityGroups", + "ec2:DescribeSubnets", + "ec2:DescribeTags", + "ec2:DescribeVolumes", + "ec2:DescribeVpcAttribute", + "ec2:DescribeVpcClassicLink", + "ec2:DescribeVpcClassicLinkDnsSupport", + "ec2:DescribeVpcEndpoints", + "ec2:DescribeVpcs", + "ec2:DetachInternetGateway", + "ec2:DisassociateRouteTable", + "ec2:GetEbsDefaultKmsKeyId", + "ec2:ModifyInstanceAttribute", + "ec2:ModifyNetworkInterfaceAttribute", + "ec2:ModifySubnetAttribute", + "ec2:ModifyVpcAttribute", + "ec2:ReleaseAddress", + "ec2:ReplaceRouteTableAssociation", + "ec2:RevokeSecurityGroupEgress", + "ec2:RevokeSecurityGroupIngress", + "ec2:RunInstances", + "ec2:StartInstances", + "ec2:StopInstances", + "ec2:TerminateInstances", + "elasticloadbalancing:AddTags", + "elasticloadbalancing:ApplySecurityGroupsToLoadBalancer", + "elasticloadbalancing:AttachLoadBalancerToSubnets", + "elasticloadbalancing:ConfigureHealthCheck", + "elasticloadbalancing:CreateListener", + "elasticloadbalancing:CreateLoadBalancer", + "elasticloadbalancing:CreateLoadBalancerListeners", + "elasticloadbalancing:CreateTargetGroup", + "elasticloadbalancing:DeleteLoadBalancer", + "elasticloadbalancing:DeleteTargetGroup", + "elasticloadbalancing:DeregisterInstancesFromLoadBalancer", + "elasticloadbalancing:DeregisterTargets", + "elasticloadbalancing:DescribeInstanceHealth", + "elasticloadbalancing:DescribeListeners", + "elasticloadbalancing:DescribeLoadBalancerAttributes", + 
"elasticloadbalancing:DescribeLoadBalancers", + "elasticloadbalancing:DescribeTags", + "elasticloadbalancing:DescribeTargetGroupAttributes", + "elasticloadbalancing:DescribeTargetGroups", + "elasticloadbalancing:DescribeTargetHealth", + "elasticloadbalancing:ModifyLoadBalancerAttributes", + "elasticloadbalancing:ModifyTargetGroup", + "elasticloadbalancing:ModifyTargetGroupAttributes", + "elasticloadbalancing:RegisterInstancesWithLoadBalancer", + "elasticloadbalancing:RegisterTargets", + "elasticloadbalancing:SetLoadBalancerPoliciesOfListener", + "iam:AddRoleToInstanceProfile", + "iam:CreateInstanceProfile", + "iam:DeleteInstanceProfile", + "iam:GetInstanceProfile", + "iam:GetRole", + "iam:GetRolePolicy", + "iam:GetUser", + "iam:ListAttachedRolePolicies", + "iam:ListInstanceProfiles", + "iam:ListInstanceProfilesForRole", + "iam:ListRolePolicies", + "iam:ListRoles", + "iam:ListUserPolicies", + "iam:ListUsers", + "iam:PassRole", + "iam:RemoveRoleFromInstanceProfile", + "iam:SimulatePrincipalPolicy", + "iam:TagRole", + "iam:UntagRole", + "route53:ChangeResourceRecordSets", + "route53:ChangeTagsForResource", + "route53:CreateHostedZone", + "route53:DeleteHostedZone", + "route53:GetChange", + "route53:GetHostedZone", + "route53:ListHostedZones", + "route53:ListHostedZonesByName", + "route53:ListResourceRecordSets", + "route53:ListTagsForResource", + "route53:UpdateHostedZoneComment", + "s3:CreateBucket", + "s3:DeleteBucket", + "s3:DeleteObject", + "s3:GetAccelerateConfiguration", + "s3:GetBucketAcl", + "s3:GetBucketCORS", + "s3:GetBucketLocation", + "s3:GetBucketLogging", + "s3:GetBucketObjectLockConfiguration", + "s3:GetBucketRequestPayment", + "s3:GetBucketTagging", + "s3:GetBucketVersioning", + "s3:GetBucketWebsite", + "s3:GetEncryptionConfiguration", + "s3:GetLifecycleConfiguration", + "s3:GetObject", + "s3:GetObjectAcl", + "s3:GetObjectTagging", + "s3:GetObjectVersion", + "s3:GetReplicationConfiguration", + "s3:ListBucket", + "s3:ListBucketVersions", + "s3:PutBucketAcl", + "s3:PutBucketTagging", + "s3:PutEncryptionConfiguration", + "s3:PutObject", + "s3:PutObjectAcl", + "s3:PutObjectTagging", + "sts:AssumeRole", + "sts:AssumeRoleWithWebIdentity", + "sts:GetCallerIdentity", + "tag:GetResources", + "tag:UntagResources", + "ec2:CreateVpcEndpointServiceConfiguration", + "ec2:DeleteVpcEndpointServiceConfigurations", + "ec2:DescribeVpcEndpointServiceConfigurations", + "ec2:DescribeVpcEndpointServicePermissions", + "ec2:DescribeVpcEndpointServices", + "ec2:ModifyVpcEndpointServicePermissions" + ], + "Resource": "*" + } + ] +} +---- +==== + +.`sts_installer_permission_policy.json` for 4.8 +[%collapsible] +==== +[source,json] +---- +{ + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Action": [ + "autoscaling:DescribeAutoScalingGroups", + "ec2:AllocateAddress", + "ec2:AssociateAddress", + "ec2:AssociateDhcpOptions", + "ec2:AssociateRouteTable", + "ec2:AttachInternetGateway", + "ec2:AttachNetworkInterface", + "ec2:AuthorizeSecurityGroupEgress", + "ec2:AuthorizeSecurityGroupIngress", + "ec2:CopyImage", + "ec2:CreateDhcpOptions", + "ec2:CreateInternetGateway", + "ec2:CreateNatGateway", + "ec2:CreateNetworkInterface", + "ec2:CreateRoute", + "ec2:CreateRouteTable", + "ec2:CreateSecurityGroup", + "ec2:CreateSubnet", + "ec2:CreateTags", + "ec2:CreateVolume", + "ec2:CreateVpc", + "ec2:CreateVpcEndpoint", + "ec2:DeleteDhcpOptions", + "ec2:DeleteInternetGateway", + "ec2:DeleteNatGateway", + "ec2:DeleteNetworkInterface", + "ec2:DeleteRoute", + "ec2:DeleteRouteTable", + 
"ec2:DeleteSecurityGroup", + "ec2:DeleteSnapshot", + "ec2:DeleteSubnet", + "ec2:DeleteTags", + "ec2:DeleteVolume", + "ec2:DeleteVpc", + "ec2:DeleteVpcEndpoints", + "ec2:DeregisterImage", + "ec2:DescribeAccountAttributes", + "ec2:DescribeAddresses", + "ec2:DescribeAvailabilityZones", + "ec2:DescribeDhcpOptions", + "ec2:DescribeImages", + "ec2:DescribeInstanceAttribute", + "ec2:DescribeInstanceCreditSpecifications", + "ec2:DescribeInstances", + "ec2:DescribeInstanceStatus", + "ec2:DescribeInstanceTypes", + "ec2:DescribeInternetGateways", + "ec2:DescribeKeyPairs", + "ec2:DescribeNatGateways", + "ec2:DescribeNetworkAcls", + "ec2:DescribeNetworkInterfaces", + "ec2:DescribePrefixLists", + "ec2:DescribeRegions", + "ec2:DescribeReservedInstancesOfferings", + "ec2:DescribeRouteTables", + "ec2:DescribeSecurityGroups", + "ec2:DescribeSubnets", + "ec2:DescribeTags", + "ec2:DescribeVolumes", + "ec2:DescribeVpcAttribute", + "ec2:DescribeVpcClassicLink", + "ec2:DescribeVpcClassicLinkDnsSupport", + "ec2:DescribeVpcEndpoints", + "ec2:DescribeVpcs", + "ec2:DetachInternetGateway", + "ec2:DisassociateRouteTable", + "ec2:GetEbsDefaultKmsKeyId", + "ec2:ModifyInstanceAttribute", + "ec2:ModifyNetworkInterfaceAttribute", + "ec2:ModifySubnetAttribute", + "ec2:ModifyVpcAttribute", + "ec2:ReleaseAddress", + "ec2:ReplaceRouteTableAssociation", + "ec2:RevokeSecurityGroupEgress", + "ec2:RevokeSecurityGroupIngress", + "ec2:RunInstances", + "ec2:StartInstances", + "ec2:StopInstances", + "ec2:TerminateInstances", + "elasticloadbalancing:AddTags", + "elasticloadbalancing:ApplySecurityGroupsToLoadBalancer", + "elasticloadbalancing:AttachLoadBalancerToSubnets", + "elasticloadbalancing:ConfigureHealthCheck", + "elasticloadbalancing:CreateListener", + "elasticloadbalancing:CreateLoadBalancer", + "elasticloadbalancing:CreateLoadBalancerListeners", + "elasticloadbalancing:CreateTargetGroup", + "elasticloadbalancing:DeleteLoadBalancer", + "elasticloadbalancing:DeleteTargetGroup", + "elasticloadbalancing:DeregisterInstancesFromLoadBalancer", + "elasticloadbalancing:DeregisterTargets", + "elasticloadbalancing:DescribeInstanceHealth", + "elasticloadbalancing:DescribeListeners", + "elasticloadbalancing:DescribeLoadBalancerAttributes", + "elasticloadbalancing:DescribeLoadBalancers", + "elasticloadbalancing:DescribeTags", + "elasticloadbalancing:DescribeTargetGroupAttributes", + "elasticloadbalancing:DescribeTargetGroups", + "elasticloadbalancing:DescribeTargetHealth", + "elasticloadbalancing:ModifyLoadBalancerAttributes", + "elasticloadbalancing:ModifyTargetGroup", + "elasticloadbalancing:ModifyTargetGroupAttributes", + "elasticloadbalancing:RegisterInstancesWithLoadBalancer", + "elasticloadbalancing:RegisterTargets", + "elasticloadbalancing:SetLoadBalancerPoliciesOfListener", + "iam:AddRoleToInstanceProfile", + "iam:CreateInstanceProfile", + "iam:DeleteInstanceProfile", + "iam:GetInstanceProfile", + "iam:GetRole", + "iam:GetRolePolicy", + "iam:GetUser", + "iam:ListAttachedRolePolicies", + "iam:ListInstanceProfiles", + "iam:ListInstanceProfilesForRole", + "iam:ListRolePolicies", + "iam:ListRoles", + "iam:ListUserPolicies", + "iam:ListUsers", + "iam:PassRole", + "iam:RemoveRoleFromInstanceProfile", + "iam:SimulatePrincipalPolicy", + "iam:TagRole", + "iam:UntagRole", + "route53:ChangeResourceRecordSets", + "route53:ChangeTagsForResource", + "route53:CreateHostedZone", + "route53:DeleteHostedZone", + "route53:GetChange", + "route53:GetHostedZone", + "route53:ListHostedZones", + "route53:ListHostedZonesByName", + 
"route53:ListResourceRecordSets", + "route53:ListTagsForResource", + "route53:UpdateHostedZoneComment", + "s3:CreateBucket", + "s3:DeleteBucket", + "s3:DeleteObject", + "s3:GetAccelerateConfiguration", + "s3:GetBucketAcl", + "s3:GetBucketCORS", + "s3:GetBucketLocation", + "s3:GetBucketLogging", + "s3:GetBucketObjectLockConfiguration", + "s3:GetBucketRequestPayment", + "s3:GetBucketTagging", + "s3:GetBucketVersioning", + "s3:GetBucketWebsite", + "s3:GetEncryptionConfiguration", + "s3:GetLifecycleConfiguration", + "s3:GetObject", + "s3:GetObjectAcl", + "s3:GetObjectTagging", + "s3:GetObjectVersion", + "s3:GetReplicationConfiguration", + "s3:ListBucket", + "s3:ListBucketVersions", + "s3:PutBucketAcl", + "s3:PutBucketTagging", + "s3:PutEncryptionConfiguration", + "s3:PutObject", + "s3:PutObjectAcl", + "s3:PutObjectTagging", + "sts:AssumeRole", + "sts:AssumeRoleWithWebIdentity", + "sts:GetCallerIdentity", + "tag:GetResources", + "tag:UntagResources", + "ec2:CreateVpcEndpointServiceConfiguration", + "ec2:DeleteVpcEndpointServiceConfigurations", + "ec2:DescribeVpcEndpointServiceConfigurations", + "ec2:DescribeVpcEndpointServicePermissions", + "ec2:DescribeVpcEndpointServices", + "ec2:ModifyVpcEndpointServicePermissions" + ], + "Resource": "*" + } + ] +} +---- +==== + +.ROSA control plane role, policy, and policy files +[cols="1,2",options="header"] +|=== + +|Resource|Description + +|`ManagedOpenShift-ControlPlane-Role` +|An IAM role used by the ROSA control plane. + +|`ManagedOpenShift-ControlPlane-Role-Policy` +|An inline IAM policy that provides the ROSA control plane with the permissions required to manage its components. + +|=== + +.`sts_instance_controlplane_trust_policy.json` for all versions +[%collapsible] +==== +[source,json] +---- +{ + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Principal": { + "Service": [ + "ec2.amazonaws.com" + ] + }, + "Action": [ + "sts:AssumeRole" + ] + } + ] +} +---- +==== + +.`sts_instance_controlplane_permission_policy.json` for 4.7 +[%collapsible] +==== +[source,json] +---- +{ + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Action": [ + "ec2:AttachVolume", + "ec2:AuthorizeSecurityGroupIngress", + "ec2:CreateSecurityGroup", + "ec2:CreateTags", + "ec2:CreateVolume", + "ec2:DeleteSecurityGroup", + "ec2:DeleteVolume", + "ec2:Describe*", + "ec2:DetachVolume", + "ec2:ModifyInstanceAttribute", + "ec2:ModifyVolume", + "ec2:RevokeSecurityGroupIngress", + "elasticloadbalancing:AddTags", + "elasticloadbalancing:AttachLoadBalancerToSubnets", + "elasticloadbalancing:ApplySecurityGroupsToLoadBalancer", + "elasticloadbalancing:CreateListener", + "elasticloadbalancing:CreateLoadBalancer", + "elasticloadbalancing:CreateLoadBalancerPolicy", + "elasticloadbalancing:CreateLoadBalancerListeners", + "elasticloadbalancing:CreateTargetGroup", + "elasticloadbalancing:ConfigureHealthCheck", + "elasticloadbalancing:DeleteListener", + "elasticloadbalancing:DeleteLoadBalancer", + "elasticloadbalancing:DeleteLoadBalancerListeners", + "elasticloadbalancing:DeleteTargetGroup", + "elasticloadbalancing:DeregisterInstancesFromLoadBalancer", + "elasticloadbalancing:DeregisterTargets", + "elasticloadbalancing:Describe*", + "elasticloadbalancing:DetachLoadBalancerFromSubnets", + "elasticloadbalancing:ModifyListener", + "elasticloadbalancing:ModifyLoadBalancerAttributes", + "elasticloadbalancing:ModifyTargetGroup", + "elasticloadbalancing:ModifyTargetGroupAttributes", + "elasticloadbalancing:RegisterInstancesWithLoadBalancer", + 
"elasticloadbalancing:RegisterTargets", + "elasticloadbalancing:SetLoadBalancerPoliciesForBackendServer", + "elasticloadbalancing:SetLoadBalancerPoliciesOfListener", + "kms:DescribeKey" + ], + "Resource": "*" + } + ] +} +---- +==== + +.`sts_instance_controlplane_permission_policy.json` for 4.8 +[%collapsible] +==== +[source,json] +---- +{ + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Action": [ + "ec2:AttachVolume", + "ec2:AuthorizeSecurityGroupIngress", + "ec2:CreateSecurityGroup", + "ec2:CreateTags", + "ec2:CreateVolume", + "ec2:DeleteSecurityGroup", + "ec2:DeleteVolume", + "ec2:Describe*", + "ec2:DetachVolume", + "ec2:ModifyInstanceAttribute", + "ec2:ModifyVolume", + "ec2:RevokeSecurityGroupIngress", + "elasticloadbalancing:AddTags", + "elasticloadbalancing:AttachLoadBalancerToSubnets", + "elasticloadbalancing:ApplySecurityGroupsToLoadBalancer", + "elasticloadbalancing:CreateListener", + "elasticloadbalancing:CreateLoadBalancer", + "elasticloadbalancing:CreateLoadBalancerPolicy", + "elasticloadbalancing:CreateLoadBalancerListeners", + "elasticloadbalancing:CreateTargetGroup", + "elasticloadbalancing:ConfigureHealthCheck", + "elasticloadbalancing:DeleteListener", + "elasticloadbalancing:DeleteLoadBalancer", + "elasticloadbalancing:DeleteLoadBalancerListeners", + "elasticloadbalancing:DeleteTargetGroup", + "elasticloadbalancing:DeregisterInstancesFromLoadBalancer", + "elasticloadbalancing:DeregisterTargets", + "elasticloadbalancing:Describe*", + "elasticloadbalancing:DetachLoadBalancerFromSubnets", + "elasticloadbalancing:ModifyListener", + "elasticloadbalancing:ModifyLoadBalancerAttributes", + "elasticloadbalancing:ModifyTargetGroup", + "elasticloadbalancing:ModifyTargetGroupAttributes", + "elasticloadbalancing:RegisterInstancesWithLoadBalancer", + "elasticloadbalancing:RegisterTargets", + "elasticloadbalancing:SetLoadBalancerPoliciesForBackendServer", + "elasticloadbalancing:SetLoadBalancerPoliciesOfListener", + "kms:DescribeKey" + ], + "Resource": "*" + } + ] +} +---- +==== + +.ROSA compute node role, policy, and policy files +[cols="1,2",options="header"] +|=== + +|Resource|Description + +|`ManagedOpenShift-Worker-Role` +|An IAM role used by the ROSA compute instances. + +|`ManagedOpenShift-Worker-Role-Policy` +|An inline IAM policy that provides the ROSA compute instances with the permissions required to manage their components. + +|=== + +.`sts_instance_worker_trust_policy.json` for all versions +[%collapsible] +==== +[source,json] +---- +{ + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Principal": { + "Service": [ + "ec2.amazonaws.com" + ] + }, + "Action": [ + "sts:AssumeRole" + ] + } + ] +} +---- +==== + +.`sts_instance_worker_permission_policy.json` for 4.7 +[%collapsible] +==== +[source,json] +---- +{ + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Action": [ + "ec2:DescribeInstances", + "ec2:DescribeRegions" + ], + "Resource": "*" + } + ] +} +---- +==== + +.`sts_instance_worker_permission_policy.json` for 4.8 +[%collapsible] +==== +[source,json] +---- +{ + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Action": [ + "ec2:DescribeInstances", + "ec2:DescribeRegions" + ], + "Resource": "*" + } + ] +} +---- +==== + +.ROSA support role, policy, and policy files +[cols="1,2",options="header"] +|=== + +|Resource|Description + +|`ManagedOpenShift-Support-Role` +|An IAM role used by the Red Hat Site Reliability Engineering (SRE) support team. 
+ +|`ManagedOpenShift-Support-Role-Policy` +|An inline IAM policy that provides the Red Hat SRE support team with the permissions required to support ROSA clusters. + +|=== + +.`sts_support_trust_policy.json` for all versions +[%collapsible] +==== +[source,json] +---- +{ + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Principal": { + "AWS": [ + "arn:aws:iam::710019948333:role/RH-Technical-Support-Access" + ] + }, + "Action": [ + "sts:AssumeRole" + ] + } + ] +} +---- +==== + +.`sts_support_permission_policy.json` for 4.7 +[%collapsible] +==== +[source,json] +---- +{ + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Action": [ + "cloudtrail:DescribeTrails", + "cloudtrail:LookupEvents", + "cloudwatch:GetMetricData", + "cloudwatch:GetMetricStatistics", + "cloudwatch:ListMetrics", + "ec2:CopySnapshot", + "ec2:CreateSnapshot", + "ec2:CreateSnapshots", + "ec2:DescribeAccountAttributes", + "ec2:DescribeAddresses", + "ec2:DescribeAddressesAttribute", + "ec2:DescribeAggregateIdFormat", + "ec2:DescribeAvailabilityZones", + "ec2:DescribeByoipCidrs", + "ec2:DescribeCapacityReservations", + "ec2:DescribeCarrierGateways", + "ec2:DescribeClassicLinkInstances", + "ec2:DescribeClientVpnAuthorizationRules", + "ec2:DescribeClientVpnConnections", + "ec2:DescribeClientVpnEndpoints", + "ec2:DescribeClientVpnRoutes", + "ec2:DescribeClientVpnTargetNetworks", + "ec2:DescribeCoipPools", + "ec2:DescribeCustomerGateways", + "ec2:DescribeDhcpOptions", + "ec2:DescribeEgressOnlyInternetGateways", + "ec2:DescribeIamInstanceProfileAssociations", + "ec2:DescribeIdFormat", + "ec2:DescribeIdentityIdFormat", + "ec2:DescribeImageAttribute", + "ec2:DescribeImages", + "ec2:DescribeInstanceAttribute", + "ec2:DescribeInstanceStatus", + "ec2:DescribeInstanceTypeOfferings", + "ec2:DescribeInstanceTypes", + "ec2:DescribeInstances", + "ec2:DescribeInternetGateways", + "ec2:DescribeIpv6Pools", + "ec2:DescribeKeyPairs", + "ec2:DescribeLaunchTemplates", + "ec2:DescribeLocalGatewayRouteTableVirtualInterfaceGroupAssociations", + "ec2:DescribeLocalGatewayRouteTableVpcAssociations", + "ec2:DescribeLocalGatewayRouteTables", + "ec2:DescribeLocalGatewayVirtualInterfaceGroups", + "ec2:DescribeLocalGatewayVirtualInterfaces", + "ec2:DescribeLocalGateways", + "ec2:DescribeNatGateways", + "ec2:DescribeNetworkAcls", + "ec2:DescribeNetworkInterfaces", + "ec2:DescribePlacementGroups", + "ec2:DescribePrefixLists", + "ec2:DescribePrincipalIdFormat", + "ec2:DescribePublicIpv4Pools", + "ec2:DescribeRegions", + "ec2:DescribeReservedInstances", + "ec2:DescribeRouteTables", + "ec2:DescribeScheduledInstances", + "ec2:DescribeSecurityGroupReferences", + "ec2:DescribeSecurityGroups", + "ec2:DescribeSnapshotAttribute", + "ec2:DescribeSnapshots", + "ec2:DescribeSpotFleetInstances", + "ec2:DescribeStaleSecurityGroups", + "ec2:DescribeSubnets", + "ec2:DescribeTags", + "ec2:DescribeTransitGatewayAttachments", + "ec2:DescribeTransitGatewayConnectPeers", + "ec2:DescribeTransitGatewayConnects", + "ec2:DescribeTransitGatewayMulticastDomains", + "ec2:DescribeTransitGatewayPeeringAttachments", + "ec2:DescribeTransitGatewayRouteTables", + "ec2:DescribeTransitGatewayVpcAttachments", + "ec2:DescribeTransitGateways", + "ec2:DescribeVolumeAttribute", + "ec2:DescribeVolumeStatus", + "ec2:DescribeVolumes", + "ec2:DescribeVolumesModifications", + "ec2:DescribeVpcAttribute", + "ec2:DescribeVpcClassicLink", + "ec2:DescribeVpcClassicLinkDnsSupport", + "ec2:DescribeVpcEndpointConnectionNotifications", + "ec2:DescribeVpcEndpointConnections", 
+ "ec2:DescribeVpcEndpointServiceConfigurations", + "ec2:DescribeVpcEndpointServicePermissions", + "ec2:DescribeVpcEndpointServices", + "ec2:DescribeVpcEndpoints", + "ec2:DescribeVpcPeeringConnections", + "ec2:DescribeVpcs", + "ec2:DescribeVpnConnections", + "ec2:DescribeVpnGateways", + "ec2:GetAssociatedIpv6PoolCidrs", + "ec2:GetTransitGatewayAttachmentPropagations", + "ec2:GetTransitGatewayMulticastDomainAssociations", + "ec2:GetTransitGatewayPrefixListReferences", + "ec2:GetTransitGatewayRouteTableAssociations", + "ec2:GetTransitGatewayRouteTablePropagations", + "ec2:RebootInstances", + "ec2:SearchLocalGatewayRoutes", + "ec2:SearchTransitGatewayMulticastGroups", + "ec2:SearchTransitGatewayRoutes", + "ec2:StartInstances", + "ec2:TerminateInstances", + "elasticloadbalancing:ConfigureHealthCheck", + "elasticloadbalancing:DescribeAccountLimits", + "elasticloadbalancing:DescribeInstanceHealth", + "elasticloadbalancing:DescribeListenerCertificates", + "elasticloadbalancing:DescribeListeners", + "elasticloadbalancing:DescribeLoadBalancerAttributes", + "elasticloadbalancing:DescribeLoadBalancerAttributes", + "elasticloadbalancing:DescribeLoadBalancerPolicies", + "elasticloadbalancing:DescribeLoadBalancerPolicyTypes", + "elasticloadbalancing:DescribeLoadBalancers", + "elasticloadbalancing:DescribeLoadBalancers", + "elasticloadbalancing:DescribeRules", + "elasticloadbalancing:DescribeSSLPolicies", + "elasticloadbalancing:DescribeTags", + "elasticloadbalancing:DescribeTags", + "elasticloadbalancing:DescribeTargetGroupAttributes", + "elasticloadbalancing:DescribeTargetGroups", + "elasticloadbalancing:DescribeTargetHealth", + "route53:GetHostedZone", + "route53:GetHostedZoneCount", + "route53:ListHostedZones", + "route53:ListHostedZonesByName", + "route53:ListResourceRecordSets", + "s3:GetBucketTagging", + "s3:GetObjectAcl", + "s3:GetObjectTagging", + "s3:ListAllMyBuckets" + ], + "Resource": "*" + }, + { + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": [ + "arn:aws:s3:::managed-velero*", + "arn:aws:s3:::*image-registry*" + ] + } + ] +} +---- +==== + +.`sts_support_permission_policy.json` for 4.8 +[%collapsible] +==== +[source,json] +---- +{ + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Action": [ + "cloudtrail:DescribeTrails", + "cloudtrail:LookupEvents", + "cloudwatch:GetMetricData", + "cloudwatch:GetMetricStatistics", + "cloudwatch:ListMetrics", + "ec2:CopySnapshot", + "ec2:CreateSnapshot", + "ec2:CreateSnapshots", + "ec2:DescribeAccountAttributes", + "ec2:DescribeAddresses", + "ec2:DescribeAddressesAttribute", + "ec2:DescribeAggregateIdFormat", + "ec2:DescribeAvailabilityZones", + "ec2:DescribeByoipCidrs", + "ec2:DescribeCapacityReservations", + "ec2:DescribeCarrierGateways", + "ec2:DescribeClassicLinkInstances", + "ec2:DescribeClientVpnAuthorizationRules", + "ec2:DescribeClientVpnConnections", + "ec2:DescribeClientVpnEndpoints", + "ec2:DescribeClientVpnRoutes", + "ec2:DescribeClientVpnTargetNetworks", + "ec2:DescribeCoipPools", + "ec2:DescribeCustomerGateways", + "ec2:DescribeDhcpOptions", + "ec2:DescribeEgressOnlyInternetGateways", + "ec2:DescribeIamInstanceProfileAssociations", + "ec2:DescribeIdFormat", + "ec2:DescribeIdentityIdFormat", + "ec2:DescribeImageAttribute", + "ec2:DescribeImages", + "ec2:DescribeInstanceAttribute", + "ec2:DescribeInstanceStatus", + "ec2:DescribeInstanceTypeOfferings", + "ec2:DescribeInstanceTypes", + "ec2:DescribeInstances", + "ec2:DescribeInternetGateways", + "ec2:DescribeIpv6Pools", + "ec2:DescribeKeyPairs", + 
"ec2:DescribeLaunchTemplates", + "ec2:DescribeLocalGatewayRouteTableVirtualInterfaceGroupAssociations", + "ec2:DescribeLocalGatewayRouteTableVpcAssociations", + "ec2:DescribeLocalGatewayRouteTables", + "ec2:DescribeLocalGatewayVirtualInterfaceGroups", + "ec2:DescribeLocalGatewayVirtualInterfaces", + "ec2:DescribeLocalGateways", + "ec2:DescribeNatGateways", + "ec2:DescribeNetworkAcls", + "ec2:DescribeNetworkInterfaces", + "ec2:DescribePlacementGroups", + "ec2:DescribePrefixLists", + "ec2:DescribePrincipalIdFormat", + "ec2:DescribePublicIpv4Pools", + "ec2:DescribeRegions", + "ec2:DescribeReservedInstances", + "ec2:DescribeRouteTables", + "ec2:DescribeScheduledInstances", + "ec2:DescribeSecurityGroupReferences", + "ec2:DescribeSecurityGroups", + "ec2:DescribeSnapshotAttribute", + "ec2:DescribeSnapshots", + "ec2:DescribeSpotFleetInstances", + "ec2:DescribeStaleSecurityGroups", + "ec2:DescribeSubnets", + "ec2:DescribeTags", + "ec2:DescribeTransitGatewayAttachments", + "ec2:DescribeTransitGatewayConnectPeers", + "ec2:DescribeTransitGatewayConnects", + "ec2:DescribeTransitGatewayMulticastDomains", + "ec2:DescribeTransitGatewayPeeringAttachments", + "ec2:DescribeTransitGatewayRouteTables", + "ec2:DescribeTransitGatewayVpcAttachments", + "ec2:DescribeTransitGateways", + "ec2:DescribeVolumeAttribute", + "ec2:DescribeVolumeStatus", + "ec2:DescribeVolumes", + "ec2:DescribeVolumesModifications", + "ec2:DescribeVpcAttribute", + "ec2:DescribeVpcClassicLink", + "ec2:DescribeVpcClassicLinkDnsSupport", + "ec2:DescribeVpcEndpointConnectionNotifications", + "ec2:DescribeVpcEndpointConnections", + "ec2:DescribeVpcEndpointServiceConfigurations", + "ec2:DescribeVpcEndpointServicePermissions", + "ec2:DescribeVpcEndpointServices", + "ec2:DescribeVpcEndpoints", + "ec2:DescribeVpcPeeringConnections", + "ec2:DescribeVpcs", + "ec2:DescribeVpnConnections", + "ec2:DescribeVpnGateways", + "ec2:GetAssociatedIpv6PoolCidrs", + "ec2:GetTransitGatewayAttachmentPropagations", + "ec2:GetTransitGatewayMulticastDomainAssociations", + "ec2:GetTransitGatewayPrefixListReferences", + "ec2:GetTransitGatewayRouteTableAssociations", + "ec2:GetTransitGatewayRouteTablePropagations", + "ec2:RebootInstances", + "ec2:SearchLocalGatewayRoutes", + "ec2:SearchTransitGatewayMulticastGroups", + "ec2:SearchTransitGatewayRoutes", + "ec2:StartInstances", + "ec2:TerminateInstances", + "elasticloadbalancing:ConfigureHealthCheck", + "elasticloadbalancing:DescribeAccountLimits", + "elasticloadbalancing:DescribeInstanceHealth", + "elasticloadbalancing:DescribeListenerCertificates", + "elasticloadbalancing:DescribeListeners", + "elasticloadbalancing:DescribeLoadBalancerAttributes", + "elasticloadbalancing:DescribeLoadBalancerAttributes", + "elasticloadbalancing:DescribeLoadBalancerPolicies", + "elasticloadbalancing:DescribeLoadBalancerPolicyTypes", + "elasticloadbalancing:DescribeLoadBalancers", + "elasticloadbalancing:DescribeLoadBalancers", + "elasticloadbalancing:DescribeRules", + "elasticloadbalancing:DescribeSSLPolicies", + "elasticloadbalancing:DescribeTags", + "elasticloadbalancing:DescribeTags", + "elasticloadbalancing:DescribeTargetGroupAttributes", + "elasticloadbalancing:DescribeTargetGroups", + "elasticloadbalancing:DescribeTargetHealth", + "route53:GetHostedZone", + "route53:GetHostedZoneCount", + "route53:ListHostedZones", + "route53:ListHostedZonesByName", + "route53:ListResourceRecordSets", + "s3:GetBucketTagging", + "s3:GetObjectAcl", + "s3:GetObjectTagging", + "s3:ListAllMyBuckets" + ], + "Resource": "*" + }, + { + "Effect": "Allow", + 
"Action": "s3:ListBucket", + "Resource": [ + "arn:aws:s3:::managed-velero*", + "arn:aws:s3:::*image-registry*" + ] + } + ] +} +---- +==== + +.ROSA Ingress Operator IAM policy and policy file +[cols="1,2",options="header"] +|=== + +|Resource|Description + +|`ManagedOpenShift-openshift-ingress-operator-cloud-credentials` +|A managed IAM policy that provides the ROSA Ingress Operator with the permissions required to manage external access to a cluster. + +|=== + +.`openshift_ingress_operator_cloud_credentials_policy.json` for 4.7 +[%collapsible] +==== +[source,json] +---- +{ + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Action": [ + "elasticloadbalancing:DescribeLoadBalancers", + "route53:ListHostedZones", + "route53:ChangeResourceRecordSets", + "tag:GetResources" + ], + "Resource": "*" + } + ] +} +---- +==== + +.`openshift_ingress_operator_cloud_credentials_policy.json` for 4.8 +[%collapsible] +==== +[source,json] +---- +{ + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Action": [ + "elasticloadbalancing:DescribeLoadBalancers", + "route53:ListHostedZones", + "route53:ChangeResourceRecordSets", + "tag:GetResources" + ], + "Resource": "*" + } + ] +} +---- +==== + +.ROSA back-end storage IAM policy and policy file +[cols="1,2",options="header"] +|=== + +|Resource|Description + +|`ManagedOpenShift-openshift-cluster-csi-drivers-ebs-cloud-credentials` +|A managed IAM policy required by ROSA to manage back-end storage through the Container Storage Interface (CSI). + +|=== + +.`openshift_cluster_csi_drivers_ebs_cloud_credentials_policy.json` for 4.7 +[%collapsible] +==== +[source,json] +---- +{ + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Action": [ + "ec2:AttachVolume", + "ec2:CreateSnapshot", + "ec2:CreateTags", + "ec2:CreateVolume", + "ec2:DeleteSnapshot", + "ec2:DeleteTags", + "ec2:DeleteVolume", + "ec2:DescribeInstances", + "ec2:DescribeSnapshots", + "ec2:DescribeTags", + "ec2:DescribeVolumes", + "ec2:DescribeVolumesModifications", + "ec2:DetachVolume", + "ec2:ModifyVolume" + ], + "Resource": "*" + } + ] +} +---- +==== + +.`openshift_cluster_csi_drivers_ebs_cloud_credentials_policy.json` for 4.8 +[%collapsible] +==== +[source,json] +---- +{ + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Action": [ + "ec2:AttachVolume", + "ec2:CreateSnapshot", + "ec2:CreateTags", + "ec2:CreateVolume", + "ec2:DeleteSnapshot", + "ec2:DeleteTags", + "ec2:DeleteVolume", + "ec2:DescribeInstances", + "ec2:DescribeSnapshots", + "ec2:DescribeTags", + "ec2:DescribeVolumes", + "ec2:DescribeVolumesModifications", + "ec2:DetachVolume", + "ec2:ModifyVolume" + ], + "Resource": "*" + } + ] +} +---- +==== + +.ROSA Machine Config Operator policy and policy file +[cols="1,2",options="header"] +|=== + +|Resource|Description + +|`ManagedOpenShift-openshift-machine-api-aws-cloud-credentials` +|A managed IAM policy that provides the ROSA Machine Config Operator with the permissions required to perform core cluster functionality. 
+ +|=== + +.`openshift_machine_api_aws_cloud_credentials_policy.json` for 4.7 +[%collapsible] +==== +[source,json] +---- +{ + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Action": [ + "ec2:CreateTags", + "ec2:DescribeAvailabilityZones", + "ec2:DescribeDhcpOptions", + "ec2:DescribeImages", + "ec2:DescribeInstances", + "ec2:DescribeSecurityGroups", + "ec2:DescribeSubnets", + "ec2:DescribeVpcs", + "ec2:RunInstances", + "ec2:TerminateInstances", + "elasticloadbalancing:DescribeLoadBalancers", + "elasticloadbalancing:DescribeTargetGroups", + "elasticloadbalancing:RegisterInstancesWithLoadBalancer", + "elasticloadbalancing:RegisterTargets", + "iam:PassRole", + "iam:CreateServiceLinkedRole" + ], + "Resource": "*" + }, + { + "Effect": "Allow", + "Action": [ + "kms:Decrypt", + "kms:Encrypt", + "kms:GenerateDataKey", + "kms:GenerateDataKeyWithoutPlainText", + "kms:DescribeKey" + ], + "Resource": "*" + }, + { + "Effect": "Allow", + "Action": [ + "kms:RevokeGrant", + "kms:CreateGrant", + "kms:ListGrants" + ], + "Resource": "*", + "Condition": { + "Bool": { + "kms:GrantIsForAWSResource": true + } + } + } + ] +} +---- +==== + +.`openshift_machine_api_aws_cloud_credentials_policy.json` for 4.8 +[%collapsible] +==== +[source,json] +---- +{ + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Action": [ + "ec2:CreateTags", + "ec2:DescribeAvailabilityZones", + "ec2:DescribeDhcpOptions", + "ec2:DescribeImages", + "ec2:DescribeInstances", + "ec2:DescribeSecurityGroups", + "ec2:DescribeSubnets", + "ec2:DescribeVpcs", + "ec2:RunInstances", + "ec2:TerminateInstances", + "elasticloadbalancing:DescribeLoadBalancers", + "elasticloadbalancing:DescribeTargetGroups", + "elasticloadbalancing:RegisterInstancesWithLoadBalancer", + "elasticloadbalancing:RegisterTargets", + "elasticloadbalancing:DeregisterTargets", + "iam:PassRole", + "iam:CreateServiceLinkedRole" + ], + "Resource": "*" + }, + { + "Effect": "Allow", + "Action": [ + "kms:Decrypt", + "kms:Encrypt", + "kms:GenerateDataKey", + "kms:GenerateDataKeyWithoutPlainText", + "kms:DescribeKey" + ], + "Resource": "*" + }, + { + "Effect": "Allow", + "Action": [ + "kms:RevokeGrant", + "kms:CreateGrant", + "kms:ListGrants" + ], + "Resource": "*", + "Condition": { + "Bool": { + "kms:GrantIsForAWSResource": true + } + } + } + ] +} +---- +==== + +.ROSA Cloud Credential Operator policy and policy file +[cols="1,2",options="header"] +|=== + +|Resource|Description + +|`ManagedOpenShift-openshift-cloud-credential-operator-cloud-credentials` +|A managed IAM policy that provides the ROSA Cloud Credential Operator with the permissions required to manage cloud provider credentials. 
+ +|=== + +.`openshift_cloud_credential_operator_cloud_credential_operator_iam_ro_creds_policy.json` for 4.7 +[%collapsible] +==== +[source,json] +---- +{ + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Action": [ + "iam:GetUser", + "iam:GetUserPolicy", + "iam:ListAccessKeys" + ], + "Resource": "*" + } + ] +} +---- +==== + +.`openshift_cloud_credential_operator_cloud_credential_operator_iam_ro_creds_policy.json` for 4.8 +[%collapsible] +==== +[source,json] +---- +{ + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Action": [ + "iam:GetUser", + "iam:GetUserPolicy", + "iam:ListAccessKeys" + ], + "Resource": "*" + } + ] +} +---- +==== + +.ROSA Image Registry Operator policy and policy file +[cols="1,2",options="header"] +|=== + +|Resource|Description + +|`ManagedOpenShift-openshift-image-registry-installer-cloud-credentials` +|A managed IAM policy that provides the ROSA Image Registry Operator with the permissions required to manage the internal registry storage in AWS S3 for a cluster. + +|=== + +.`openshift_image_registry_installer_cloud_credentials_policy.json` for 4.7 +[%collapsible] +==== +[source,json] +---- +{ + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Action": [ + "s3:CreateBucket", + "s3:DeleteBucket", + "s3:PutBucketTagging", + "s3:GetBucketTagging", + "s3:PutBucketPublicAccessBlock", + "s3:GetBucketPublicAccessBlock", + "s3:PutEncryptionConfiguration", + "s3:GetEncryptionConfiguration", + "s3:PutLifecycleConfiguration", + "s3:GetLifecycleConfiguration", + "s3:GetBucketLocation", + "s3:ListBucket", + "s3:GetObject", + "s3:PutObject", + "s3:DeleteObject", + "s3:ListBucketMultipartUploads", + "s3:AbortMultipartUpload", + "s3:ListMultipartUploadParts" + ], + "Resource": "*" + } + ] +} +---- +==== + +.`openshift_image_registry_installer_cloud_credentials_policy.json` for 4.8 +[%collapsible] +==== +[source,json] +---- +{ + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Action": [ + "s3:CreateBucket", + "s3:DeleteBucket", + "s3:PutBucketTagging", + "s3:GetBucketTagging", + "s3:PutBucketPublicAccessBlock", + "s3:GetBucketPublicAccessBlock", + "s3:PutEncryptionConfiguration", + "s3:GetEncryptionConfiguration", + "s3:PutLifecycleConfiguration", + "s3:GetLifecycleConfiguration", + "s3:GetBucketLocation", + "s3:ListBucket", + "s3:GetObject", + "s3:PutObject", + "s3:DeleteObject", + "s3:ListBucketMultipartUploads", + "s3:AbortMultipartUpload", + "s3:ListMultipartUploadParts" + ], + "Resource": "*" + } + ] +} +---- +==== diff --git a/modules/rosa-sts-aws-iam.adoc b/modules/rosa-sts-aws-iam.adoc new file mode 100644 index 0000000000..d167477092 --- /dev/null +++ b/modules/rosa-sts-aws-iam.adoc @@ -0,0 +1,4 @@ +[id="rosa-sts-policy-iam_{context}"] += Red Hat managed IAM references for AWS + +With the STS deployment model, Red Hat is no longer responsible for creating and managing Amazon Web Services (AWS) IAM policies, IAM users, or IAM roles. 
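+
+Because these roles and policies live in your own AWS account, you can review them directly with the `aws` CLI. The following commands are a minimal sketch; the `ManagedOpenShift` names are the defaults, so substitute your own prefix if you defined a custom one when the roles were created.
+
+[source,terminal]
+----
+$ aws iam list-roles --query 'Roles[?starts_with(RoleName, `ManagedOpenShift`)].RoleName' <1>
+$ aws iam list-role-policies --role-name ManagedOpenShift-Support-Role <2>
+$ aws iam list-attached-role-policies --role-name ManagedOpenShift-Support-Role <3>
+----
+<1> Lists the IAM roles in the account that use the default `ManagedOpenShift` prefix.
+<2> Lists any inline IAM policies on the support role.
+<3> Lists any managed IAM policies attached to the support role.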
diff --git a/modules/rosa-sts-aws-requirements.adoc b/modules/rosa-sts-aws-requirements.adoc
new file mode 100644
index 0000000000..e6c5fc1b3e
--- /dev/null
+++ b/modules/rosa-sts-aws-requirements.adoc
@@ -0,0 +1,42 @@
+// Module included in the following assemblies:
+//
+// * rosa_getting_started_sts/rosa-sts-aws-prereqs.adoc
+
+[id="rosa-sts-customer-requirements_{context}"]
+= Customer requirements when using STS for deployment
+
+The following prerequisites must be completed before you deploy a {product-title} (ROSA) cluster that uses the AWS Security Token Service (STS).
+
+[id="rosa-account_{context}"]
+== Account
+* The customer ensures that the link:https://docs.aws.amazon.com/general/latest/gr/aws_service_limits.html[AWS limits] are sufficient to support {product-title} provisioned within the customer's AWS account.
+* If SCP policies are applied and enforced, these policies must not be more restrictive than the roles and policies required by the cluster.
+* The customer's AWS account should not be transferable to Red Hat.
+* The customer should not impose additional AWS usage restrictions on Red Hat activities beyond the defined roles and policies. Imposing restrictions will severely hinder Red Hat's ability to respond to incidents.
+* The customer may deploy native AWS services within the same AWS account.
+* The account must have a service-linked role set up because it is required for elastic load balancers (ELBs) to be configured. See the example at the end of this module.
++
+[NOTE]
+====
+Customers are encouraged, but not mandated, to deploy resources in a Virtual Private Cloud (VPC) separate from the VPC hosting {product-title} and other Red Hat supported services.
+====
+
+[id="rosa-access-requirements_{context}"]
+== Access requirements
+
+* Red Hat must have AWS console access to the customer-provided AWS account. This access is protected and managed by Red Hat.
+* The customer must not use the AWS account to elevate their permissions within the {product-title} cluster.
+* Actions available in the `rosa` CLI utility or link:https://cloud.redhat.com/openshift[OpenShift Cluster Manager (OCM)] console must not be directly performed in the customer's AWS account.
+
+[id="rosa-support-requirements_{context}"]
+== Support requirements
+* Red Hat recommends that the customer have at least link:https://aws.amazon.com/premiumsupport/plans/[Business Support] from AWS.
+* Red Hat may have permission from the customer to request AWS support on their behalf.
+* Red Hat may have permission from the customer to request AWS resource limit increases on the customer's account.
+* Red Hat manages the restrictions, limitations, expectations, and defaults for all {product-title} clusters in the same manner, unless otherwise specified in this requirements section.
+
+[id="rosa-security-requirements_{context}"]
+== Security requirements
+* Volume snapshots will remain within the customer's AWS account and customer-specified region.
+* Red Hat must have ingress access to EC2 hosts and the API server from allow-listed IP addresses.
+* Red Hat must have egress allowed to the documented domains.
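+
+The service-linked role requirement in the _Account_ section can be checked with the `aws` CLI. The following sketch confirms that the Elastic Load Balancing service-linked role exists in the account and creates it if it does not. The command only needs to be run once per AWS account:
+
+[source,terminal]
+----
+$ aws iam get-role --role-name "AWSServiceRoleForElasticLoadBalancing" || aws iam create-service-linked-role --aws-service-name "elasticloadbalancing.amazonaws.com"
+----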
diff --git a/modules/rosa-sts-creating-a-cluster-quickly.adoc b/modules/rosa-sts-creating-a-cluster-quickly.adoc new file mode 100644 index 0000000000..5a133aac32 --- /dev/null +++ b/modules/rosa-sts-creating-a-cluster-quickly.adoc @@ -0,0 +1,98 @@ +// Module included in the following assemblies: +// +// * rosa_getting_started_sts/rosa_creating_a_cluster_with_sts/rosa-sts-creating-a-cluster-quickly.adoc + +[id="rosa-sts-creating-cluster-using-defaults{context}"] += Creating a ROSA cluster with STS using the default options + +Through the {product-title} CLI (`rosa`), you can quickly create an OpenShift cluster that uses the AWS Security Token Service (STS). + +Additionally, you can use `auto` mode to immediately create the required AWS Identity and Access Management (IAM) resources using the current AWS account. `auto` mode is used in the following procedure to immediately create the account-wide IAM roles and policies, including the Operator policies, as well as the OpenID Connect (OIDC) identity provider. + +[IMPORTANT] +==== +Only public and AWS PrivateLink clusters are supported with STS. Regular private clusters (non-PrivateLink) are not available for use with STS. +==== + +[NOTE] +==== +link:https://docs.aws.amazon.com/vpc/latest/userguide/vpc-sharing.html[AWS Shared VPCs] are not currently supported for ROSA installations. +==== + +.Prerequisites + +* You have completed the AWS prerequisites for ROSA with STS. +* You have available AWS service quotas. +* You have enabled the ROSA service in the AWS Console. +* You have installed and configured the latest AWS, ROSA, and `oc` CLIs on your installation host. + +.Procedure + +. Create the required account-wide roles and policies, including the Operator policies: ++ +[source,terminal] +---- +$ rosa create account-roles --mode auto +---- ++ +You can optionally specify an OpenShift minor release, for example `4.8`, by using the `--version` option. The latest stable version is assumed if the option is not included. The account-wide roles and policies are specific to an OpenShift minor release version and are backward compatible. ++ +[NOTE] +==== +When using `auto` mode, you can optionally specify the `-y` argument to bypass the interactive prompts and automatically confirm operations. +==== + +. Create a cluster with STS using the defaults. When you use the defaults, the latest stable OpenShift version is installed: ++ +[source,terminal] +---- +$ rosa create cluster --cluster-name --sts <1> +---- +<1> Replace `` with the name of your cluster. ++ +[IMPORTANT] +==== +You must complete the following steps to create the Operator IAM roles and the OpenID Connect (OIDC) provider to move the state of the cluster to `ready`. +==== + +. Create the cluster-specific Operator IAM roles: ++ +[source,terminal] +---- +$ rosa create operator-roles --mode auto --cluster +---- + +. Create the OIDC provider that the cluster Operators use to authenticate: ++ +[source,terminal] +---- +$ rosa create oidc-provider --mode auto --cluster +---- + +. 
Check the status of your cluster: ++ +[source,terminal] +---- +$ rosa describe cluster --cluster +---- ++ +The following `State` field changes are listed in the output as the cluster installation progresses: ++ +* `waiting (Waiting for OIDC configuration)` +* `pending (Preparing account)` +* `installing (DNS setup in progress)` +* `installing` +* `ready` ++ +[NOTE] +==== +If installation fails or the `State` field does not change to `ready` after about 40 minutes, check the installation troubleshooting documentation for more details. +==== + +. Track the progress of the cluster creation by watching the OpenShift installer logs: ++ +[source,terminal] +---- +$ rosa logs install --cluster --watch <1> +---- +<1> Specify the `--watch` flag to watch for new log messages as the installation progresses. This argument is optional. diff --git a/modules/rosa-sts-creating-a-cluster-with-customizations.adoc b/modules/rosa-sts-creating-a-cluster-with-customizations.adoc new file mode 100644 index 0000000000..3b152c95f9 --- /dev/null +++ b/modules/rosa-sts-creating-a-cluster-with-customizations.adoc @@ -0,0 +1,238 @@ +// Module included in the following assemblies: +// +// * rosa_getting_started_sts/rosa_creating_a_cluster_with_sts/rosa-sts-creating-a-cluster-with-customizations.adoc + +[id="rosa-sts-creating-cluster-customizations_{context}"] += Creating a cluster with STS using customizations + +When you create a {product-title} (ROSA) cluster that uses the AWS Security Token Service (STS), you can customize your installation interactively. When you run `rosa create cluster --interactive` at cluster creation time, you are presented with a series of interactive prompts that enable you to customize your deployment. For more information, see _Interactive cluster creation mode reference_. + +There are two `rosa` CLI modes for deploying a cluster with STS: + +* `manual` mode. With this mode, the `rosa` CLI generates the `aws` commands needed to create the required AWS Identity and Access Management (IAM) roles and policies, and an OpenID Connect (OIDC) provider. The corresponding policy JSON files are also saved to the current directory. This enables you to review the details before running the `aws` commands manually. +* `auto` mode. You can use this mode to immediately create the required AWS Identity and Access Management (IAM) resources using the current AWS account. + +[IMPORTANT] +==== +Only public and AWS PrivateLink clusters are supported with STS. Regular private clusters (non-PrivateLink) are not available for use with STS. +==== + +[NOTE] +==== +link:https://docs.aws.amazon.com/vpc/latest/userguide/vpc-sharing.html[AWS Shared VPCs] are not currently supported for ROSA installations. +==== + +.Prerequisites + +* You have completed the AWS prerequisites for ROSA with STS. +* You have available AWS service quotas. +* You have enabled the ROSA service in the AWS Console. +* You have installed and configured the latest AWS, ROSA, and `oc` CLIs on your installation host. +* If you are using a customer-managed AWS Key Management Service (KMS) key for encryption, you have created a symmetric KMS key and you have the key ID and Amazon Resource Name (ARN). For more information about creating AWS KMS keys, see link:https://docs.aws.amazon.com/kms/latest/developerguide/create-keys.html[the AWS documenation]. + +.Procedure + +. Create the required account-wide roles and policies, including the Operator policies: +.. 
Generate the IAM policy JSON files in the current working directory and output the `aws` CLI commands for review: ++ +[source,terminal] +---- +$ rosa create account-roles --mode manual <1> +---- +<1> `manual` mode generates the `aws` CLI commands and JSON files needed to create the account-wide roles and policies. After review, you must run the commands manually to create the resources. ++ +You can optionally specify an OpenShift minor release, for example `4.8`, by using the `--version` option. The latest stable version is assumed if the option is not included. The account-wide roles and policies are specific to an OpenShift minor release version and are backward compatible. ++ +.. After review, run the `aws` commands manually to create the roles and policies. Alternatively, you can run the preceding command using `--mode auto` to run the `aws` commands immediately. + +. (Optional) If you are using your own AWS KMS key to encrypt the control plane data volumes and the persistent volumes (PVs) for your applications, add the ARN for the account-wide installer role to your KMS key policy. +.. Save the key policy for your KMS key to a file on your local machine. The following example saves the output to `kms-key-policy.json` in the current working directory: ++ +[source,terminal] +---- +$ aws kms get-key-policy --key-id --policy-name default --output text > kms-key-policy.json <1> +---- +<1> Replace `` with the ID or ARN of your KMS key. ++ +.. Add the ARN for the account-wide installer role that you created in the preceding step to the `Statement.Principal.AWS` section in the file. In the following example, the ARN for the default `ManagedOpenShift-Installer-Role` role is added: ++ +[source,json] +---- +{ + "Version": "2012-10-17", + "Id": "key-default-1", + "Statement": [ + { + "Sid": "Enable IAM User Permissions", + "Effect": "Allow", + "Principal": { + "AWS": [ + "arn:aws:iam:::root", + "arn:aws:iam:::role/ManagedOpenShift-Installer-Role" <1> + ] + }, + "Action": "kms:*", + "Resource": "*" + } + ] +} +---- +<1> You must specify the ARN for the account-wide role that will be used when you create the ROSA cluster. The ARNs listed in the section must be comma-separated. + +.. Apply the changes to your KMS key policy: ++ +[source,terminal] +---- +$ aws kms put-key-policy --key-id \ <1> + --policy file://kms-key-policy.json \ <2> + --policy-name default +---- +<1> Replace `` with the ID or ARN of your KMS key. +<2> You must include the `file://` prefix when referencing a key policy in a local file. ++ +You can reference the ARN of your KMS key when you create the cluster in the next step. + +. Create a cluster with STS using custom installation options. You can use the `--interactive` mode to interactively specify custom settings: ++ +[source,terminal] +---- +$ rosa create cluster --interactive --sts +---- ++ +.Example output +[source,terminal] +---- +I: Interactive mode enabled. +Any optional fields can be left empty and a default will be selected. +? Cluster name: +? OpenShift version: 4.8.9 <1> +I: Using arn:aws:iam:::role/ManagedOpenShift-Installer-Role for the Installer role <2> +I: Using arn:aws:iam:::role/ManagedOpenShift-ControlPlane-Role for the ControlPlane role +I: Using arn:aws:iam:::role/ManagedOpenShift-Worker-Role for the Worker role +I: Using arn:aws:iam:::role/ManagedOpenShift-Support-Role for the Support role +? External ID (optional): +? Operator roles prefix: - +? Multiple availability zones (optional): No <3> +? AWS region: us-east-1 +? 
PrivateLink cluster (optional): No +? Install into an existing VPC (optional): No +? Enable Customer Managed key (optional): No <4> +? Compute nodes instance type (optional): +? Enable autoscaling (optional): No +? Compute nodes: 2 +? Machine CIDR: 10.0.0.0/16 +? Service CIDR: 172.30.0.0/16 +? Pod CIDR: 10.128.0.0/14 +? Host prefix: 23 +? Disable Workload monitoring (optional): No +I: Creating cluster '' +I: To create this cluster again in the future, you can run: + rosa create cluster --cluster-name --role-arn arn:aws:iam:::role/ManagedOpenShift-Installer-Role --support-role-arn arn:aws:iam:::role/ManagedOpenShift-Support-Role --master-iam-role arn:aws:iam:::role/ManagedOpenShift-ControlPlane-Role --worker-iam-role arn:aws:iam:::role/ManagedOpenShift-Worker-Role --operator-roles-prefix - --region us-east-1 --version 4.8.9 --compute-nodes 2 --machine-cidr 10.0.0.0/16 --service-cidr 172.30.0.0/16 --pod-cidr 10.128.0.0/14 --host-prefix 23 <5> +I: To view a list of clusters and their status, run 'rosa list clusters' +I: Cluster '' has been created. +I: Once the cluster is installed you will need to add an Identity Provider before you can login into the cluster. See 'rosa create idp --help' for more information. +I: To determine when your cluster is Ready, run 'rosa describe cluster -c '. +I: To watch your cluster installation logs, run 'rosa logs install -c --watch'. +---- +<1> When creating the cluster, the listed `OpenShift version` options include the major, minor, and patch versions, for example `4.8.9`. +<2> If more than one matching set of account-wide roles are available in your account for a cluster version, an interactive list of options is provided. +<3> Multiple availability zones are recommended for production workloads. The default is a single availability zone. +<4> Enable this option if you are using your own AWS KMS key to encrypt the control plane data volumes and the PVs for your applications. Specify the ARN for the KMS key that you added the account-wide role ARN to in the preceding step. +<5> The output includes a custom command that you can run to create a cluster with the same configuration in the future. ++ +[NOTE] +==== +As an alternative to using the `--interactive` mode, you can specify the customization options directly when you run `rosa create cluster`. Run `rosa create cluster --help` to view a list of available CLI options. +==== ++ +[IMPORTANT] +==== +You must complete the following steps to create the Operator IAM roles and the OpenID Connect (OIDC) provider to move the state of the cluster to `ready`. +==== + +. Create the cluster-specific Operator IAM roles: +.. Generate the Operator IAM policy JSON files in the current working directory and output the `aws` CLI commands for review: ++ +[source,terminal] +---- +$ rosa create operator-roles --mode manual --cluster <1> +---- +<1> `manual` mode generates the `aws` CLI commands and JSON files needed to create the Operator roles. After review, you must run the commands manually to create the resources. +.. After review, run the `aws` commands manually to create the Operator IAM roles and attach the managed Operator policies to them. Alternatively, you can run the preceding command again using `--mode auto` to run the `aws` commands immediately. + +. Create the OpenID Connect (OIDC) provider that the cluster Operators use to authenticate: ++ +[source,terminal] +---- +$ rosa create oidc-provider --mode auto --cluster <1> +---- +<1> `auto` mode immediately runs the `aws` CLI command that creates the OIDC provider. 
+ +. Check the status of your cluster: ++ +[source,terminal] +---- +$ rosa describe cluster --cluster +---- ++ +.Example output +[source,terminal] +---- +Name: +ID: +External ID: +OpenShift Version: +Channel Group: stable +DNS: .xxxx.p1.openshiftapps.com +AWS Account: +API URL: https://api..xxxx.p1.openshiftapps.com:6443 +Console URL: https://console-openshift-console.apps..xxxx.p1.openshiftapps.com +Region: +Multi-AZ: false +Nodes: + - Master: 3 + - Infra: 2 + - Compute: 2 +Network: + - Service CIDR: 172.30.0.0/16 + - Machine CIDR: 10.0.0.0/16 + - Pod CIDR: 10.128.0.0/14 + - Host Prefix: /23 +STS Role ARN: arn:aws:iam:::role/ManagedOpenShift-Installer-Role +Support Role ARN: arn:aws:iam:::role/ManagedOpenShift-Support-Role +Instance IAM Roles: + - Master: arn:aws:iam:::role/ManagedOpenShift-ControlPlane-Role + - Worker: arn:aws:iam:::role/ManagedOpenShift-Worker-Role +Operator IAM Roles: + - arn:aws:iam:::role/-xxxx-openshift-ingress-operator-cloud-credentials + - arn:aws:iam:::role/:role/:role/:role/ +OIDC Endpoint URL: https://rh-oidc.s3..amazonaws.com/ +---- ++ +The following `State` field changes are listed in the output as the cluster installation progresses: ++ +* `waiting (Waiting for OIDC configuration)` +* `pending (Preparing account)` +* `installing (DNS setup in progress)` +* `installing` +* `ready` ++ +[NOTE] +==== +If installation fails or the `State` field does not change to `ready` after about 40 minutes, check the installation troubleshooting documentation for more details. +==== + +. Track the progress of the cluster creation by watching the OpenShift installer logs: ++ +[source,terminal] +---- +$ rosa logs install --cluster --watch <1> +---- +<1> Specify the `--watch` flag to watch for new log messages as the installation progresses. This argument is optional. diff --git a/modules/rosa-sts-interactive-mode-reference.adoc b/modules/rosa-sts-interactive-mode-reference.adoc new file mode 100644 index 0000000000..11dfb066a6 --- /dev/null +++ b/modules/rosa-sts-interactive-mode-reference.adoc @@ -0,0 +1,70 @@ +// Module included in the following assemblies: +// +// * rosa_getting_started_sts/rosa_creating_a_cluster_with_sts/rosa-sts-interactive-mode-reference.adoc + +[id="rosa-sts-understanding-interactive-mode-options_{context}"] += Understanding the interactive cluster creation mode options + +You can create a {product-title} cluster with the AWS Security Token Service (STS) by using the interactive mode. You can enable the mode by specifying the `--interactive` option when you run `rosa create cluster`. The following table describes the interactive mode options. + +.`--interactive` mode options +[cols="1,2",options="header"] +|=== + +|Field|Description + +|`Cluster name` +|Enter a name for your cluster, for example `my-rosa-cluster`. + +|`Deploy cluster using AWS STS` +|Create an OpenShift cluster that uses the AWS Security Token Service (STS) to allocate temporary, limited-privilege credentials for component-specific AWS Identity and Access Management (IAM) roles. The service enables cluster components to make AWS API calls using secure cloud resource management practices. + +|`OpenShift version` +|Select the version of OpenShift to install, for example `4.3.12`. The default is the latest version. + +|`External ID (optional)` +|Specify an unique identifier that is passed by OpenShift Cluster Manager and the OpenShift installer when an account role is assumed. This option is only required for custom account roles that expect an external ID. 
+ +|`Operator roles prefix` +|Enter a prefix to assign to the cluster-specific Operator IAM roles. The default is the name of the cluster and a 4-digit random string, for example `my-rosa-cluster-a0b1`. + +|`Multiple availability zones` +|Deploy the cluster to multiple availability zones in the AWS region. The default is `No`, which results in a cluster being deployed to a single availability zone. If you deploy a cluster into multiple availability zones, the AWS region must have at least 3 availability zones. Multiple availability zones are recommended for production workloads. + +|`AWS region` +|Specify the AWS region to deploy the cluster in. This overrides the `AWS_REGION` environment variable. + +|`PrivateLink cluster` +|Create a cluster using AWS PrivateLink. This option provides private connectivity between Virtual Private Clouds (VPCs), AWS services, and your on-premise networks, without exposing your traffic to the public internet. To provide support, Red Hat Site Reliability Engineering (SRE) can connect to the cluster by using AWS PrivateLink Virtual Private Cloud (VPC) endpoints. This option cannot be changed after a cluster is created. The default is `No`. + +|`Install into an existing VPC` +|Install a cluster into an existing AWS VPC. To use this option, your VPC must have 2 subnets for each availability zone that you are installing the cluster into. The default is `No`. + +|`Enable customer managed key` +|Enable this option to use the AWS Key Management Service (KMS) to help securely manage keys for encrypted data. The keys are used for control plane data volumes that are encrypted by default. Persistent volumes (PVs) for customer applications also use AWS KMS for key management. When enabled, the account KMS key for the region is used by default. The default is `No`. + +|`Compute nodes instance type` +|Select a compute node instance type. The default is `m5.xlarge`. + +|`Enable autoscaling` +|Enable compute node autoscaling. The autoscaler adjusts the size of the cluster to meet your deployment demands. The default is `No`. + +|`Compute nodes` +|Specify the number of compute nodes to provision into each availability zone. Clusters deployed in a single availability zone require at least 2 nodes. Clusters deployed in multiple zones must have at least 3 nodes. The default is `2`. + +|`Machine CIDR` +|Specify the machine IP address range. A minimum IP address range of 128 addresses, using the subnet prefix `/25`, is supported for single availability zone deployments. A minimum address range of 256 addresses, using the subnet prefix `/24`, is supported for deployments that use multiple availability zones. The default is `10.0.0.0/16`. + +|`Service CIDR` +|Specify the IP address range for services. The OpenShift SDN allows only one address block for services. The address block must not overlap with any other address block. The default is `172.30.0.0/16`. + +|`Pod CIDR` +|Specify the pod IP address range. The OpenShift SDN network plug-in supports multiple cluster networks. The address blocks for multiple cluster networks must not overlap. The ranges must be large enough to accommodate your workload. The default is `10.128.0.0/14`. + +|`Host prefix` +|Specify the subnet prefix length to assign to each individual node. For example, if the host prefix is set to `23`, each node is assigned a `/23` subnet in the CIDR address range. The default is `23`. + +|`Disable workload monitoring` +|Disable monitoring for user-defined projects. 
Monitoring for user-defined projects is enabled by default. + +|=== diff --git a/modules/rosa-sts-oidc-provider-command.adoc b/modules/rosa-sts-oidc-provider-command.adoc new file mode 100644 index 0000000000..09a1fe56bf --- /dev/null +++ b/modules/rosa-sts-oidc-provider-command.adoc @@ -0,0 +1,28 @@ +// Module included in the following assemblies: +// +// * rosa_getting_started_sts/rosa_creating_a_cluster_with_sts/rosa-sts-about-iam-resources.adoc + +[id="rosa-sts-oidc-provider-for-operators-aws-cli_{context}"] += OIDC provider AWS CLI reference + +This section lists the `aws` CLI command that is shown in the terminal when you run the following `rosa` command using `manual` mode: + +[source,terminal] +---- +$ rosa create oidc-provider --mode manual --cluster +---- + +[NOTE] +==== +When using `manual` mode, the `aws` command is printed to the terminal for your review. After reviewing the `aws` command, you must run it manually. Alternatively, you can specify `--mode auto` with the `rosa create` command to run the `aws` command immediately. +==== + +.Command output +[source,terminal] +---- +aws iam create-open-id-connect-provider \ + --url https://rh-oidc.s3..amazonaws.com/ \ + --client-id-list openshift sts.amazonaws.com \ + --thumbprint-list <1> +---- +<1> The thumbprint is generated automatically when you run the `rosa create oidc-provider` command. For more information about using thumbprints with AWS Identity and Access Management (IAM) OpenID Connect (OIDC) identity providers, see link:https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_create_oidc_verify-thumbprint.html[the AWS documentation]. diff --git a/modules/rosa-sts-oidc-provider.adoc b/modules/rosa-sts-oidc-provider.adoc new file mode 100644 index 0000000000..97f56f3fec --- /dev/null +++ b/modules/rosa-sts-oidc-provider.adoc @@ -0,0 +1,8 @@ +// Module included in the following assemblies: +// +// * rosa_getting_started_sts/rosa_creating_a_cluster_with_sts/rosa-sts-about-iam-resources.adoc + +[id="rosa-sts-oidc-provider-requirements-for-operators_{context}"] += OIDC provider requirements for Operator authentication + +For ROSA installations that use STS, you must create a cluster-specific OIDC provider that is used by the cluster Operators to authenticate. diff --git a/modules/rosa-sts-operator-role-commands.adoc b/modules/rosa-sts-operator-role-commands.adoc new file mode 100644 index 0000000000..3900759734 --- /dev/null +++ b/modules/rosa-sts-operator-role-commands.adoc @@ -0,0 +1,72 @@ +// Module included in the following assemblies: +// +// * rosa_getting_started_sts/rosa_creating_a_cluster_with_sts/rosa-sts-about-iam-resources.adoc + +[id="rosa-sts-operator-role-aws-cli_{context}"] += Operator IAM role AWS CLI reference + +This section lists the `aws` CLI commands that are shown in the terminal when you run the following `rosa` command using `manual` mode: + +[source,terminal] +---- +$ rosa create operator-roles --mode manual --cluster +---- + +[NOTE] +==== +When using `manual` mode, the `aws` commands are printed to the terminal for your review. After reviewing the `aws` commands, you must run them manually. Alternatively, you can specify `--mode auto` with the `rosa create` command to run the `aws` commands immediately. 
+==== + +.Command output +[source,terminal] +---- +aws iam create-role \ + --role-name -xxxx-openshift-cluster-csi-drivers-ebs-cloud-credent \ + --assume-role-policy-document file://operator_cluster_csi_drivers_ebs_cloud_credentials_policy.json \ + --tags Key=rosa_cluster_id,Value= Key=rosa_openshift_version,Value=4.8 Key=rosa_role_prefix,Value= Key=operator_namespace,Value=openshift-cluster-csi-drivers Key=operator_name,Value=ebs-cloud-credentials + +aws iam attach-role-policy \ + --role-name -xxxx-openshift-cluster-csi-drivers-ebs-cloud-credent \ + --policy-arn arn:aws:iam:::policy/ManagedOpenShift-openshift-cluster-csi-drivers-ebs-cloud-credent + +aws iam create-role \ + --role-name -xxxx-openshift-machine-api-aws-cloud-credentials \ + --assume-role-policy-document file://operator_machine_api_aws_cloud_credentials_policy.json \ + --tags Key=rosa_cluster_id,Value= Key=rosa_openshift_version,Value=4.8 Key=rosa_role_prefix,Value= Key=operator_namespace,Value=openshift-machine-api Key=operator_name,Value=aws-cloud-credentials + +aws iam attach-role-policy \ + --role-name -xxxx-openshift-machine-api-aws-cloud-credentials \ + --policy-arn arn:aws:iam:::policy/ManagedOpenShift-openshift-machine-api-aws-cloud-credentials + +aws iam create-role \ + --role-name -xxxx-openshift-cloud-credential-operator-cloud-crede \ + --assume-role-policy-document file://operator_cloud_credential_operator_cloud_credential_operator_iam_ro_creds_policy.json \ + --tags Key=rosa_cluster_id,Value= Key=rosa_openshift_version,Value=4.8 Key=rosa_role_prefix,Value= Key=operator_namespace,Value=openshift-cloud-credential-operator Key=operator_name,Value=cloud-credential-operator-iam-ro-creds + +aws iam attach-role-policy \ + --role-name -xxxx-openshift-cloud-credential-operator-cloud-crede \ + --policy-arn arn:aws:iam:::policy/ManagedOpenShift-openshift-cloud-credential-operator-cloud-crede + +aws iam create-role \ + --role-name -xxxx-openshift-image-registry-installer-cloud-creden \ + --assume-role-policy-document file://operator_image_registry_installer_cloud_credentials_policy.json \ + --tags Key=rosa_cluster_id,Value= Key=rosa_openshift_version,Value=4.8 Key=rosa_role_prefix,Value= Key=operator_namespace,Value=openshift-image-registry Key=operator_name,Value=installer-cloud-credentials + +aws iam attach-role-policy \ + --role-name -xxxx-openshift-image-registry-installer-cloud-creden \ + --policy-arn arn:aws:iam:::policy/ManagedOpenShift-openshift-image-registry-installer-cloud-creden + +aws iam create-role \ + --role-name -xxxx-openshift-ingress-operator-cloud-credentials \ + --assume-role-policy-document file://operator_ingress_operator_cloud_credentials_policy.json \ + --tags Key=rosa_cluster_id,Value= Key=rosa_openshift_version,Value=4.8 Key=rosa_role_prefix,Value= Key=operator_namespace,Value=openshift-ingress-operator Key=operator_name,Value=cloud-credentials + +aws iam attach-role-policy \ + --role-name -xxxx-openshift-ingress-operator-cloud-credentials \ + --policy-arn arn:aws:iam:::policy/ManagedOpenShift-openshift-ingress-operator-cloud-credentials +---- + +[NOTE] +==== +The command examples provided in the table include Operator roles that use the `ManagedOpenShift` prefix. If you defined a custom prefix when you created the account-wide roles and policies, including the Operator policies, you must reference it by using the `--prefix ` option when you create the Operator roles. 
+====
diff --git a/modules/rosa-sts-operator-roles.adoc b/modules/rosa-sts-operator-roles.adoc
new file mode 100644
index 0000000000..f4cae00ac4
--- /dev/null
+++ b/modules/rosa-sts-operator-roles.adoc
@@ -0,0 +1,38 @@
+// Module included in the following assemblies:
+//
+// * rosa_getting_started_sts/rosa_creating_a_cluster_with_sts/rosa-sts-about-iam-resources.adoc
+
+[id="rosa-sts-operator-roles_{context}"]
+= Cluster-specific Operator IAM role reference
+
+This section provides details about the Operator IAM roles that are required for ROSA deployments that use STS.
+
+When you create the Operator roles by using the `rosa` CLI, the account-wide Operator policies for the matching cluster version are attached to the roles. The Operator policies are tagged with the Operator and version they are compatible with. The correct policy for an Operator role is determined by using the tags.
+
+[NOTE]
+====
+If more than one matching policy is available in your account for an Operator role, an interactive list of options is provided when you create the Operator.
+====
+
+.ROSA cluster-specific Operator roles
+[cols="1,2",options="header"]
+|===
+
+|Resource|Description
+
+|`ManagedOpenShift-openshift-cluster-csi-drivers-ebs-cloud-credentials`
+|An IAM role required by ROSA to manage back-end storage through the Container Storage Interface (CSI).
+
+|`ManagedOpenShift-openshift-machine-api-aws-cloud-credentials`
+|An IAM role required by the ROSA Machine Config Operator to perform core cluster functionality.
+
+|`ManagedOpenShift-openshift-cloud-credential-operator-cloud-credentials`
+|An IAM role required by the ROSA Cloud Credential Operator to manage cloud provider credentials.
+
+|`ManagedOpenShift-openshift-image-registry-installer-cloud-credentials`
+|An IAM role required by the ROSA Image Registry Operator to manage the internal registry storage in AWS S3 for a cluster.
+
+|`ManagedOpenShift-openshift-ingress-operator-cloud-credentials`
+|An IAM role required by the ROSA Ingress Operator to manage external access to a cluster.
+
+|===
diff --git a/modules/rosa-sts-setting-up-environment.adoc b/modules/rosa-sts-setting-up-environment.adoc
new file mode 100644
index 0000000000..6a9f834778
--- /dev/null
+++ b/modules/rosa-sts-setting-up-environment.adoc
@@ -0,0 +1,206 @@
+[id="rosa-sts-setting-up-environment_{context}"]
+
+= Setting up the environment for STS
+
+Complete the following steps to set up your environment before creating your cluster using the AWS Security Token Service (STS).
+
+.Prerequisites
+
+* Review and complete the deployment prerequisites and policies.
+* Create a link:https://cloud.redhat.com[Red Hat account], if you do not already have one. Then, check your email for a verification link. You will need these credentials to install ROSA.
+
+.Procedure
+
+. Log in to the Amazon Web Services (AWS) account that you want to use.
++
+It is recommended to use a dedicated AWS account to run production clusters. If you are using AWS Organizations, you can use an AWS account within your organization or link:https://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_create.html#orgs_manage_accounts_create-new[create a new one].
++
+If you are using AWS Organizations and you need to have a Service Control Policy (SCP) applied to the AWS account you plan to use, these policies must not be more restrictive than the roles and policies required by the cluster.
++
+. Enable the ROSA service in the AWS Console.
+..
Sign in to your link:https://console.aws.amazon.com/rosa/home[AWS account]. +.. To enable ROSA, go to the link:https://console.aws.amazon.com/rosa/[ROSA service] and select *Enable OpenShift*. + +. Install and configure the AWS CLI. +.. Follow the AWS command-line interface documentation to link:https://docs.aws.amazon.com/cli/latest/userguide/cli-chap-install.html[install] and link:https://docs.aws.amazon.com/cli/latest/userguide/cli-chap-configure.html[configure] the AWS CLI for your operating system. ++ +Specify the correct `aws_access_key_id` and `aws_secret_access_key` in the `.aws/credentials` file. See link:https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-quickstart.html[AWS Configuration basics] in the AWS documentation. + +.. Set a default AWS region. ++ +[NOTE] +==== +You can use the environment variable to set the default AWS region. +==== ++ +The ROSA service evaluates regions in the following priority order: ++ +... The region specified when running a `rosa` command with the `--region` flag. +... The region set in the `AWS_DEFAULT_REGION` environment variable. See link:https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-envvars.html[Environment variables to configure the AWS CLI] in the AWS documentation. +... The default region set in your AWS configuration file. See link:https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-quickstart.html#cli-configure-quickstart-config[Quick configuration with aws configure] in the AWS documentation. +.. Optional: Configure your AWS CLI settings and credentials by using an AWS named profile. `rosa` evaluates AWS named profiles in the following priority order: +... The profile specified when running a `rosa` command with the `--profile` flag. +... The profile set in the `AWS_PROFILE` environment variable. See link:https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-profiles.html[Named profiles] in the AWS documentation. +.. Verify the AWS CLI is installed and configured correctly by running the following command to query the AWS API: ++ +[source,terminal] +---- +$ aws sts get-caller-identity +---- ++ +. Install `rosa`, the {product-title} command-line interface (CLI) version 1.0.8 or greater. +.. Download the link:https://access.redhat.com/products/red-hat-openshift-service-aws/[latest release] of the `rosa` CLI for your operating system. +.. Optional: Rename the file you downloaded to `rosa` and make the file executable. This documentation uses `rosa` to refer to the executable file. ++ +[source,terminal] +---- +$ chmod +x rosa +---- +.. Optional: Add `rosa` to your path. ++ +[source,terminal] +---- +$ mv rosa /usr/local/bin/rosa +---- +.. Enter the following command to verify your installation: ++ +[source,terminal] +---- +$ rosa +---- ++ +.Example output +[source,terminal] +---- +Command line tool for ROSA. + +Usage: + rosa [command] + +Available Commands: + completion Generates bash completion scripts + create Create a resource from stdin + delete Delete a specific resource + describe Show details of a specific resource + edit Edit a specific resource + help Help about any command + init Applies templates to support Managed OpenShift on AWS clusters + list List all resources of a specific type + login Log in to your Red Hat account + logout Log out + logs Show logs of a specific resource + verify Verify resources are configured correctly for cluster install + version Prints the version of the tool + +Flags: + --debug Enable debug mode. 
+ -h, --help help for rosa + -v, --v Level log level for V logs + +Use "rosa [command] --help" for more information about a command. +---- ++ +.. Optional: You can run the `rosa completion` command to generate a bash completion file. ++ +[source,terminal] +---- +$ rosa completion > /etc/bash_completion.d/rosa +---- ++ +Add this file to the correct location for your operating system. For example, on a Linux machine, run the following command to enable `rosa` bash completion: ++ +[source,terminal] +---- +$ source /etc/bash_completion.d/rosa +---- + +. Log in to your Red Hat account with the `rosa` CLI. ++ +.. Enter the following command. ++ +[source,terminal] +---- +$ rosa login +---- ++ +.. Replace `` with your token. ++ +.Example output +[source,terminal] +---- +To login to your Red Hat account, get an offline access token at https://cloud.redhat.com/openshift/token/rosa +? Copy the token and paste it here: +---- ++ +.Example output continued +[source,terminal] +---- +I: Logged in as '' on 'https://api.openshift.com' +---- + +. Verify that your AWS account has the necessary quota to deploy an {product-title} cluster. ++ +[source,terminal] +---- +$ rosa verify quota [--region=] +---- ++ +.Example output +[source,terminal] +---- +I: Validating AWS quota... +I: AWS quota ok +---- ++ +[NOTE] +==== +Sometimes your AWS quota varies by region. If you receive any errors, try a different region. +==== ++ +If you need to increase your quota, go to your link:https://aws.amazon.com/console/[AWS console], and request a quota increase for the service that failed. ++ +After both the permissions and quota checks pass, proceed to the next step. ++ +. Prepare your AWS account for cluster deployment: ++ +.. Run the following command to verify your Red Hat and AWS credentials are setup correctly. Check that your AWS Account ID, Default Region and ARN match what you expect. You can safely ignore the rows beginning with OCM for now (OCM stands for OpenShift Cluster Manager). ++ +[source,terminal] +---- +$ rosa whoami +---- ++ +.Example output +[source,terminal] +---- +AWS Account ID: 000000000000 +AWS Default Region: us-east-1 +AWS ARN: arn:aws:iam::000000000000:user/hello +OCM API: https://api.openshift.com +OCM Account ID: 1DzGIdIhqEWyt8UUXQhSoWaaaaa +OCM Account Name: Your Name +OCM Account Username: you@domain.com +OCM Account Email: you@domain.com +OCM Organization ID: 1HopHfA2hcmhup5gCr2uH5aaaaa +OCM Organization Name: Red Hat +OCM Organization External ID: 0000000 +---- + +. Install the OpenShift CLI (`oc`), version 4.7.9 or greater, from the ROSA (`rosa`) CLI. +.. Enter this command to download the latest version of the `oc` CLI: ++ +[source,terminal] +---- +$ rosa download openshift-client +---- + +.. After downloading the `oc` CLI, unzip it and add it to your path. +.. Enter this command to verify that the `oc` CLI is installed correctly: ++ +[source,terminal] +---- +$ rosa verify openshift-client +---- + +.Create roles +After completing these steps, you are ready to set up IAM and OIDC access-based roles. 
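+
+As a reference for the earlier `oc` CLI step, unpacking the downloaded archive and adding the binaries to your path might look like the following sketch. It assumes a Linux host, the default `openshift-client-linux.tar.gz` archive name, and `/usr/local/bin` as the install location; adjust these for your platform and downloaded version.
+
+[source,terminal]
+----
+$ tar xvf openshift-client-linux.tar.gz
+$ sudo mv oc kubectl /usr/local/bin/
+$ oc version
+----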
diff --git a/modules/rosa-sts-support-considerations.adoc b/modules/rosa-sts-support-considerations.adoc new file mode 100644 index 0000000000..b7fd8326db --- /dev/null +++ b/modules/rosa-sts-support-considerations.adoc @@ -0,0 +1,15 @@ +// Module included in the following assemblies: +// +// * rosa_getting_started_sts/rosa_creating_a_cluster_with_sts/rosa-sts-creating-a-cluster-with-customizations.adoc + +[id="rosa-sts-support-considerations_{context}"] += Support considerations for ROSA clusters with STS + +The supported way of creating a {product-title} cluster that uses the AWS Security Token Service (STS) is by using the steps described in this product documentation. + +You can use `manual` mode with the {product-title} CLI (`rosa`) to generate the AWS Identity and Access Management (IAM) policy files and `aws` commands that are required to install the STS resources. + +[IMPORTANT] +==== +The files and `aws` commands are generated for review purposes only and must not be modified in any way. Red Hat cannot provide support for ROSA clusters that have been deployed by using modified versions of the policy files or `aws` commands. +==== diff --git a/modules/rosa-troubleshooting-deployment.adoc b/modules/rosa-troubleshooting-deployment.adoc new file mode 100644 index 0000000000..7784d2aa9e --- /dev/null +++ b/modules/rosa-troubleshooting-deployment.adoc @@ -0,0 +1,63 @@ +[id="rosa-troubleshooting-deployment_{context}"] += Troubleshooting cluster deployments +This document describes how to troubleshoot cluster deployment errors. + +[id="rosa-troubleshooting-general-deployment-failure_{context}"] +== General deployment failure + +If a cluster deployment fails, the cluster is put into an "error" state. + +Run the following command to get more information: + +[source,terminal] +---- +$ rosa describe cluster -c --debug +---- + +[id="rosa-troubleshooting-deployment-failure-osdccsadmin_{context}"] +== Deployment failure with an `osdCcsAdmin` error + +If a cluster creation action fails, you can receive the following error message. + +.Example output +[source,terminal] +---- +Failed to create cluster: Unable to create cluster spec: Failed to get access keys for user 'osdCcsAdmin': NoSuchEntity: The user with name osdCcsAdmin cannot be found. +---- + +To fix this issue: + +. Delete the stack: ++ +[source,terminal] +---- +$ rosa init --delete-stack +---- + +. Reinitialize your account: ++ +[source,terminal] +---- +$ rosa init +---- + +== Elastic load balancer (ELB) `AccessDenied` error + +If you have not created a load balancer in your AWS account, it is possible that the service role for the elastic load balancer (ELB) might not exist yet. You may receive the following error: + +[source,terminal] +---- +Error: Error creating network Load Balancer: AccessDenied: User: arn:aws:sts::xxxxxxxxxxxx:assumed-role/ManagedOpenShift-Installer-Role/xxxxxxxxxxxxxxxxxxx is not authorized to perform: iam:CreateServiceLinkedRole on resource: arn:aws:iam::xxxxxxxxxxxx:role/aws-service-role/elasticloadbalancing.amazonaws.com/AWSServiceRoleForElasticLoadBalancing" +---- + +To resolve this issue, ensure that the role exists on your AWS account. If not, create this role with the following command: + +[source,terminal] +---- +aws iam get-role --role-name "AWSServiceRoleForElasticLoadBalancing" || aws iam create-service-linked-role --aws-service-name "elasticloadbalancing.amazonaws.com" +---- + +[NOTE] +==== +This command only needs to be executed once per account. 
+====
diff --git a/modules/rosa-troubleshooting-expired-token.adoc b/modules/rosa-troubleshooting-expired-token.adoc
new file mode 100644
index 0000000000..f3f76e57a5
--- /dev/null
+++ b/modules/rosa-troubleshooting-expired-token.adoc
@@ -0,0 +1,22 @@
+
+// Module included in the following assemblies:
+//
+// cli_reference/rosa_cli/rosa-troubleshooting-expired-tokens.adoc
+
+
+[id="rosa-troubleshooting-expired-offline-access-tokens_{context}"]
+= Troubleshooting expired offline access tokens
+
+If you use the `rosa` CLI and your api.openshift.com offline access token expires, an error message appears. This happens when sso.redhat.com invalidates the token.
+
+.Example output
+[source,terminal]
+----
+Can't get tokens ....
+Can't get access tokens ....
+----
+
+.Procedure
+* Generate a new offline access token at the following URL. A new offline access token is generated every time you visit the URL.
+
+** {product-title} (ROSA): https://console.redhat.com/openshift/token/rosa
diff --git a/modules/rosa-troubleshooting-installing.adoc b/modules/rosa-troubleshooting-installing.adoc
new file mode 100644
index 0000000000..150525efc0
--- /dev/null
+++ b/modules/rosa-troubleshooting-installing.adoc
@@ -0,0 +1,70 @@
+[id="rosa-troubleshooting-installing_{context}"]
+= Installation troubleshooting
+
+[id="rosa-troubleshooting-install-uninstall-logs_{context}"]
+== Inspect install or uninstall logs
+
+To display install logs:
+
+* Run the following command, replacing `<cluster_name>` with the name of your cluster:
++
+[source,terminal]
+----
+$ rosa logs install --cluster=<cluster_name>
+----
++
+* To watch the logs, include the `--watch` flag:
++
+[source,terminal]
+----
+$ rosa logs install --cluster=<cluster_name> --watch
+----
+
+To display uninstall logs:
+
+* Run the following command, replacing `<cluster_name>` with the name of your cluster:
++
+[source,terminal]
+----
+$ rosa logs uninstall --cluster=<cluster_name>
+----
++
+* To watch the logs, include the `--watch` flag:
++
+[source,terminal]
+----
+$ rosa logs uninstall --cluster=<cluster_name> --watch
+----
+
+[id="rosa-faq-verify-SCP_{context}"]
+== Verify your Amazon Web Services (AWS) account permissions
+
+Run the following command to verify that your AWS account has the correct permissions:
+
+[source,terminal]
+----
+$ rosa verify permissions
+----
+
+If you receive any errors, double-check to ensure that an link:https://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_policies_type-auth.html#orgs_manage_policies_scp[SCP] is not applied to your AWS account. If you are required to use an SCP, see link:https://www.openshift.com/dedicated/ccs#scp[Red Hat Requirements for Customer Cloud Subscriptions] for details on the minimum required SCP.
+
+[id="rosa-faq-verify-aws-quota_{context}"]
+== Verify your AWS account and quota
+
+Run the following command to verify that you have the available quota on your AWS account:
+
+[source,terminal]
+----
+$ rosa verify quota
+----
+
+AWS quotas change based on region. Be sure you are verifying your quota for the correct AWS region. If you need to increase your quota, navigate to your link:https://aws.amazon.com/console/[AWS console], and request a quota increase for the service that failed.
+
+[id="rosa-faq-aws-notification-emails_{context}"]
+== AWS notification emails
+
+When creating a cluster, the {product-title} service creates small instances in all supported regions. This check ensures the AWS account being used can deploy to each supported region.
+ +For AWS accounts that are not using all supported regions, AWS may send one or more emails confirming that "Your Request For Accessing AWS Resources Has Been Validated". Typically the sender of this email is aws-verification@amazon.com. + +This is expected behavior as the {product-title} service is validating your AWS account configuration. diff --git a/modules/rosa-understanding.adoc b/modules/rosa-understanding.adoc new file mode 100644 index 0000000000..8e68ef40f4 --- /dev/null +++ b/modules/rosa-understanding.adoc @@ -0,0 +1,31 @@ + +// Module included in the following assemblies: +// +// understanding-rosa/rosa-understanding.adoc + + +[id="rosa-about_{context}"] += About {product-title} + +{product-title} (ROSA) is a fully-managed OpenShift service, jointly managed and supported by both Red Hat and Amazon Web Services (AWS). Having your clusters maintained by this service gives you the freedom to focus on deploying applications. + +This service is licensed directly from your AWS account. ROSA pricing is consumption based and is billed directly to your AWS account. + +You can quickly deploy OpenShift clusters directly from the AWS console and integrate with other AWS services. + +[id="rosa-cluster-consoles_{context}"] +== Cluster creation consoles + +To create a new cluster, start from the AWS Management console using ROSA. This integrates with the new `rosa` CLI and API to provision clusters in your AWS account. The new API for cluster creation alleviates the burden of manually deploying the cluster in your existing VPC and account. + +[id="rosa-consumption-experience_{context}"] +== Consumption experience + +After the clusters are created, you can operate your clusters with the OpenShift web console or with the OpenShift Cluster Manager. The ROSA service also uses OpenShift APIs. These tools provide a standardized OpenShift experience to leverage your existing skills and tools knowledge. + +You receive OpenShift updates with new feature releases and a shared, common source for alignment with OpenShift Container Platform. ROSA supports the same versions of OpenShift as Red Hat OpenShift Dedicated and OpenShift Container Platform to achieve version consistency. + +[id="rosa-integration-aws_{context}"] +== Integration with AWS services + +ROSA can integrate with a range of AWS compute, database, analytics, machine learning, networking, mobile, and various application services, enabling customers to benefit from the more than 170 AWS services that scale on-demand across the globe. These AWS-native services are directly accessible to quickly deploy and scale services through the same management interface. diff --git a/modules/rosa-upgrade-cluster-cli.adoc b/modules/rosa-upgrade-cluster-cli.adoc new file mode 100644 index 0000000000..dc29d0a2f7 --- /dev/null +++ b/modules/rosa-upgrade-cluster-cli.adoc @@ -0,0 +1,107 @@ + +// Module included in the following assemblies: +// +// * cli_reference/rosa_cli/rosa-manage-objects-cli.adoc + +[id="rosa-upgrading-cluster_{context}"] += Upgrade and delete upgrade for clusters + + +This section describes the `upgrade` command usage for clusters. + +[id="rosa-upgrade-cluster_{context}"] +== upgrade cluster + +Schedule a cluster upgrade. + +.Syntax +[source,terminal] +---- +$ rosa upgrade cluster --cluster= | [arguments] +---- + +.Arguments +[cols="30,70"] +|=== +|Option |Definition + +|--cluster +|Required: The name or ID (string) of the cluster that the upgrade will be scheduled for. + +|--interactive +|Enables interactive mode. 
+ +|--version +|The version (string) of OpenShift Container Platform that the cluster will be upgraded to. + +|--schedule-date +|The next date (string) when the upgrade will run at the specified time. Format: `yyyy-mm-dd` + +|--schedule-time +|The next time the upgrade will run on the specified date. Format: `HH:mm` + +|--node-drain-grace-period +|Sets a grace period (string) for how long the pod disruption budget-protected workloads are respected during upgrades. After this grace period, any workloads protected by pod disruption budgets that have not been successfully drained from a node will be forcibly evicted. Default: `1 hour` +|=== + +.Optional arguments inherited from parent commands +[cols="30,70"] +|=== +|Option |Definition + +|--help +|Shows help for this command. +|=== + +.Examples +Interactively schedule an upgrade on a cluster named `mycluster`: + +[source,terminal] +---- +$ rosa upgrade cluster --cluster=mycluster --interactive +---- + +Schedule a cluster upgrade within the hour on a cluster named `mycluster`: + +[source,terminal] +---- +$ rosa upgrade cluster --cluster=mycluster --version 4.5.20 +---- + +[id="rosa-delete-upgrade-cluster_{context}"] +== delete upgrade + +Cancel a scheduled cluster upgrade: + +.Syntax +[source,terminal] +---- +$ rosa delete upgrade --cluster= | +---- + +.Arguments +[cols="30,70"] +|=== +|Option |Definition + +|--cluster +|Required: The name or ID (string) of the cluster that the upgrade will be cancelled for. +|=== + +.Optional arguments inherited from parent commands +[cols="30,70"] +|=== +|Option |Definition + +|--help +|Shows help for this command. + +|--debug +|Enables debug mode. + +|--v level +|Log level for V logs. + +|--yes +|Automatically answers `yes` to confirm the operation. +|=== diff --git a/modules/rosa-upgrading-automatic-ocm.adoc b/modules/rosa-upgrading-automatic-ocm.adoc new file mode 100644 index 0000000000..ff7b098b47 --- /dev/null +++ b/modules/rosa-upgrading-automatic-ocm.adoc @@ -0,0 +1,20 @@ +// Module included in the following assemblies: +// +// * rosa_upgrading/rosa-upgrading.adoc +// * rosa_upgrading/rosa-upgrading-sts.adoc + +[id="rosa-scheduling-upgrade_{context}"] += Scheduling automatic upgrades + +You can schedule automatic upgrades for a {product-title} cluster through the OpenShift Cluster Manager (OCM) console. + +.Procedure + +. Log in to the {cloud-redhat-com}. +. Select a cluster to upgrade. +. Click the *Settings* tab. +. In the *Update strategy* pane, click *Automatic* and select a preferred day of the week and start time for the automatic upgrades. +. In the *Node draining* pane, select a grace period interval from the list. The grace period enables the nodes to gracefully drain before forcing the pod eviction. The default is *1 hour*. +. In the *Update strategy* pane, click *Save* to apply your update strategy. ++ +When upgrades are available, they are automatically applied to the cluster on the preferred day of the week and start time. 
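+
+If you prefer the command line, an individual upgrade can also be scheduled with the `rosa upgrade cluster` command and the `--schedule-date` and `--schedule-time` arguments that are described in the `rosa` CLI reference. This is a sketch only and schedules a single upgrade rather than a recurring policy; the cluster name, version, and timestamp values are placeholders:
+
+[source,terminal]
+----
+$ rosa upgrade cluster --cluster=mycluster --version 4.8.2 --schedule-date 2021-12-01 --schedule-time 08:00
+----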
diff --git a/modules/rosa-upgrading-cli-tutorial.adoc b/modules/rosa-upgrading-cli-tutorial.adoc
new file mode 100644
index 0000000000..fc28080d3b
--- /dev/null
+++ b/modules/rosa-upgrading-cli-tutorial.adoc
@@ -0,0 +1,60 @@
+// Module included in the following assemblies:
+//
+// * rosa_upgrading/rosa-upgrading.adoc
+// * rosa_upgrading/rosa-upgrading-sts.adoc
+
+ifeval::["{context}" == "rosa-upgrading-sts"]
+:sts:
+endif::[]
+
+[id="rosa-upgrading-cli_{context}"]
+= Upgrading manually with the rosa CLI
+
+You can upgrade a {product-title} cluster
+ifdef::sts[]
+that uses the AWS Security Token Service (STS)
+endif::sts[]
+manually by using the `rosa` CLI.
+
+This method schedules the cluster for an immediate upgrade, if a more recent version is available.
+
+.Prerequisites
+
+* You have installed and configured the latest ROSA CLI on your installation host.
+ifdef::sts[]
+* If you are upgrading your cluster from 4.7 to 4.8, you have upgraded the AWS Identity and Access Management (IAM) account-wide roles and policies to version 4.8. You have also updated the `cloudcredential.openshift.io/upgradeable-to` annotation in the `CloudCredential` custom resource. For more information, see _Preparing an upgrade from 4.7 to 4.8_.
+endif::sts[]
+
+.Procedure
+
+. To verify the current version of your cluster, enter the following command:
++
+[source,terminal]
+----
+$ rosa describe cluster --cluster=<cluster_name|cluster_id> <1>
+----
+<1> Replace `<cluster_name|cluster_id>` with the cluster name or the ID of the cluster.
+
+. To verify that an upgrade is available, enter the following command:
++
+[source,terminal]
+----
+$ rosa list upgrade --cluster=<cluster_name|cluster_id>
+----
++
+The command returns a list of versions to which the cluster can be upgraded, including a recommended version.
+
+. To upgrade a cluster to the latest available version, enter the following command:
++
+[source,terminal]
+----
+$ rosa upgrade cluster --cluster=<cluster_name|cluster_id>
+----
++
+The cluster is scheduled for an immediate upgrade. This action can take an hour or longer, depending on your workload configuration, such as pod disruption budgets.
++
+You will receive an email when the upgrade is complete. You can also check the status by running `rosa describe cluster` again from the `rosa` CLI or view the status in the OpenShift Cluster Manager (OCM) console.
+
+ifeval::["{context}" == "rosa-upgrading-sts"]
+:!sts:
+endif::[]
diff --git a/modules/rosa-upgrading-manual-ocm.adoc b/modules/rosa-upgrading-manual-ocm.adoc
new file mode 100644
index 0000000000..0cf86170e7
--- /dev/null
+++ b/modules/rosa-upgrading-manual-ocm.adoc
@@ -0,0 +1,54 @@
+// Module included in the following assemblies:
+//
+// * rosa_upgrading/rosa-upgrading.adoc
+// * rosa_upgrading/rosa-upgrading-sts.adoc
+
+ifeval::["{context}" == "rosa-upgrading-sts"]
+:sts:
+endif::[]
+
+[id="rosa-upgrade-ocm_{context}"]
+= Upgrading manually using the console
+
+You can upgrade a {product-title} cluster
+ifdef::sts[]
+that uses the AWS Security Token Service (STS)
+endif::sts[]
+manually by using the OpenShift Cluster Manager (OCM) console.
+
+ifdef::sts[]
+.Prerequisites
+
+* If you are upgrading your cluster from 4.7 to 4.8, you have upgraded the AWS Identity and Access Management (IAM) account-wide roles and policies to version 4.8. You have also updated the `cloudcredential.openshift.io/upgradeable-to` annotation in the `CloudCredential` custom resource. For more information, see _Preparing an upgrade from 4.7 to 4.8_.
+endif::sts[]
+
+.Procedure
+
+. Log in to the {cloud-redhat-com}.
+. Select a cluster to upgrade.
+. 
Click the *Settings* tab. +. In the *Update strategy* pane, click *Manual*. +. In the *Node draining* pane, select a grace period interval from the list. The grace period enables the nodes to gracefully drain before forcing the pod eviction. The default is *1 hour*. +. In the *Update strategy* pane, click *Save* to apply your update strategy. +. In the *Update status* pane, review the *Update available* information and click *Update*. ++ +[NOTE] +==== +The *Update* button is enabled only when an upgrade is available. +==== ++ +. In the *Select version* dialog, choose a target upgrade version and click *Next*. +. In the *Schedule update* dialog, schedule your cluster upgrade. ++ +* To upgrade within an hour, select *Update now* and click *Next*. +* To upgrade at a later time, select *Schedule a different time* and set a time and date for your upgrade. Click *Next* to proceed to the confirmation dialog. ++ +. After reviewing the version and schedule summary, select *Confirm update*. ++ +The cluster is scheduled for an upgrade to the target version. This action can take an hour or longer, depending on the selected upgrade schedule and your workload configuration, such as pod disruption budgets. ++ +The status is displayed in the *Update status* pane. + +ifeval::["{context}" == "rosa-upgrading-sts"] +:!sts: +endif::[] diff --git a/modules/rosa-upgrading-preparing-4-7-to-4-8.adoc b/modules/rosa-upgrading-preparing-4-7-to-4-8.adoc new file mode 100644 index 0000000000..7c03f31d93 --- /dev/null +++ b/modules/rosa-upgrading-preparing-4-7-to-4-8.adoc @@ -0,0 +1,42 @@ +// Module included in the following assemblies: +// +// * rosa_upgrading/rosa-upgrading-sts.adoc + +[id="rosa-upgrading-4-7-to-4-8-preparing_{context}"] += Preparing an upgrade from 4.7 to 4.8 + +You must meet the following requirements before upgrading a {product-title} (ROSA) cluster that uses the AWS Security Token Service (STS) from version 4.7 to 4.8: + +* Update the AWS Identity and Access Management (IAM) account-wide roles and policies, including the Operator policies to version 4.8. +* After updating the roles and policies, you must update the value of the `cloudcredential.openshift.io/upgradeable-to` annotation in the `CloudCredential` custom resource to `v4.8`. This indicates that the cluster is ready to upgrade. + +.Prerequisites + +* You have installed the latest AWS CLI on your installation host. +* You have installed version 1.1.3 or later of the ROSA CLI on your installation host. +* You have installed version 4.8 or later of the OpenShift CLI (`oc`) on your installation host. +* You have the permissions required to update the AWS account-wide roles and policies. +* You have access to the cluster as a user with the `cluster-admin` role. + +.Procedure + +. Update the account-wide roles and policies, including the Operator policies, to version 4.8: ++ +[source,terminal] +---- +$ rosa create account-roles --mode auto +---- ++ +[IMPORTANT] +==== +If you created the roles and policies for version 4.7 with a custom prefix, you must include the `--prefix` option and specify the same prefix name. Specifying the prefix name ensures that the existing roles and policies used by the cluster are updated. +==== + +. 
As a cluster administrator, update the value of the `cloudcredential.openshift.io/upgradeable-to` annotation in the `CloudCredential` custom resource to `v4.8`: ++ +[source,terminal] +---- +$ oc annotate cloudcredential cluster cloudcredential.openshift.io/upgradeable-to="v4.8" +---- + +You can now proceed to upgrade the cluster. diff --git a/modules/rosa-using-bash-script.adoc b/modules/rosa-using-bash-script.adoc new file mode 100644 index 0000000000..b62c1d8fcc --- /dev/null +++ b/modules/rosa-using-bash-script.adoc @@ -0,0 +1,46 @@ + +// Module included in the following assemblies: +// +// * cli_reference/rosa_cli/rosa-get-started-cli.adoc + +[id="rosa-using-bash-script_{context}"] += Using a Bash script + + +This is an example workflow of how to use a Bash script with the `rosa` CLI. + +.Prerequisites +Make sure that AWS credentials are available as one of the following options: + +* AWS profile +* Environment variables (`AWS_ACCESS_KEY_ID`, `AWS_SECRET_ACCESS_KEY`) + +.Procedure + +. Initialize `rosa` using an OpenShift Cluster Manager (OCM) offline token link:https://cloud.redhat.com/openshift/token/rosa[from Red Hat]: ++ +[source,terminal] +---- +$ rosa init --token= +---- + +. Create the {product-title} (ROSA) cluster: ++ +[source,terminal] +---- +$ rosa create cluster --cluster-name= +---- + +. Add an identity provider (IDP): ++ +[source,terminal] +---- +$ rosa create idp --cluster= --type= [arguments] +---- + +. Add a `dedicated-admin` user: ++ +[source,terminal] +---- +$ rosa grant user dedicated-admin --user= --cluster= +---- diff --git a/modules/rosa-using-sts.adoc b/modules/rosa-using-sts.adoc new file mode 100644 index 0000000000..0be40a22cb --- /dev/null +++ b/modules/rosa-using-sts.adoc @@ -0,0 +1,10 @@ +// Module included in the following assemblies: +// +// * rosa_architecture/rosa-understanding.adoc + +[id="rosa-using-sts_{context}"] += Using the AWS Security Token Service + +The Amazon Web Services (AWS) Security Token Service (STS) is a global web service that provides short-term credentials for IAM or federated users. You can use AWS STS with {product-title} (ROSA) to allocate temporary, limited-privilege credentials for component-specific IAM roles. The service enables cluster components to make AWS API calls using secure cloud resource management practices. + +You can use the `rosa` CLI to create the IAM role, policy and identity provider resources that are required for ROSA clusters that use STS. diff --git a/modules/rosa-view-cloudwatch-logs.adoc b/modules/rosa-view-cloudwatch-logs.adoc new file mode 100644 index 0000000000..8c38153027 --- /dev/null +++ b/modules/rosa-view-cloudwatch-logs.adoc @@ -0,0 +1,21 @@ + +// Module included in the following assemblies: +// +// logging/rosa-viewing-logs.adoc + +[id="rosa-view-cloudwatch-logs_{context}"] += Viewing forwarded logs + +Logs that are being forwarded from {product-title} are viewed in the Amazon Web Services (AWS) console. + +.Prerequisites + +* The `cluster-logging-operator` add-on service is installed and `Cloudwatch` is enabled. + +.Procedure + +. Log in to the AWS console. +. Select the region the cluster is deployed in. +. Select the *CloudWatch* service. +. Select *Logs* from the left column, and select *Log Groups*. +. Select a log group to explore. You can view application, infrastructure, or audit logs, depending on which types were enabled during the add-on service installation. 
See the link:https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/WhatIsCloudWatch.html[Amazon CloudWatch User Guide] for more information.
diff --git a/modules/scaling-cluster.adoc b/modules/scaling-cluster.adoc
new file mode 100644
index 0000000000..55ae7aa533
--- /dev/null
+++ b/modules/scaling-cluster.adoc
@@ -0,0 +1,29 @@
+
+// Module included in the following assemblies:
+//
+// * assemblies/osd-quickstart.adoc
+
+[id="scaling-cluster_{context}"]
+= Scaling your cluster
+
+// TODO: This writes out OCM, but there is an {OCM} attribute. Should that always be used instead?
+You can scale your {product-title} cluster from the OpenShift Cluster Manager (OCM).
+
+.Procedure
+
+. From link:https://cloud.redhat.com/openshift[OCM], click the cluster that you want to resize.
+
+. Click *Actions* -> *Edit load balancers and persistent storage*.
+.. Use the drop-down menu to select how many *Load balancers* you want to scale to.
+.. Use the drop-down menu to select the amount of *Persistent storage* you want to scale to.
+.. Click *Apply*. Scaling occurs automatically.
+
+. Click *Actions* -> *Edit node count*.
+.. Use the drop-down menu to select the *Machine Pool* setting you want to scale to.
+.. Use the drop-down menu to select the *Node Count* you want to scale to.
+.. Click *Apply*. Scaling occurs automatically.
+
+
+.Verification
+
+* In the *Overview* tab under the *Details* heading, the *Status* indicator shows that your cluster is *Ready* for use.
diff --git a/modules/sdpolicy-account-management.adoc b/modules/sdpolicy-account-management.adoc
new file mode 100644
index 0000000000..be04ad19fb
--- /dev/null
+++ b/modules/sdpolicy-account-management.adoc
@@ -0,0 +1,207 @@
+
+// Module included in the following assemblies:
+//
+// * assemblies/osd-service-definition.adoc
+
+[id="sdpolicy-account-management_{context}"]
+= Account management
+
+[id="billing_{context}"]
+== Billing
+Each {product-title} cluster requires a minimum annual base cluster purchase, and there are two billing options available for each cluster: Standard and Customer Cloud Subscription (CCS).
+
+Standard {product-title} clusters are deployed into their own cloud infrastructure accounts, each owned by Red Hat. Red Hat is responsible for this account, and cloud infrastructure costs are paid directly by Red Hat. The customer only pays the Red Hat subscription costs.
+
+In the CCS model, the customer pays the cloud infrastructure provider directly for cloud costs, and the cloud infrastructure account is part of a customer's organization, with specific access granted to Red Hat. The customer has restricted access to this account, but is able to view billing and usage information. In this model, the customer pays Red Hat for the CCS subscription and pays the cloud provider for the cloud costs. It is the customer's responsibility to pre-purchase or provide Reserved Instance (RI) compute instances to ensure lower cloud infrastructure costs.
+
+Additional resources can be purchased for an OpenShift Dedicated cluster, including:
+
+* Additional nodes (can be different types and sizes through the use of machine pools)
+* Middleware (JBoss EAP, JBoss Fuse, and so on) - additional pricing based on specific middleware component
+* Additional storage in increments of 500 GB (standard only; 100 GB included)
+* Additional 12 TiB Network I/O (standard only; 12 TiB included)
+* Load Balancers for Services are available in bundles of 4; these enable non-HTTP/SNI traffic or non-standard ports (standard only)
+
+[id="cluster-self-service_{context}"]
+== Cluster self-service
+
+Customers can create, scale, and delete their clusters from link:https://cloud.redhat.com/openshift[OpenShift Cluster Manager (OCM)], provided that they have pre-purchased the necessary subscriptions.
+
+Actions available in {OCM} must not be directly performed from within the cluster as this might cause adverse effects, including having all actions automatically reverted.
+
+[id="cloud-providers_{context}"]
+== Cloud providers
+
+{product-title} offers OpenShift Container Platform clusters as a managed service on the following cloud providers:
+
+* Amazon Web Services (AWS)
+* Google Cloud Platform (GCP)
+
+[id="compute_{context}"]
+== Compute
+
+Single availability zone clusters require a minimum of 2 worker nodes for Customer Cloud Subscription (CCS) clusters deployed to a single availability zone. A minimum of 4 worker nodes is required for standard clusters. These 4 worker nodes are included in the base subscription.
+
+Multiple availability zone clusters require a minimum of 3 worker nodes for Customer Cloud Subscription (CCS) clusters, 1 deployed to each of 3 availability zones. A minimum of 9 worker nodes is required for standard clusters. These 9 worker nodes are included in the base subscription, and additional nodes must be purchased in multiples of 3 to maintain proper node distribution.
+
+Worker nodes must all be the same type and size within a single {product-title} cluster.
+
+[NOTE]
+====
+The default machine pool node type and size cannot be changed after the cluster has been created.
+====
+
+Control and infrastructure nodes are also provided by Red Hat. There are at least 3 control plane nodes that handle etcd and API-related workloads. There are at least 2 infrastructure nodes that handle metrics, routing, the web console, and other workloads. Control and infrastructure nodes are strictly for Red Hat workloads to operate the service, and customer workloads are not permitted to be deployed on these nodes.
+
+[NOTE]
+====
+Approximately 1 vCPU core and 1 GiB of memory are reserved on each worker node and removed from allocatable resources. This is necessary to run link:https://kubernetes.io/docs/tasks/administer-cluster/reserve-compute-resources/#system-reserved[processes required by the underlying platform]. This includes system daemons such as udev, kubelet, container runtime, and so on, and also accounts for kernel reservations. {OCP} core systems such as audit log aggregation, metrics collection, DNS, image registry, SDN, and so on might consume additional allocatable resources to maintain the stability and maintainability of the cluster. The additional resources consumed might vary based on usage.
+==== + +[id="aws-compute-types_{context}"] +== AWS compute types + +{product-title} offers the following worker node types and sizes on AWS: + +.General purpose + +* M5.xlarge (4 vCPU, 16 GiB) +* M5.2xlarge (8 vCPU, 32 GiB) +* M5.4xlarge (16 vCPU, 64 GiB) +* M5.8xlarge (32 vCPU, 128 GiB) +* M5.12xlarge (48 vCPU, 192 GiB) +* M5.16xlarge (64 vCPU, 256 GiB) +* M5.24xlarge (96 vCPU, 384 GiB) + +.Memory-optimized + +* R5.xlarge (4 vCPU, 32 GiB) +* R5.2xlarge (8 vCPU, 64 GiB) +* R5.4xlarge (16 vCPU, 128 GiB) +* R5.8xlarge (32 vCPU, 256 GiB) +* R5.12xlarge (48 vCPU, 384 GiB) +* R5.16xlarge (64 vCPU, 512 GiB) +* R5.24xlarge (96 vCPU, 768 GiB) + +.Compute-optimized + +* C5.2xlarge (8 vCPU, 16 GiB) +* C5.4xlarge (16 vCPU, 32 GiB) +* C5.9xlarge (36 vCPU, 72 GiB) +* C5.12xlarge (48 vCPU, 96 GiB) +* C5.18xlarge (72 vCPU, 144 GiB) +* C5.24xlarge (96 vCPU, 192 GiB) + +[id="gcp-compute-types_{context}"] +== Google Cloud compute types + +{product-title} offers the following worker node types and sizes on Google Cloud that are chosen to have a common CPU and memory capacity that are the same as other cloud instance types: + +.General purpose + +* custom-4-16384 (4 vCPU, 16 GiB) +* custom-8-32768 (8 vCPU, 32 GiB) +* custom-16-65536 (16 vCPU, 64 GiB) +* custom-32-131072 (32 vCPU, 128 GiB) +* custom-48-196608 (48 vCPU, 192 GiB) +* custom-64-262144 (64 vCPU, 256 GiB) +* custom-96-393216 (96 vCPU, 384 GiB) + +.Memory-optimized + +* custom-4-32768-ext (4 vCPU, 32 GiB) +* custom-8-65536-ext (8 vCPU, 64 GiB) +* custom-16-131072-ext (16 vCPU, 128 GiB) +* custom-32-262144 (32 vCPU, 256 GiB) +* custom-48-393216 (48 vCPU, 384 GiB) +* custom-64-524288 (64 vCPU, 512 GiB) +* custom-96-786432 (96 vCPU, 768 GiB) + +.Compute-optimized + +* custom-8-16384 (8 vCPU, 16 GiB) +* custom-16-32768 (16 vCPU, 32 GiB) +* custom-36-73728 (36 vCPU, 72 GiB) +* custom-48-98304 (48 vCPU, 96 GiB) +* custom-72-147456 (72 vCPU, 144 GiB) +* custom-96-196608 (96 vCPU, 192 GiB) + + +[id="regions-availability-zones_{context}"] +== Regions and availability zones +The following AWS regions are supported by {OCP} 4 and are supported for {product-title}: + +* af-south-1 (Cape Town, AWS opt-in required) +* ap-east-1 (Hong Kong, AWS opt-in required) +* ap-northeast-1 (Tokyo) +* ap-northeast-2 (Seoul) +* ap-south-1 (Mumbai) +* ap-southeast-1 (Singapore) +* ap-southeast-2 (Sydney) +* ca-central-1 (Central Canada) +* eu-central-1 (Frankfurt) +* eu-north-1 (Stockholm) +* eu-south-1 (Milan, AWS opt-in required) +* eu-west-1 (Ireland) +* eu-west-2 (London) +* eu-west-3 (Paris) +* me-south-1 (Bahrain, AWS opt-in required) +* sa-east-1 (São Paulo) +* us-east-1 (N. Virginia) +* us-east-2 (Ohio) +* us-west-1 (N. California) +* us-west-2 (Oregon) + +The following Google Cloud regions are currently supported: + +* asia-east1, Changhua County, Taiwan +* asia-east2, Hong Kong +* asia-northeast1, Tokyo, Japan +* asia-northeast2, Osaka, Japan +* asia-northeast3, Seoul, Korea +* asia-south1, Mumbai, India +* asia-southeast1, Jurong West, Singapore +* asia-southeast2, Jakarta, Indonesia +* europe-north1, Hamina, Finland +* europe-west1, St. 
Ghislain, Belgium +* europe-west2, London, England, UK +* europe-west3, Frankfurt, Germany +* europe-west4, Eemshaven, Netherlands +* europe-west6, Zürich, Switzerland +* northamerica-northeast1, Montréal, Québec, Canada +* southamerica-east1, Osasco (São Paulo), Brazil +* us-central1, Council Bluffs, Iowa, USA +* us-east1, Moncks Corner, South Carolina, USA +* us-east4, Ashburn, Northern Virginia, USA +* us-west1, The Dalles, Oregon, USA +* us-west2, Los Angeles, California, USA +* us-west3, Salt Lake City, Utah, USA +* us-west4, Las Vegas, Nevada, USA + +Multi-AZ clusters can only be deployed in regions with at least 3 availability zones (see link:https://aws.amazon.com/about-aws/global-infrastructure/regions_az/[AWS] and link:https://cloud.google.com/compute/docs/regions-zones[Google Cloud]). + +Each new {product-title} cluster is installed within a dedicated Virtual Private Cloud (VPC) in a single Region, with the option to deploy into a single Availability Zone (Single-AZ) or across multiple Availability Zones (Multi-AZ). This provides cluster-level network and resource isolation, and enables cloud-provider VPC settings, such as VPN connections and VPC Peering. Persistent volumes are backed by cloud block storage and are specific to the availability zone in which they are provisioned. Persistent volumes do not bind to a volume until the associated pod resource is assigned into a specific availability zone in order to prevent unschedulable pods. Availability zone-specific resources are only usable by resources in the same availability zone. + +[WARNING] +==== +The region and the choice of single or multi availability zone cannot be changed once a cluster has been deployed. +==== + +[id="sla_{context}"] +== Service level agreement (SLA) +Any SLAs for the service itself are defined in Appendix 4 of the link:https://www.redhat.com/en/about/agreements[Red Hat Enterprise Agreement Appendix 4 (Online Subscription Services)]. + +[id="limited-support_{context}"] +== Limited support status + +You must not remove or replace any native {product-title} components or any other component installed and managed by Red Hat. If using cluster administration rights, Red Hat is not responsible for any actions taken by you or any of your authorized users, including actions that might affect infrastructure services, service availability, and data loss. + +If any actions that affect infrastructure services, service availability, or data loss are detected, Red Hat will notify the customer of such and request either that the action be reverted or to create a support case to work with Red Hat to remedy any issues. + +[id="support_{context}"] +== Support +{product-title} includes Red Hat Premium Support, which can be accessed by using the link:https://access.redhat.com/support?extIdCarryOver=true&sc_cid=701f2000001Css5AAC[Red Hat Customer Portal]. + +See the link:https://access.redhat.com/support/offerings/production/soc[Scope of Coverage Page] for link:https://access.redhat.com/support/offerings/production/scope_moredetail[more details] on what is covered with included support for {product-title}. + +See {product-title} link:https://access.redhat.com/support/offerings/openshift/sla?extIdCarryOver=true&sc_cid=701f2000001Css5AAC[SLAs] for support response times. 
diff --git a/modules/sdpolicy-logging.adoc b/modules/sdpolicy-logging.adoc new file mode 100644 index 0000000000..5c2e634229 --- /dev/null +++ b/modules/sdpolicy-logging.adoc @@ -0,0 +1,18 @@ + +// Module included in the following assemblies: +// +// * assemblies/osd-service-definition.adoc + +[id="sdpolicy-logging_{context}"] += Logging + +{product-title} provides optional integrated log forwarding to Amazon CloudWatch. + +[id="audit-logging_{context}"] +== Cluster audit logging +Cluster audit logs are available through Amazon CloudWatch, if the integration is enabled. If the integration is not enabled, you can request the audit logs by opening a support case. Audit log requests must specify a date and time range not to exceed 21 days. When requesting audit logs, customers should be aware that audit logs are many GB per day in size. + + +[id="application-logging_{context}"] +== Application logging +Application logs sent to `STDOUT` are collected by Fluentd and forwarded to Amazon CloudWatch through the cluster logging stack, if it is installed. diff --git a/modules/sdpolicy-monitoring.adoc b/modules/sdpolicy-monitoring.adoc new file mode 100644 index 0000000000..a0d3f5bbe0 --- /dev/null +++ b/modules/sdpolicy-monitoring.adoc @@ -0,0 +1,17 @@ + +// Module included in the following assemblies: +// +// * assemblies/osd-service-definition.adoc + +[id="sdpolicy-monitoring_{context}"] += Monitoring + +[id="cluster-metrics_{context}"] +== Cluster metrics + +{product-title} clusters come with an integrated Prometheus/Grafana stack for cluster monitoring including CPU, memory, and network-based metrics. This is accessible through the web console and can also be used to view cluster-level status and capacity/usage through a Grafana dashboard. These metrics also allow for horizontal pod autoscaling based on CPU or memory metrics provided by an {product-title} user. + +[id="cluster-status-notification_{context}"] +== Cluster status notification + +Red Hat communicates the health and status of {product-title} clusters through a combination of a cluster dashboard available in {OCM}, and email notifications sent to the email address of the contact that originally deployed the cluster. diff --git a/modules/sdpolicy-networking.adoc b/modules/sdpolicy-networking.adoc new file mode 100644 index 0000000000..ded17bdb17 --- /dev/null +++ b/modules/sdpolicy-networking.adoc @@ -0,0 +1,82 @@ + +// Module included in the following assemblies: +// +// * assemblies/osd-service-definition.adoc + +[id="sdpolicy-networking_{context}"] += Networking + +[id="custom-domains_{context}"] +== Custom domains for applications +To use a custom hostname for a route, you must update your DNS provider by creating a canonical name (CNAME) record. Your CNAME record should map the OpenShift canonical router hostname to your custom domain. The OpenShift canonical router hostname is shown on the *Route Details* page after a Route is created. Alternatively, a wildcard CNAME record can be created once to route all subdomains for a given hostname to the cluster's router. + +[id="custom-domains-cluster_{context}"] +== Custom domains for cluster services +Custom domains and subdomains are not available for the platform service routes, for example, the API or web console routes, or for the default application routes. + +[id="domain-validated-certificates_{context}"] +== Domain validated certificates +{product-title} includes TLS security certificates needed for both internal and external services on the cluster. 
For external routes, there are two separate TLS wildcard certificates that are provided and installed on each cluster: one for the web console and route default hostnames, and the second for the API endpoint. _Let's Encrypt_ is the certificate authority used for certificates. Routes within the cluster, for example, the internal link:https://kubernetes.io/docs/tasks/access-application-cluster/access-cluster/#accessing-the-api-from-a-pod[API endpoint], use TLS certificates signed by the cluster's built-in certificate authority and require the CA bundle available in every pod for trusting the TLS certificate.
+
+[id="custom-certificate-authorities_{context}"]
+== Custom certificate authorities for builds
+{product-title} supports the use of custom certificate authorities to be trusted by builds when pulling images from an image registry.
+
+[id="load-balancers_{context}"]
+== Load balancers
+{product-title} uses up to 5 different load balancers:
+
+* Internal control plane load balancer that is internal to the cluster and used to balance traffic for internal cluster communications.
+* External control plane load balancer that is used for accessing the {OCP} and Kubernetes APIs. This load balancer can be disabled in {OCM}. If this load balancer is disabled, Red Hat reconfigures the API DNS to point to the internal control plane load balancer.
+* External control plane load balancer for Red Hat that is reserved for cluster management by Red Hat. Access is strictly controlled, and communication is only possible from allowlisted bastion hosts.
+* Default router/ingress load balancer that is the default application load balancer, denoted by `apps` in the URL. The default load balancer can be configured in {OCM} to be either publicly accessible over the internet, or only privately accessible over a pre-existing private connection. All application routes on the cluster are exposed on this default router load balancer, including cluster services such as the logging UI, metrics API, and registry.
+* Optional: Secondary router/ingress load balancer that is a secondary application load balancer, denoted by `apps2` in the URL. The secondary load balancer can be configured in {OCM} to be either publicly accessible over the internet, or only privately accessible over a pre-existing private connection. If a 'Label match' is configured for this router load balancer, then only application routes matching this label are exposed on this router load balancer; otherwise, all application routes are also exposed on this router load balancer.
+* Optional: Load balancers for services that can be mapped to a service running on {product-title} to enable advanced ingress features, such as non-HTTP/SNI traffic or the use of non-standard ports. These can be purchased in groups of 4 for standard clusters, or they can be provisioned without charge in Customer Cloud Subscription (CCS) clusters; however, each AWS account has a quota that link:https://docs.aws.amazon.com/elasticloadbalancing/latest/classic/elb-limits.html[limits the number of Classic Load Balancers] that can be used within each cluster.
+
+[id="network-usage_{context}"]
+== Network usage
+For standard {product-title} clusters, network usage is measured based on data transfer between inbound, VPC peering, VPN, and AZ traffic. On a standard {product-title} base cluster, 12 TB of network I/O is provided. Additional network I/O can be purchased in 12 TB increments.
For CCS {product-title} clusters, network usage is not monitored and is billed directly by the cloud provider.
+
+[id="cluster-ingress_{context}"]
+== Cluster ingress
+Project administrators can add route annotations for many different purposes, including ingress control through IP allowlisting.
+
+Ingress policies can also be changed by using `NetworkPolicy` objects, which leverage the `ovs-networkpolicy` plugin. This allows for full control over the ingress network policy down to the pod level, including between pods on the same cluster and even in the same namespace.
+
+All cluster ingress traffic goes through the defined load balancers. Direct access to all nodes is blocked by cloud configuration.
+
+[id="cluster-egress_{context}"]
+== Cluster egress
+Pod egress traffic control through `EgressNetworkPolicy` objects can be used to prevent or limit outbound traffic in {product-title}.
+
+Public outbound traffic from the control plane and infrastructure nodes is required and necessary to maintain cluster image security and cluster monitoring. This requires the `0.0.0.0/0` route to belong only to the internet gateway; it is not possible to route this range over private connections.
+
+{product-title} clusters use NAT Gateways to present a public, static IP for any public outbound traffic leaving the cluster. Each subnet a cluster is deployed into receives a distinct NAT Gateway. For clusters deployed on AWS with multiple availability zones, up to 3 unique static IP addresses can exist for cluster egress traffic. For clusters deployed on Google Cloud, regardless of availability zone topology, there will be 1 static IP address for worker node egress traffic. Any traffic that remains inside the cluster or does not go out to the public internet will not pass through the NAT Gateway and will have a source IP address belonging to the node that the traffic originated from. Node IP addresses are dynamic, and therefore a customer should not rely on allowlisting individual IP addresses when accessing private resources.
+
+Customers can determine their public static IP addresses by running a pod on the cluster and then querying an external service. For example:
+
+[source,terminal]
+----
+$ oc run ip-lookup --image=busybox -i -t --restart=Never --rm -- /bin/sh -c "/bin/nslookup -type=a myip.opendns.com resolver1.opendns.com | grep -E 'Address: [0-9.]+'"
+----
+
+[id="cloud-network-configuration_{context}"]
+== Cloud network configuration
+{product-title} allows for the configuration of a private network connection through several cloud provider-managed technologies:
+
+* VPN connections
+* AWS VPC peering
+* AWS Transit Gateway
+* AWS Direct Connect
+* Google Cloud VPC Network peering
+* Google Cloud Classic VPN
+* Google Cloud HA VPN
+
+[IMPORTANT]
+====
+Red Hat SREs do not monitor private network connections. Monitoring these connections is the responsibility of the customer.
+====
+
+[id="dns-forwarding_{context}"]
+== DNS forwarding
+For {product-title} clusters that have a private cloud network configuration, a customer can specify internal DNS servers available on that private connection that should be queried for explicitly provided domains.
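+
+As an illustration of the forwarding behavior described above, the following is a minimal sketch of the {OCP} DNS Operator configuration that expresses this type of rule. The zone name and upstream address are placeholders, and the example is for illustration only rather than a prescribed configuration step:
+
+[source,yaml]
+----
+apiVersion: operator.openshift.io/v1
+kind: DNS
+metadata:
+  name: default
+spec:
+  servers:
+  - name: example-private-dns      # placeholder name for the forwarding rule
+    zones:
+    - example.internal             # placeholder domain reachable over the private connection
+    forwardPlugin:
+      upstreams:
+      - 10.0.0.10                  # placeholder internal DNS server IP address
+----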
diff --git a/modules/sdpolicy-platform.adoc b/modules/sdpolicy-platform.adoc new file mode 100644 index 0000000000..68ca2774ca --- /dev/null +++ b/modules/sdpolicy-platform.adoc @@ -0,0 +1,95 @@ + +// Module included in the following assemblies: +// +// * assemblies/osd-service-definition.adoc + +[id="sdpolicy-platform_{context}"] += Platform + +[id="cluster-backup-policy_{context}"] +== Cluster backup policy + +[IMPORTANT] +==== +It is critical that customers have a backup plan for their applications and application data. +==== +Application and application data backups are not a part of the {product-title} service. +All Kubernetes objects in each {product-title} cluster are backed up to facilitate a prompt recovery in the unlikely event that a cluster becomes irreparably inoperable. + +The backups are stored in a secure object storage (Multi-AZ) bucket in the same account as the cluster. +Node root volumes are not backed up because Red Hat Enterprise Linux CoreOS is fully managed by the {OCP} cluster and no stateful data should be stored on the root volume of a node. + +The following table shows the frequency of backups: +[cols="4",options="header"] +|=== + +|Component +|Snapshot Frequency +|Retention +|Notes + +|Full object store backup +|Daily at 0100 UTC +|7 days +|This is a full backup of all Kubernetes objects. No persistent volumes (PVs) are backed up in this backup schedule. + +|Full object store backup +|Weekly on Mondays at 0200 UTC +|30 days +|This is a full backup of all Kubernetes objects. No PVs are backed up in this backup schedule. + +|Full object store backup +|Hourly at 17 minutes past the hour +|24 hours +|This is a full backup of all Kubernetes objects. No PVs are backed up in this backup schedule. + +|=== + +[id="autoscaling_{context}"] +== Autoscaling +Node autoscaling is not available on {product-title} at this time. + +[id="daemon-sets_{context}"] +== Daemon sets +Customers may create and run DaemonSets on {product-title}. In order to restrict DaemonSets to only running on worker nodes, use the following nodeSelector: + +[source,yaml] +---- +... +spec: + nodeSelector: + role: worker +... +---- + +[id="multi-availability-zones_{context}"] +== Multiple availability zone +In a multiple availability zone cluster, control nodes are distributed across availability zones and at least three worker nodes are required in each availability zone. + +[id="node-labels_{context}"] +== Node labels +Custom node labels are created by Red Hat during node creation and cannot be changed on {product-title} clusters at this time. + +[id="openshift-version_{context}"] +== OpenShift version +{product-title} is run as a service and is kept up to date with the latest {OCP} version. + +[id="upgrades_{context}"] +== Upgrades +Refer to link:https://access.redhat.com/support/policy/updates/openshift/dedicated[{product-title} Life Cycle] for more information on the upgrade policy and procedures. + +[id="windows-containers_{context}"] +== Windows containers +Windows containers are not available on {product-title} at this time. + +[id="container-engine_{context}"] +== Container engine +{product-title} runs on OpenShift 4 and uses link:https://www.redhat.com/en/blog/red-hat-openshift-container-platform-4-now-defaults-cri-o-underlying-container-engine[CRI-O ] as the only available container engine. + +[id="operating-system_{context}"] +== Operating system +{product-title} runs on OpenShift 4 and uses Red Hat Enterprise Linux CoreOS as the operating system for all control plane and worker nodes. 
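+
+Expanding on the `nodeSelector` snippet in the "Daemon sets" section above, a complete daemon set manifest might look like the following minimal sketch; the name, namespace, labels, and image are placeholders:
+
+[source,yaml]
+----
+apiVersion: apps/v1
+kind: DaemonSet
+metadata:
+  name: example-daemonset           # placeholder name
+  namespace: example-project        # placeholder customer project
+spec:
+  selector:
+    matchLabels:
+      app: example-daemonset
+  template:
+    metadata:
+      labels:
+        app: example-daemonset
+    spec:
+      nodeSelector:
+        role: worker                # restricts the pods to worker nodes, as described above
+      containers:
+      - name: example
+        image: registry.access.redhat.com/ubi8/ubi-minimal   # placeholder image
+        command: ["sleep", "infinity"]
+----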
+ +[id="kubernetes-operator-support_{context}"] +== Kubernetes Operator support +All Operators listed in the OperatorHub marketplace should be available for installation. Operators installed from OperatorHub, including Red Hat Operators, are not SRE managed as part of the {product-title} service. Refer to the link:https://access.redhat.com/solutions/4807821[Red Hat Customer Portal] for more information on the supportability of a given Operator. diff --git a/modules/sdpolicy-security.adoc b/modules/sdpolicy-security.adoc new file mode 100644 index 0000000000..f0538fd9f5 --- /dev/null +++ b/modules/sdpolicy-security.adoc @@ -0,0 +1,60 @@ + +// Module included in the following assemblies: +// +// * assemblies/osd-service-definition.adoc + +[id="sdpolicy-security_{context}"] += Security + +[id="auth-provider_{context}"] +== Authentication provider +Authentication for the cluster is configured as part of the {OCM} cluster creation process. OpenShift is not an identity provider, and all access to the cluster must be managed by the customer as part of their integrated solution. Provisioning multiple identity providers provisioned at the same time is supported. The following identity providers are supported: + +* GitHub or GitHub Enterprise OAuth +* GitLab OAuth +* Google OAuth +* LDAP +* OpenID connect + +[id="privileged-containers_{context}"] +== Privileged containers +Privileged containers are not available by default on {product-title}. The `anyuid` and `nonroot` Security Context Constraints are available for members of the `dedicated-admins` group, and should address many use cases. Privileged containers are only available for `cluster-admin` users. + +[id="cluster-admin-user_{context}"] +== Customer administrator user +In addition to normal users, {product-title} provides access to an {product-title}-specific group called `dedicated-admin`. Any users on the cluster that are members of the `dedicated-admin` group: + +* Have administrator access to all customer-created projects on the cluster. +* Can manage resource quotas and limits on the cluster. +* Can add and manage `NetworkPolicy` objects. +* Are able to view information about specific nodes and PVs in the cluster, including scheduler information. +* Can access the reserved `dedicated-admin` project on the cluster, which allows for the creation of service accounts with elevated privileges and also gives the ability to update default limits and quotas for projects on the cluster. + +[id="cluster-admin-role_{context}"] +== Cluster administration role +As an administrator of {product-title} with Customer Cloud Subscriptions (CCS), you have access to the `cluster-admin` role. While logged in to an account with the `cluster-admin` role, users have mostly unrestricted access to control and configure the cluster. There are some configurations that are blocked with webhooks to prevent destablizing the cluster, or because they are managed in {OCM} and any in-cluster changes would be overwritten. + + +[id="project-self-service_{context}"] +== Project self-service +All users, by default, have the ability to create, update, and delete their projects. 
This can be restricted if a member of the `dedicated-admin` group removes the self-provisioner role from authenticated users:
+
+[source,terminal]
+----
+$ oc adm policy remove-cluster-role-from-group self-provisioner system:authenticated:oauth
+----
+
+Restrictions can be reverted by applying the following command:
+
+[source,terminal]
+----
+$ oc adm policy add-cluster-role-to-group self-provisioner system:authenticated:oauth
+----
+
+[id="regulatory-compliance_{context}"]
+== Regulatory compliance
+See link:https://www.openshift.com/products/dedicated/process-and-security#compliance[OpenShift Dedicated Process and Security Overview] for the latest compliance information.
+
+[id="network-security_{context}"]
+== Network security
+With {product-title} on AWS, AWS provides standard DDoS protection, called AWS Shield, on all load balancers. This provides 95% protection against most commonly used layer 3 and 4 attacks on all the public-facing load balancers used for {product-title}. To provide additional protection, HTTP requests to the haproxy router must receive a response within 10 seconds or the connection is closed.
diff --git a/modules/sdpolicy-storage.adoc b/modules/sdpolicy-storage.adoc
new file mode 100644
index 0000000000..c610c38934
--- /dev/null
+++ b/modules/sdpolicy-storage.adoc
@@ -0,0 +1,28 @@
+
+// Module included in the following assemblies:
+//
+// * assemblies/osd-service-definition.adoc
+
+[id="sdpolicy-storage_{context}"]
+= Storage
+
+[id="encrypt-rest-node_{context}"]
+== Encrypted-at-rest OS/node storage
+Control plane nodes use encrypted-at-rest EBS storage.
+
+[id="encrypt-rest-pv_{context}"]
+== Encrypted-at-rest PV
+EBS volumes used for persistent volumes (PVs) are encrypted-at-rest by default.
+
+[id="block-storage_{context}"]
+== Block storage (RWO)
+Persistent volumes (PVs) are backed by AWS EBS and Google Cloud persistent disk block storage, which uses the ReadWriteOnce (RWO) access mode. On a standard {product-title} base cluster, 100 GB of block storage is provided for PVs, which is dynamically provisioned and recycled based on application requests. Additional persistent storage can be purchased in 500 GB increments.
+
+PVs can only be attached to a single node at a time and are specific to the availability zone in which they were provisioned, but they can be attached to any node in the availability zone.
+
+Each cloud provider has its own limits for how many PVs can be attached to a single node. See link:https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/volume_limits.html#instance-type-volume-limits[AWS instance type limits] or link:https://cloud.google.com/compute/docs/machine-types#custom_machine_types[Google Cloud Platform custom machine types] for details.
+
+[id="shared-storage_{context}"]
+== Shared storage (RWX)
+
+The link:https://access.redhat.com/articles/5025181[AWS CSI Driver] can be used to provide RWX support for {product-title} on AWS. A community Operator is provided to simplify setup.
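+
+To illustrate the shared storage option described above, a `ReadWriteMany` claim might look like the following sketch; the storage class name depends on how the CSI driver was configured and is a placeholder here:
+
+[source,yaml]
+----
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+  name: example-shared-claim        # placeholder name
+spec:
+  accessModes:
+  - ReadWriteMany                   # RWX access so that multiple pods can mount the volume
+  storageClassName: efs-sc          # placeholder storage class provided by the CSI driver setup
+  resources:
+    requests:
+      storage: 10Gi
+----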
diff --git a/modules/understanding-admin-roles.adoc b/modules/understanding-admin-roles.adoc new file mode 100644 index 0000000000..12aa1445f9 --- /dev/null +++ b/modules/understanding-admin-roles.adoc @@ -0,0 +1,20 @@ + +// Module included in the following assemblies: +// +// * // * administering_a_cluster/osd-admin-roles.adoc + +[id="understanding-admin-roles_{context}"] += Understanding administration roles + +== The cluster-admin role +As an administrator of an {product-title} cluster with Customer Cloud Subscriptions (CCS), you have access to the `cluster-admin` role. The user who created the cluster can add the `cluster-admin` user role to an account to have the maximum administrator privileges. These privileges are not automatically assigned to your user account when you create the cluster. While logged in to an account with the cluster-admin role, users have mostly unrestricted access to control and configure the cluster. There are some configurations that are blocked with webhooks to prevent destabilizing the cluster, or because they are managed in {cloud-redhat-com} and any in-cluster changes would be overwritten. Usage of the cluster-admin role is subject to the restrictions listed in your Appendix 4 agreement with Red Hat. As a best practice, limit the number of `cluster-admin` users to as few as possible. + + +== The dedicated-admin role +As an administrator of an {product-title} cluster, your account has additional permissions and access to all user-created projects in your organization’s cluster. While logged in to an account with the `dedicated-admin` role, the developer CLI commands (under the `oc` command) allow you increased visibility and management capabilities over objects across projects, while the administrator CLI commands (under the `oc adm` command) allow you to complete additional operations. + +[NOTE] +==== +While your account does have these increased permissions, the actual cluster maintenance and host configuration is still performed by the OpenShift Operations Team. If you would like to request a change to your cluster that you cannot perform using the administrator CLI, open a support case on the link:https://access.redhat.com/support/[Red Hat Customer Portal]. +==== +// TODO: this is the only reference to the "OpenShift Operations Team". Should this be that SRE team? diff --git a/modules/understanding-clusters.adoc b/modules/understanding-clusters.adoc new file mode 100644 index 0000000000..60ef7850b1 --- /dev/null +++ b/modules/understanding-clusters.adoc @@ -0,0 +1,18 @@ + +// Module included in the following assemblies: +// +// * assemblies/create-your-cluster.adoc + +[id="understanding-clusters_{context}"] += Understanding your cluster cloud options + +{product-title} offers {OCP} clusters as a managed service on {AWS} or {GCP}. You can purchase a standard cluster through Red Hat. Alternatively, you can use your existing cloud account through the Customer Cloud Subscription (CCS) model to leverage discounts. + +== Standard clusters + +Standard {product-title} clusters are deployed into their own AWS or GCP infrastructure accounts, each owned by Red Hat. Red Hat is responsible for the account, and the cloud infrastructure costs are paid directly by Red Hat. The customer only pays the Red Hat subscription costs. + +== Customer Cloud Subscription (CCS) +The Customer Cloud Subscription (CCS) model enables Red Hat to deploy and manage {product-title} clusters in an existing AWS or GCP account owned by the customer. 
Red Hat requires several prerequisites be met in order to provide this service, and this service is supported by Red Hat Site Reliability Engineers (SRE). + +In the CCS model, the customer pays the cloud infrastructure provider directly for cloud costs, and the cloud infrastructure account is part of a customer’s organization, with specific access granted to Red Hat. In this model, the customer pays Red Hat for the CCS subscription and pays the cloud provider for the cloud costs. diff --git a/modules/understanding-idp.adoc b/modules/understanding-idp.adoc new file mode 100644 index 0000000000..8abe316b2a --- /dev/null +++ b/modules/understanding-idp.adoc @@ -0,0 +1,82 @@ +// Module included in the following assemblies: +// +// * identity_providers/config-identity-providers.adoc +// * rosa_getting_started/rosa-config-identity-providers.adoc +// * rosa_getting_started_sts/rosa-sts-config-identity-providers.adoc + +[id="understanding-idp_{context}"] += Understanding identity providers + +{product-title} includes a built-in OAuth server. Developers and administrators obtain OAuth access tokens to authenticate themselves to the API. As an administrator, you can configure OAuth to specify an identity provider after you install your cluster. Configuring identity providers allows users to log in and access the cluster. + +[id="understanding-idp-supported_{context}"] +== Supported identity providers + +You can configure the following types of identity providers: + +[cols="2a,8a",options="header"] +|=== + +|Identity provider +|Description + +|GitHub or GitHub Enterprise +|Configure a `github` identity provider to validate usernames and passwords against GitHub or GitHub Enterprise's OAuth authentication server. + +|GitLab +|Configure a `gitlab` identity provider to use link:https://gitlab.com/[GitLab.com] or any other GitLab instance as an identity provider. + +|Google +|Configure a `google` identity provider using link:https://developers.google.com/identity/protocols/OpenIDConnect[Google's OpenID Connect integration]. + +|LDAP +|Configure the `ldap` identity provider to validate usernames and passwords against an LDAPv3 server, using simple bind authentication. + +|OpenID Connect +|Configure an `oidc` identity provider to integrate with an OpenID Connect identity provider using an link:http://openid.net/specs/openid-connect-core-1_0.html#CodeFlowAuth[Authorization Code Flow]. + +|HTPasswd +|Configure an `htpasswd` identity provider for a single, static administration user. You can log in to the cluster as the user to troubleshoot issues. + +|=== + +[id="understanding-idp-parameters_{context}"] +== Identity provider parameters + +The following parameters are common to all identity providers: + +[cols="2a,8a",options="header"] +|=== +|Parameter | Description +|`name` | The provider name is prefixed to provider user names to form an +identity name. + +|`mappingMethod` | Defines how new identities are mapped to users when they log in. +Enter one of the following values: + +claim:: The default value. Provisions a user with the identity's preferred +user name. Fails if a user with that user name is already mapped to another +identity. +lookup:: Looks up an existing identity, user identity mapping, and user, +but does not automatically provision users or identities. This allows cluster +administrators to set up identities and users manually, or using an external +process. Using this method requires you to manually provision users. +generate:: Provisions a user with the identity's preferred user name. 
If a +user with the preferred user name is already mapped to an existing identity, a +unique user name is generated. For example, `myuser2`. This method should not be +used in combination with external processes that require exact matches between +{product-title} user names and identity provider user names, such as LDAP group +sync. +add:: Provisions a user with the identity's preferred user name. If a user +with that user name already exists, the identity is mapped to the existing user, +adding to any existing identity mappings for the user. Required when multiple +identity providers are configured that identify the same set of users and map to +the same user names. +|=== + +[NOTE] +==== +When adding or changing identity providers, you can map identities from the new +provider to existing users by setting the `mappingMethod` parameter to +`add`. +==== diff --git a/modules/upgrade-auto.adoc b/modules/upgrade-auto.adoc new file mode 100644 index 0000000000..ed58693f56 --- /dev/null +++ b/modules/upgrade-auto.adoc @@ -0,0 +1,31 @@ + +// Module included in the following assemblies: +// +// * assemblies/upgrades.adoc + +[id="upgrade-auto_{context}"] + += Automatically upgrading your cluster through OCM + + +You can use the OpenShift Cluster Manager (OCM) to automatically upgrade your {product-title} cluster on a weekly basis. Based on upstream changes, there might be times when no updates are released. Therefore, no upgrade occurs for that week. + +.Procedure + +. From link:https://cloud.redhat.com/openshift[OCM], select your cluster from the clusters list. + +. Click the *Upgrade settings* tab to access the upgrade operator. + +. To schedule automatic upgrades, select *Automatic*. + +. Specify the day of the week and the time you want your cluster to upgrade. + +. Click *Save*. + +. Optional: Set a grace period for *Node draining* by selecting a designated amount of time from the drop down list. A *1 hour* grace period is set by default. + +. To edit an existing automatic upgrade policy, edit the preferred day or start time from the *Upgrade Settings* tab. Click *Save*. + +. To cancel an automatic upgrade policy, switch the upgrade method to manual from the *Upgrade Settings* tab. Click *Save*. + +On the *Upgrade settings* tab, the *Upgrade status* box indicates that an upgrade is scheduled. The date and time of the next scheduled update is listed. diff --git a/modules/upgrade-manual.adoc b/modules/upgrade-manual.adoc new file mode 100644 index 0000000000..72d10e7631 --- /dev/null +++ b/modules/upgrade-manual.adoc @@ -0,0 +1,45 @@ + +// Module included in the following assemblies: +// +// * assemblies/upgrades.adoc + +[id="upgrade-manual_{context}"] + += Manually upgrading your cluster through OCM + + +You can use the OpenShift Cluster Manager (OCM) to manually upgrade your {product-title} cluster. + + +.Procedure + +. From link:https://cloud.redhat.com/openshift[OCM], select your cluster from the clusters list. + +. Click the *Upgrade settings* tab to access the upgrade operator. You can also update your cluster from the *Overview* tab by clicking *Update* next to the cluster version under the *Details* heading. + +. Click *Update* in the *Update Status* box. + +. Select the version you want to upgrade your cluster to. Recommended cluster upgrades will be notated in the UI. To learn more about each available upgrade version, click *View release notes*. + +. Click *Next*. + +. To schedule your upgrade: +- Click *Upgrade now* to upgrade within the next hour. 
+- Click *Schedule a different time* and specify the date and time that you want the cluster to upgrade. + +. Click *Next*. + +. Review the upgrade policy and click *Confirm upgrade*. + +. A confirmation appears when the cluster upgrade has been scheduled. Click *Close*. + +. Optional: Set a grace period for *Node draining* by selecting a designated amount of time from the drop down list. A *1 hour* grace period is set by default. + +From the *Overview* tab, next to the cluster version, the UI notates that the upgrade has been scheduled. Click *View details* to view the upgrade details. If you need to cancel the scheduled upgrade, you can click *Cancel this upgrade* from the *View Details* pop-up. + +The same upgrade details are available on the *Upgrade settings* tab under the *Upgrade status* box. If you need to cancel the scheduled upgrade, you can click *Cancel this upgrade* from the *Upgrade status* box. + +[WARNING] +==== +In the event that a CVE or other critical issue to OpenShift Dedicated is found, all clusters are upgraded within 48 hours of the fix being released. You are notified when the fix is available and informed that the cluster will be automatically upgraded at your latest preferred start time before the 48 hour window closes. You can also upgrade manually at any time before the automatic upgrade starts. +==== diff --git a/modules/upgrade.adoc b/modules/upgrade.adoc new file mode 100644 index 0000000000..2c75db8b0f --- /dev/null +++ b/modules/upgrade.adoc @@ -0,0 +1,55 @@ + +// Module included in the following assemblies: +// +// * assemblies/upgrades.adoc + +[id="upgrade_{context}"] += Understanding {product-title} cluster upgrades + + +When upgrades are made available for your {product-title} cluster, you can upgrade to the newest version through the OpenShift Cluster Manager (OCM) or the OCM CLI. You can set your upgrade policies on existing clusters or during cluster creation, and upgrades can be scheduled to occur automatically or manually. + +Red Hat Site Reliability Engineers (SRE) will provide a curated list of available versions for your {product-title} clusters. For each cluster you will be able to review the full list of available releases, as well as the corresponding release notes. OCM will enable installation of clusters at the latest supported versions, and upgrades can be canceled at any time. + +You can also set a grace period for how long `PodDisruptionBudget` protected workloads are respected during upgrades. After this grace period, any workloads protected by `PodDisruptionBudget` that have not been successfully drained from a node, will be forcibly deleted. + +[NOTE] +==== +All Kubernetes objects and PVs in each {product-title} cluster are backed up as part of the {product-title} service. Application and application data backups are not a part of the {product-title} service. Ensure you have a backup policy in place for your applications and application data prior to scheduling upgrades. +==== + +[id="upgrade-automatic_{context}"] +== Automatic upgrades + +Upgrades can be scheduled to occur automatically on a day and time specified by the cluster owner or administrator. Upgrades occur on a weekly basis, unless an upgrade is unavailable for that week. + +[NOTE] +==== +Automatic upgrade policies are optional and if they are not set, the upgrade policies default to manual. +==== + +[id="upgrade-manual_upgrades_{context}"] +== Manual upgrades + +If you opt for manual upgrades, you are responsible for updating your cluster. 
If your cluster version falls too far behind, it will transition to a limited support status. For more information on OpenShift life cycle policies, see xref:../osd_policy/osd-life-cycle.adoc#osd-life-cycle[{product-title} update life cycle]. + +[id="upgrade-notifications_{context}"] +== Upgrade notifications + +From the OCM console you can view your cluster's history from the *Overview* tab. The Upgrade states can be viewed in the service log under the *Cluster history* heading. + +Every change of state also triggers an email notification to the cluster owner and subscribed users. You will receive email notifications for the following events: + +* An upgrade has been scheduled. +* An upgrade has started. +* An upgrade has completed. +* An upgrade has been canceled. + +[NOTE] +==== +For automatic upgrades, you will also receive email notifications before the upgrade occurs based on the following cadence: + +* 2 week notice +* 1 week notice +* 1 day notice +==== diff --git a/modules/viewing-notifications.adoc b/modules/viewing-notifications.adoc new file mode 100644 index 0000000000..873ce1cc1b --- /dev/null +++ b/modules/viewing-notifications.adoc @@ -0,0 +1,27 @@ + +// Module included in the following assemblies: +// +// * assemblies/notifications.adoc + +[id="viewing-notifications{context}"] += Viewing {product-title} cluster notifications + + +Service logs are recorded under the *Cluster history* heading on the *Overview* tab from OpenShift Cluster Manager (OCM) and are also available through the OCM CLI. + +.Procedure + +. From link:https://cloud.redhat.com/openshift[OCM], navigate to the *Clusters* page and select your cluster. + +. On the *Overview* tab, under *Cluster history*, you can view all cluster events from the service log. + +. Optional: Filter the cluster service logs by *Description* or *Severity* from the drop-down menu. You can filter further by entering a specific item in the search bar. + +. Optional: Click *Download history* to download the cluster history service logs. Select *JSON* or *CSV* for the output file type and then click *Download*. + +. To view service logs from the OCM CLI, enter the following command: ++ +[source,terminal] +---- +$ ocm get /api/service_logs/v1/cluster_logs --parameter search="cluster_uuid is ''" +---- diff --git a/monitoring/osd-accessing-monitoring-for-user-defined-projects.adoc b/monitoring/osd-accessing-monitoring-for-user-defined-projects.adoc new file mode 100644 index 0000000000..6c804eddfe --- /dev/null +++ b/monitoring/osd-accessing-monitoring-for-user-defined-projects.adoc @@ -0,0 +1,22 @@ +[id="osd-accessing-monitoring-for-user-defined-projects"] += Accessing monitoring for user-defined projects +include::modules/attributes-openshift-dedicated.adoc[] +:context: osd-accessing-monitoring-for-user-defined-projects + +toc::[] + +By default, centralized monitoring for user-defined projects and platform monitoring are enabled. You can monitor your own projects in {product-title} without the need for an additional monitoring solution. + +The monitoring of user-defined projects cannot be disabled. + +The `dedicated-admin` user has default permissions to configure and access monitoring for user-defined projects. + +[NOTE] +==== +Custom Prometheus instances and the Prometheus Operator installed through Operator Lifecycle Manager (OLM) can cause issues with user-defined project monitoring if it is enabled. Custom Prometheus instances are not supported. 
+==== + +[id="accessing-user-defined-monitoring-next-steps"] +== Next steps + +* xref:../monitoring/osd-managing-metrics.adoc#osd-managing-metrics[Managing metrics] diff --git a/monitoring/osd-configuring-the-monitoring-stack.adoc b/monitoring/osd-configuring-the-monitoring-stack.adoc new file mode 100644 index 0000000000..a9abad4654 --- /dev/null +++ b/monitoring/osd-configuring-the-monitoring-stack.adoc @@ -0,0 +1,77 @@ +[id="osd-configuring-the-monitoring-stack"] += Configuring the monitoring stack +include::modules/attributes-openshift-dedicated.adoc[] +:context: osd-configuring-the-monitoring-stack + +toc::[] + +This document explains what is supported for the monitoring of user-defined projects. It also shows how to configure the monitoring stack, and demonstrates several common configuration scenarios. + +// Maintenance and support for monitoring +include::modules/osd-monitoring-maintenance-and-support.adoc[leveloffset=+1] +include::modules/osd-monitoring-support-considerations.adoc[leveloffset=+2] + +// Configuring the monitoring stack +include::modules/osd-monitoring-configuring-the-monitoring-stack.adoc[leveloffset=+1] + +// Configurable monitoring components +include::modules/osd-monitoring-configurable-monitoring-components.adoc[leveloffset=+1] + +// Moving monitoring components to different nodes +include::modules/osd-monitoring-moving-monitoring-components-to-different-nodes.adoc[leveloffset=+1] + +.Additional resources + +* link:https://docs.openshift.com/container-platform/4.7/nodes/nodes/nodes-nodes-working.html#nodes-nodes-working-updating_nodes-nodes-working[Understanding how to update labels on nodes] +* link:https://docs.openshift.com/container-platform/4.7/nodes/scheduling/nodes-scheduler-node-selectors.html[Placing pods on specific nodes using node selectors] +* See the link:https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector[Kubernetes documentation] for details on the `nodeSelector` constraint + +// TODO: Same question about whether links out to OCP docs should explicitly mention that? (I won't point out any more, but could easily search to find other instances in the repo). + +// Assigning tolerations to monitoring components +include::modules/osd-monitoring-assigning-tolerations-to-monitoring-components.adoc[leveloffset=+1] + +.Additional resources + +* See the link:https://docs.openshift.com/container-platform/4.7/nodes/scheduling/nodes-scheduler-taints-tolerations.html#nodes-scheduler-taints-tolerations[OpenShift Container Platform documentation] on taints and tolerations +* See the link:https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/[Kubernetes documentation] on taints and tolerations + +// Configuring persistent storage +[id="configuring-persistent-storage"] +== Configuring persistent storage + +Running cluster monitoring with persistent storage means that your metrics are stored to a persistent volume (PV) and can survive a pod being restarted or recreated. This is ideal if you require your metrics data to be guarded from data loss. For production environments, it is highly recommended to configure persistent storage. Because of the high IO demands, it is advantageous to use local storage. + +[IMPORTANT] +==== +See link:https://docs.openshift.com/container-platform/4.7/scalability_and_performance/optimizing-storage.html#recommended-configurable-storage-technology_persistent-storage[Recommended configurable storage technology]. 
+==== + +[id="persistent-storage-prerequisites"] +=== Persistent storage prerequisites + +* Use the block type of storage. + +include::modules/osd-monitoring-configuring-a-local-persistent-volume-claim.adoc[leveloffset=+2] +include::modules/osd-monitoring-modifying-retention-time-for-prometheus-metrics-data.adoc[leveloffset=+2] + +.Additional resources + +* link:https://docs.openshift.com/container-platform/4.7/storage/understanding-persistent-storage.html[Understanding persistent storage] +* link:https://docs.openshift.com/container-platform/4.7/scalability_and_performance/optimizing-storage.html[Optimizing storage] + +// Managing scrape sample limits for user-defined projects +include::modules/osd-monitoring-limiting-scrape-samples-in-user-defined-projects.adoc[leveloffset=+1] +include::modules/osd-monitoring-setting-a-scrape-sample-limit-for-user-defined-projects.adoc[leveloffset=+2] + +.Additional resources + +* link:https://docs.openshift.com/container-platform/4.7/monitoring/troubleshooting-monitoring-issues.html#determining-why-prometheus-is-consuming-disk-space_troubleshooting-monitoring-issues[Determining why Prometheus is consuming a lot of disk space] for steps to query which metrics have the highest number of scrape samples + +// Setting log levels for monitoring components +include::modules/osd-monitoring-setting-log-levels-for-monitoring-components.adoc[leveloffset=+1] + +[id="configuring-the-monitoring-stack-next-steps"] +== Next steps + +* xref:../monitoring/osd-accessing-monitoring-for-user-defined-projects.adoc#osd-accessing-monitoring-for-user-defined-projects[Accessing monitoring for user-defined projects] diff --git a/monitoring/osd-managing-alerts.adoc b/monitoring/osd-managing-alerts.adoc new file mode 100644 index 0000000000..a5f73fc0a1 --- /dev/null +++ b/monitoring/osd-managing-alerts.adoc @@ -0,0 +1,12 @@ +[id="osd-managing-alerts"] += Alerts +include::modules/attributes-openshift-dedicated.adoc[] +:context: osd-managing-alerts + +toc::[] + +Alerts for monitoring workloads in user-defined projects are not currently supported in this product. + +[id="alerts-next-steps"] +== Next steps +* xref:../monitoring/osd-reviewing-monitoring-dashboards.adoc#osd-reviewing-monitoring-dashboards[Reviewing monitoring dashboards] diff --git a/monitoring/osd-managing-metrics.adoc b/monitoring/osd-managing-metrics.adoc new file mode 100644 index 0000000000..f9a10fa360 --- /dev/null +++ b/monitoring/osd-managing-metrics.adoc @@ -0,0 +1,45 @@ +[id="osd-managing-metrics"] += Managing metrics +include::modules/attributes-openshift-dedicated.adoc[] +:context: osd-managing-metrics + +toc::[] + +This document provides an overview about how {product-title} metrics are collected, queried and visualized. + +// Understanding metrics +include::modules/osd-monitoring-understanding-metrics.adoc[leveloffset=+1] + +// Setting up metrics collection for user-defined projects +include::modules/osd-monitoring-setting-up-metrics-collection-for-user-defined-projects.adoc[leveloffset=+1] +include::modules/osd-monitoring-deploying-a-sample-service.adoc[leveloffset=+2] +include::modules/osd-monitoring-specifying-how-a-service-is-monitored.adoc[leveloffset=+2] + +.Additional resources + +* See the link:https://github.com/openshift/prometheus-operator/blob/release-4.7/Documentation/api.md[Prometheus Operator API documentation] for more information on `ServiceMonitor` and `PodMonitor` resources. 
+* xref:../monitoring/osd-accessing-monitoring-for-user-defined-projects.adoc#osd-accessing-monitoring-for-user-defined-projects[Accessing monitoring for user-defined projects]. + +// Querying metrics +include::modules/osd-monitoring-querying-metrics.adoc[leveloffset=+1] +include::modules/osd-monitoring-querying-metrics-for-all-projects-as-an-administrator.adoc[leveloffset=+2] +include::modules/osd-monitoring-querying-metrics-for-user-defined-projects-as-a-developer.adoc[leveloffset=+2] + +.Additional resources + +* See the link:https://prometheus.io/docs/prometheus/latest/querying/basics/[Prometheus query documentation] for more information about creating PromQL queries. +* See the xref:../monitoring/osd-managing-metrics.adoc#querying-metrics-for-user-defined-projects-as-a-developer_osd-managing-metrics[Querying metrics for user-defined projects as a developer] for details on accessing non-cluster metrics as a developer or a privileged user +// TODO: The above additional resource points to the same module that it's an additional resource of + +include::modules/osd-monitoring-exploring-the-visualized-metrics.adoc[leveloffset=+2] + +.Additional resources + +* See the xref:../monitoring/osd-managing-metrics.adoc#querying-metrics_osd-managing-metrics[Querying metrics] section on using the PromQL interface +* xref:../monitoring/osd-troubleshooting-monitoring-issues.adoc#osd-troubleshooting-monitoring-issues[Troubleshooting monitoring issues] + +[id="managing-metrics-next-steps"] +== Next steps +* xref:../monitoring/osd-managing-alerts.adoc#osd-managing-alerts[Alerts] + +// TODO: Why is alerts a next step if alerts aren't supported? Can this be removed? diff --git a/monitoring/osd-reviewing-monitoring-dashboards.adoc b/monitoring/osd-reviewing-monitoring-dashboards.adoc new file mode 100644 index 0000000000..5e65a0ec5c --- /dev/null +++ b/monitoring/osd-reviewing-monitoring-dashboards.adoc @@ -0,0 +1,31 @@ +[id="osd-reviewing-monitoring-dashboards"] += Reviewing monitoring dashboards +include::modules/common-attributes.adoc[] +:context: osd-reviewing-monitoring-dashboards + +toc::[] + +{product-title} provides monitoring dashboards that help you understand the state of user-defined projects. + +In the *Developer* perspective, you can access dashboards that provide the following statistics for a selected project: + +* CPU usage +* Memory usage +* Bandwidth information +* Packet rate information + +.Example dashboard in the Developer perspective +image::monitoring-dashboard-developer.png[] + +[NOTE] +==== +In the *Developer* perspective, you can view dashboards for only one project at a time. +==== + +// Reviewing monitoring dashboards as a developer +include::modules/osd-monitoring-reviewing-monitoring-dashboards-developer.adoc[leveloffset=+1] + +[id="monitoring-dashboards-next-steps"] +== Next steps + +* xref:../monitoring/osd-troubleshooting-monitoring-issues.adoc#osd-troubleshooting-monitoring-issues[Troubleshooting monitoring issues] diff --git a/monitoring/osd-troubleshooting-monitoring-issues.adoc b/monitoring/osd-troubleshooting-monitoring-issues.adoc new file mode 100644 index 0000000000..70e1c739ea --- /dev/null +++ b/monitoring/osd-troubleshooting-monitoring-issues.adoc @@ -0,0 +1,10 @@ +[id="osd-troubleshooting-monitoring-issues"] += Troubleshooting monitoring issues +include::modules/attributes-openshift-dedicated.adoc[] +:context: osd-troubleshooting-monitoring-issues + +toc::[] + +This document describes how to troubleshoot common monitoring issues for user-defined projects. 
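+
+Before working through the troubleshooting steps below, a quick health check of the user-defined monitoring components can rule out obvious problems. The following is a minimal sketch; it assumes you are logged in with `dedicated-admin` privileges and that the components run in the `openshift-user-workload-monitoring` namespace:
+
+[source,terminal]
+----
+$ oc -n openshift-user-workload-monitoring get pods
+----
+
+Pods that are not in a `Running` state, for example pods stuck in `Pending` or `CrashLoopBackOff`, are a common cause of missing metrics for user-defined projects.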
+ +include::modules/osd-monitoring-troubleshooting-issues.adoc[leveloffset=+1] diff --git a/monitoring/osd-understanding-the-monitoring-stack.adoc b/monitoring/osd-understanding-the-monitoring-stack.adoc new file mode 100644 index 0000000000..4ed8f8d4ba --- /dev/null +++ b/monitoring/osd-understanding-the-monitoring-stack.adoc @@ -0,0 +1,31 @@ +[id="osd-understanding-the-monitoring-stack"] += Understanding the monitoring stack +include::modules/attributes-openshift-dedicated.adoc[] +:context: osd-understanding-the-monitoring-stack + +toc::[] + +In {product-title}, you can monitor your own projects in isolation from Red Hat Site Reliability Engineer (SRE) platform metrics. You can monitor your own projects without the need for an additional monitoring solution. + +[NOTE] +==== +Follow the instructions in this document carefully to configure a supported Prometheus instance for monitoring user-defined projects. Custom Prometheus instances are not supported by {product-title}. +==== + +// Understanding the monitoring stack +include::modules/osd-monitoring-understanding-the-monitoring-stack.adoc[leveloffset=+1] +include::modules/osd-monitoring-components-for-monitoring-user-defined-projects.adoc[leveloffset=+2] +include::modules/osd-monitoring-targets-for-user-defined-projects.adoc[leveloffset=+2] + +[id="understanding-the-monitoring-stack-additional-resources"] +== Additional resources + +* xref:../monitoring/osd-accessing-monitoring-for-user-defined-projects.adoc#osd-accessing-monitoring-for-user-defined-projects[Accessing monitoring for user-defined projects] +* link:https://access.redhat.com/documentation/en-us/openshift_container_platform/4.7/html/monitoring/understanding-the-monitoring-stack#default-monitoring-components_understanding-the-monitoring-stack[Default monitoring components] +* link:https://access.redhat.com/documentation/en-us/openshift_container_platform/4.7/html/monitoring/understanding-the-monitoring-stack#default-monitoring-targets_understanding-the-monitoring-stack[Default monitoring targets] +// TODO: When there is a link to the OCP docs, should that be explicit, so they're not surprised when they find themselves in another doc set? + +[id="understanding-the-monitoring-stack-next-steps"] +== Next steps + +* xref:../monitoring/osd-configuring-the-monitoring-stack.adoc#osd-configuring-the-monitoring-stack[Configuring the monitoring stack] diff --git a/nodes/nodes-about-autoscaling-nodes.adoc b/nodes/nodes-about-autoscaling-nodes.adoc new file mode 100644 index 0000000000..0467a85b5b --- /dev/null +++ b/nodes/nodes-about-autoscaling-nodes.adoc @@ -0,0 +1,78 @@ +include::modules/attributes-openshift-dedicated.adoc[] +[id="nodes-about-autoscaling-nodes"] += About autoscaling nodes on a cluster +:context: nodes-about-autoscaling-nodes +toc::[] + +ifdef::openshift-dedicated[] +[IMPORTANT] +==== +Autoscaling is available only on clusters that were purchased through the Red Hat Marketplace. +==== +endif::[] + +The autoscaler option can be configured to automatically scale the number of machines in a cluster. + +The cluster autoscaler increases the size of the cluster when there are pods that failed to schedule on any of the current nodes due to insufficient resources or when another node is necessary to meet deployment needs. The cluster autoscaler does not increase the cluster resources beyond the limits that you specify. 
+
+Additionally, the cluster autoscaler decreases the size of the cluster when some nodes are consistently not needed for a significant period, such as when a node has low resource use and all of its important pods can fit on other nodes.
+
+When you enable autoscaling, you must also set a minimum and maximum number of worker nodes.
+
+[NOTE]
+====
+Only cluster owners and organization admins can scale or delete a cluster.
+====
+
+[id="nodes-enabling-autoscaling-nodes"]
+== Enabling autoscaling nodes on a cluster
+
+You can enable autoscaling on worker nodes to increase or decrease the number of nodes available by editing the machine pool definition for an existing cluster.
+
+[discrete]
+include::modules/ocm-enabling-autoscaling-nodes.adoc[leveloffset=+2]
+
+ifdef::openshift-rosa[]
+[NOTE]
+====
+Additionally, you can configure autoscaling on the default machine pool when you xref:../rosa_getting_started/rosa-creating-cluster.adoc#rosa-creating-cluster[create the cluster using interactive mode].
+====
+
+[discrete]
+include::modules/rosa-enabling-autoscaling-nodes.adoc[leveloffset=+2]
+endif::[]
+
+[id="nodes-disabling-autoscaling-nodes"]
+== Disabling autoscaling nodes on a cluster
+
+You can disable autoscaling on worker nodes to increase or decrease the number of nodes available by editing the machine pool definition for an existing cluster.
+
+ifdef::openshift-dedicated[]
+You can disable autoscaling on a cluster by using the OpenShift Cluster Manager (OCM) console.
+endif::[]
+
+ifdef::openshift-rosa[]
+You can disable autoscaling on a cluster by using the OpenShift Cluster Manager (OCM) console or the {product-title} CLI.
+
+[NOTE]
+====
+Additionally, you can configure autoscaling on the default machine pool when you xref:../rosa_getting_started/rosa-creating-cluster.adoc#rosa-creating-cluster[create the cluster using interactive mode].
+====
+endif::[]
+
+[discrete]
+include::modules/ocm-disabling-autoscaling-nodes.adoc[leveloffset=+2]
+
+ifdef::openshift-rosa[]
+
+[discrete]
+include::modules/rosa-disabling-autoscaling-nodes.adoc[leveloffset=+2]
+endif::[]
+
+[id="nodes-about-autoscaling-nodes-additional-resources"]
+== Additional resources
+* xref:../nodes/nodes-machinepools-about.adoc#machinepools-about[About machinepools]
+ifdef::openshift-rosa[]
+* xref:../nodes/rosa-managing-worker-nodes.adoc#rosa-managing-worker-nodes[Managing worker nodes]
+* xref:../rosa_cli/rosa-manage-objects-cli.adoc#rosa-managing-objects-cli[Managing objects with the rosa CLI]
+endif::[]
diff --git a/nodes/nodes-machinepools-about.adoc b/nodes/nodes-machinepools-about.adoc
new file mode 100644
index 0000000000..ee3650f30c
--- /dev/null
+++ b/nodes/nodes-machinepools-about.adoc
@@ -0,0 +1,40 @@
+include::modules/attributes-openshift-dedicated.adoc[]
+[id="machinepools-about"]
+= About machine pools
+:context: machine-pools-about
+toc::[]
+
+{product-title} uses machine pools as an elastic, dynamic provisioning method on top of your cloud infrastructure.
+
+The primary resources are machines, machine sets, and machine pools.
+
+== Machines
+A machine is a fundamental unit that describes the host for a worker node.
+
+== Machine sets
+`MachineSet` resources are groups of machines. If you need more machines or must scale them down, configure the number of replicas in the machine pool to which the machine sets belong.
+
+ifdef::openshift-rosa[]
+Machine sets are not directly modifiable in ROSA.
+endif::[]
+
+== Machine pools
+Machine pools are a higher-level construct than machine sets.
+
+A machine pool creates machine sets that are all clones of the same configuration across availability zones. Machine pools perform all of the host node provisioning and management actions for worker nodes. If you need more machines or must scale them down, change the number of replicas in the machine pool to meet your compute needs. You can configure scaling manually or enable autoscaling.
+
+By default, a cluster is created with one machine pool. Additional machine pools can be added later to an existing cluster, and you can modify the default machine pool. Machine pools can also be deleted.
+
+Multiple machine pools can exist on a single cluster, and each machine pool can use a different node type or node size.
+
+== Machine pools in multiple zone clusters
+When you create a machine pool in a multiple availability zone (Multi-AZ) cluster, that machine pool spans all three zones. The machine pool, in turn, creates a total of three machine sets, one machine set for each zone in the cluster. Each of those machine sets manages one or more machines in its respective availability zone.
+
+If you create a new Multi-AZ cluster, the machine pools are replicated to all of those zones automatically. If you add a machine pool to an existing Multi-AZ cluster, the new pool is automatically created in those zones. Similarly, deleting a machine pool deletes it from all zones.
+Due to this multiplicative effect, using machine pools in a Multi-AZ cluster can consume more of your project's quota for a specific region when you create machine pools.
+
+== Additional resources
+ifdef::openshift-rosa[]
+* xref:../nodes/rosa-managing-worker-nodes.adoc#rosa-managing-worker-nodes[Managing worker nodes]
+endif::[]
+* xref:../nodes/nodes-about-autoscaling-nodes.adoc#nodes-about-autoscaling-nodes[About autoscaling]
diff --git a/nodes/rosa-managing-worker-nodes.adoc b/nodes/rosa-managing-worker-nodes.adoc
new file mode 100644
index 0000000000..44ef2279d6
--- /dev/null
+++ b/nodes/rosa-managing-worker-nodes.adoc
@@ -0,0 +1,20 @@
+include::modules/attributes-openshift-dedicated.adoc[]
+[id="rosa-managing-worker-nodes"]
+= Managing worker nodes
+:context: rosa-managing-worker-nodes
+toc::[]
+
+This section describes how to manage worker nodes with {product-title} (ROSA).
+
+Most changes for worker nodes are configured on machine pools. A _machine pool_ is a group of worker nodes in a cluster that have the same configuration, which simplifies management. You can edit the configuration of worker nodes for options such as scaling, instance type, labels, and taints.
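+
+For orientation, the following is a minimal sketch of what listing and editing a machine pool looks like with the `rosa` CLI. The cluster and machine pool names are placeholders, and the available flags can vary between `rosa` CLI versions, so check `rosa edit machinepool --help` for the exact syntax:
+
+[source,terminal]
+----
+$ rosa list machinepools --cluster=<cluster_name>
+$ rosa edit machinepool --cluster=<cluster_name> --replicas=3 --labels=workload=batch <machinepool_name>
+----
+
+The procedures in the following modules cover these operations in detail.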
+ +include::modules/rosa-scaling-worker-nodes.adoc[leveloffset=+1] +include::modules/rosa-adding-node-labels.adoc[leveloffset=+1] +include::modules/rosa-adding-instance-types.adoc[leveloffset=+1] + +== Additional resources +* xref:../nodes/nodes-machinepools-about.adoc#machinepools-about[About machinepools] +* xref:../nodes/nodes-about-autoscaling-nodes.adoc#nodes-about-autoscaling-nodes[About autoscaling] +* xref:../nodes/nodes-about-autoscaling-nodes.adoc#nodes-enabling-autoscaling-nodes[Enabling autoscaling] +* xref:../nodes/nodes-about-autoscaling-nodes.adoc#nodes-disabling-autoscaling-nodes[Disabling autoscaling] +* xref:../rosa_policy/rosa-service-definition.adoc#rosa-service-definition[ROSA Service Definition] diff --git a/osd_architecture/images b/osd_architecture/images new file mode 120000 index 0000000000..5e67573196 --- /dev/null +++ b/osd_architecture/images @@ -0,0 +1 @@ +../images \ No newline at end of file diff --git a/osd_architecture/modules b/osd_architecture/modules new file mode 120000 index 0000000000..464b823aca --- /dev/null +++ b/osd_architecture/modules @@ -0,0 +1 @@ +../modules \ No newline at end of file diff --git a/osd_architecture/osd-architecture.adoc b/osd_architecture/osd-architecture.adoc new file mode 100644 index 0000000000..0a112dccb6 --- /dev/null +++ b/osd_architecture/osd-architecture.adoc @@ -0,0 +1,14 @@ +[id="osd-architecture"] += Architecture concepts +include::modules/attributes-openshift-dedicated.adoc[] +:context: osd-architecture + +toc::[] + +Learn about OpenShift and basic container concepts used in the {product-title} architecture. + +include::modules/kubernetes-about.adoc[leveloffset=+1] + +include::modules/container-benefits.adoc[leveloffset=+1] + +include::modules/osd-vs-ocp.adoc[leveloffset=+1] diff --git a/osd_architecture/osd-understanding.adoc b/osd_architecture/osd-understanding.adoc new file mode 100644 index 0000000000..9667446126 --- /dev/null +++ b/osd_architecture/osd-understanding.adoc @@ -0,0 +1,10 @@ +[id="osd-understanding"] += Introduction to {product-title} +include::modules/attributes-openshift-dedicated.adoc[] +:context: osd-understanding + +toc::[] + +With its foundation in Kubernetes, {product-title} is a complete {OCP} cluster provided as a cloud service, configured for high availability, and dedicated to a single customer. + +include::modules/osd-intro.adoc[leveloffset=+1] diff --git a/osd_cluster_create/creating-your-cluster.adoc b/osd_cluster_create/creating-your-cluster.adoc new file mode 100644 index 0000000000..6d32a9e14a --- /dev/null +++ b/osd_cluster_create/creating-your-cluster.adoc @@ -0,0 +1,15 @@ +[id="creating-your-cluster"] += Creating your cluster +:context: creating-your-cluster +include::modules/attributes-openshift-dedicated.adoc[] + +toc::[] + +[role="_abstract"] +After you have an {product-title} subscription, you can access your services and create your cluster. 
+ +include::modules/understanding-clusters.adoc[leveloffset=+1] + +include::modules/create-aws-cluster.adoc[leveloffset=+1] + +include::modules/create-gcp-cluster.adoc[leveloffset=+1] diff --git a/osd_cluster_create/images b/osd_cluster_create/images new file mode 120000 index 0000000000..5e67573196 --- /dev/null +++ b/osd_cluster_create/images @@ -0,0 +1 @@ +../images \ No newline at end of file diff --git a/osd_cluster_create/modules b/osd_cluster_create/modules new file mode 120000 index 0000000000..464b823aca --- /dev/null +++ b/osd_cluster_create/modules @@ -0,0 +1 @@ +../modules \ No newline at end of file diff --git a/osd_notifications/images b/osd_notifications/images new file mode 120000 index 0000000000..5e67573196 --- /dev/null +++ b/osd_notifications/images @@ -0,0 +1 @@ +../images \ No newline at end of file diff --git a/osd_notifications/modules b/osd_notifications/modules new file mode 120000 index 0000000000..464b823aca --- /dev/null +++ b/osd_notifications/modules @@ -0,0 +1 @@ +../modules \ No newline at end of file diff --git a/osd_notifications/notifications.adoc b/osd_notifications/notifications.adoc new file mode 100644 index 0000000000..0622d886ce --- /dev/null +++ b/osd_notifications/notifications.adoc @@ -0,0 +1,15 @@ +[id="notifications"] += Notifications for {product-title} clusters +:context: notifications +include::modules/attributes-openshift-dedicated.adoc[] + +toc::[] + +[role="_abstract"] +Cluster service logs can be viewed through {OCM} and the OCM CLI. The cluster history details cluster events such as adding groups, adding users, adding identity providers, load balancer quota updates, and scheduled maintenance upgrades. + +In addition to the cluster history on OCM, users subscribed to cluster notifications also receive emails for cluster upgrade maintenance, known cluster incidents, or customer action required cluster events. + +include::modules/viewing-notifications.adoc[leveloffset=+1] + +include::modules/notification-subscribe.adoc[leveloffset=+1] diff --git a/osd_planning/aws-ccs.adoc b/osd_planning/aws-ccs.adoc new file mode 100644 index 0000000000..7983d58ff5 --- /dev/null +++ b/osd_planning/aws-ccs.adoc @@ -0,0 +1,17 @@ +[id="aws-ccs"] += Customer Cloud Subscriptions on AWS +:context: aws-ccs +include::modules/attributes-openshift-dedicated.adoc[] + +toc::[] + +[role="_abstract"] +{product-title} provides a Customer Cloud Subscription (CCS) model that allows Red Hat to deploy and manage clusters into a customer’s existing Amazon Web Service (AWS) account. + +include::modules/ccs-aws-understand.adoc[leveloffset=+1] +include::modules/ccs-aws-customer-requirements.adoc[leveloffset=+1] +include::modules/ccs-aws-customer-procedure.adoc[leveloffset=+1] +include::modules/ccs-aws-scp.adoc[leveloffset=+1] +include::modules/ccs-aws-iam.adoc[leveloffset=+1] +include::modules/ccs-aws-provisioned.adoc[leveloffset=+1] +include::modules/aws-limits.adoc[leveloffset=+1] diff --git a/osd_planning/gcp-ccs.adoc b/osd_planning/gcp-ccs.adoc new file mode 100644 index 0000000000..f5aee0fb6b --- /dev/null +++ b/osd_planning/gcp-ccs.adoc @@ -0,0 +1,17 @@ +[id="gcp-ccs"] += Customer Cloud Subscriptions on GCP +:context: gcp-ccs +include::modules/attributes-openshift-dedicated.adoc[] + +toc::[] + +[role="_abstract"] +Red Hat recommends the usage of a {GCP} project, managed by the customer, to organize all of your GCP resources. A project consists of a set of users and APIs, as well as billing, authentication, and monitoring settings for those APIs. 
+ +It is a best practice for the {product-title} CCS cluster to be hosted in a GCP project within a GCP organization. The Organization resource is the root node of the GCP resource hierarchy and all resources that belong to an organization are grouped under the organization node. An IAM service account with certain roles granted is created and applied to the GCP project. When you make calls to the API, you typically provide service account keys for authentication. Each service account is owned by a specific project, but service accounts can be provided roles to access resources for other projects. + +include::modules/ccs-gcp-understand.adoc[leveloffset=+1] +include::modules/ccs-gcp-customer-requirements.adoc[leveloffset=+1] +include::modules/ccs-gcp-customer-procedure.adoc[leveloffset=+1] +include::modules/ccs-gcp-iam.adoc[leveloffset=+1] +include::modules/gcp-limits.adoc[leveloffset=+1] diff --git a/osd_planning/images b/osd_planning/images new file mode 120000 index 0000000000..5e67573196 --- /dev/null +++ b/osd_planning/images @@ -0,0 +1 @@ +../images \ No newline at end of file diff --git a/osd_planning/modules b/osd_planning/modules new file mode 120000 index 0000000000..464b823aca --- /dev/null +++ b/osd_planning/modules @@ -0,0 +1 @@ +../modules \ No newline at end of file diff --git a/osd_policy/images b/osd_policy/images new file mode 120000 index 0000000000..5e67573196 --- /dev/null +++ b/osd_policy/images @@ -0,0 +1 @@ +../images \ No newline at end of file diff --git a/osd_policy/modules b/osd_policy/modules new file mode 120000 index 0000000000..464b823aca --- /dev/null +++ b/osd_policy/modules @@ -0,0 +1 @@ +../modules \ No newline at end of file diff --git a/osd_policy/osd-life-cycle.adoc b/osd_policy/osd-life-cycle.adoc new file mode 100644 index 0000000000..6a00b1a598 --- /dev/null +++ b/osd_policy/osd-life-cycle.adoc @@ -0,0 +1,22 @@ +[id="osd-life-cycle"] += {product-title} update life cycle +include::modules/attributes-openshift-dedicated.adoc[] +:context: osd-life-cycle + +toc::[] + +include::modules/life-cycle-overview.adoc[leveloffset=+1] + +.Additional resources + +* xref:../osd_policy/osd-service-definition.adoc#osd-service-definition[{product-title} service definition] + +include::modules/life-cycle-definitions.adoc[leveloffset=+1] +include::modules/life-cycle-major-versions.adoc[leveloffset=+1] +include::modules/life-cycle-minor-versions.adoc[leveloffset=+1] +include::modules/life-cycle-patch-versions.adoc[leveloffset=+1] +include::modules/life-cycle-limited-support.adoc[leveloffset=+1] +include::modules/life-cycle-supported-versions.adoc[leveloffset=+1] +include::modules/life-cycle-install.adoc[leveloffset=+1] +include::modules/life-cycle-mandatory-upgrades.adoc[leveloffset=+1] +include::modules/life-cycle-dates.adoc[leveloffset=+1] diff --git a/osd_policy/osd-service-definition.adoc b/osd_policy/osd-service-definition.adoc new file mode 100644 index 0000000000..ecc6093f3e --- /dev/null +++ b/osd_policy/osd-service-definition.adoc @@ -0,0 +1,14 @@ +[id="osd-service-definition"] += {product-title} service definition +include::modules/attributes-openshift-dedicated.adoc[] +:context: osd-service-definition + +toc::[] + +include::modules/sdpolicy-account-management.adoc[leveloffset=+1] +include::modules/sdpolicy-logging.adoc[leveloffset=+1] +include::modules/sdpolicy-monitoring.adoc[leveloffset=+1] +include::modules/sdpolicy-networking.adoc[leveloffset=+1] +include::modules/sdpolicy-storage.adoc[leveloffset=+1] 
+include::modules/sdpolicy-platform.adoc[leveloffset=+1] +include::modules/sdpolicy-security.adoc[leveloffset=+1] diff --git a/osd_policy/policy-process-security.adoc b/osd_policy/policy-process-security.adoc new file mode 100644 index 0000000000..0d6f3f2997 --- /dev/null +++ b/osd_policy/policy-process-security.adoc @@ -0,0 +1,13 @@ +[id="policy-process-security"] += Understanding process and security for {product-title} +include::modules/attributes-openshift-dedicated.adoc[] +:context: policy-process-security + +toc::[] + + +include::modules/policy-incident.adoc[leveloffset=+1] +include::modules/policy-change-management.adoc[leveloffset=+1] +include::modules/policy-identity-access-management.adoc[leveloffset=+1] +include::modules/policy-security-regulation-compliance.adoc[leveloffset=+1] +include::modules/policy-disaster-recovery.adoc[leveloffset=+1] diff --git a/osd_policy/policy-responsibility-matrix.adoc b/osd_policy/policy-responsibility-matrix.adoc new file mode 100644 index 0000000000..43b1bd9d11 --- /dev/null +++ b/osd_policy/policy-responsibility-matrix.adoc @@ -0,0 +1,13 @@ +[id="policy-responsibility-matrix"] += Responsibility assignment matrix +include::modules/attributes-openshift-dedicated.adoc[] +:context: policy-responsibility-matrix + +toc::[] + +[role="_abstract"] +Understanding the Red Hat, cloud provider, and customer responsibilities for the OpenShift Dedicated managed service. + +include::modules/policy-responsibilities.adoc[leveloffset=+1] +include::modules/policy-shared-responsibility.adoc[leveloffset=+1] +include::modules/policy-customer-responsibility.adoc[leveloffset=+1] diff --git a/osd_policy/policy-understand-availability.adoc b/osd_policy/policy-understand-availability.adoc new file mode 100644 index 0000000000..229dafac29 --- /dev/null +++ b/osd_policy/policy-understand-availability.adoc @@ -0,0 +1,11 @@ +[id="policy-understand-availability"] += Understanding availability for {product-title} +include::modules/attributes-openshift-dedicated.adoc[] +:context: policy-understand-availability + +toc::[] + +[role="_abstract"] +Availability and disaster avoidance are extremely important aspects of any application platform. {product-title} provides many protections against failures at several levels, but customer-deployed applications must be appropriately configured for high availability. In addition, to account for cloud provider outages that might occur, other options are available, such as deploying a cluster across multiple availability zones or maintaining multiple clusters with failover mechanisms. 
+ +include::modules/policy-failure-points.adoc[leveloffset=+1] diff --git a/osd_private_connections/aws-private-connections.adoc b/osd_private_connections/aws-private-connections.adoc new file mode 100644 index 0000000000..1c704b567d --- /dev/null +++ b/osd_private_connections/aws-private-connections.adoc @@ -0,0 +1,12 @@ +[id="aws-private-connections"] += Configuring private connections for AWS +:context: aws-private-connections +include::modules/attributes-openshift-dedicated.adoc[] + +toc::[] + +include::modules/enable-aws-access.adoc[leveloffset=+1] +include::modules/config-aws-access.adoc[leveloffset=+1] +include::modules/aws-vpc.adoc[leveloffset=+1] +include::modules/aws-vpn.adoc[leveloffset=+1] +include::modules/aws-direct-connect.adoc[leveloffset=+1] diff --git a/osd_private_connections/images b/osd_private_connections/images new file mode 120000 index 0000000000..5e67573196 --- /dev/null +++ b/osd_private_connections/images @@ -0,0 +1 @@ +../images \ No newline at end of file diff --git a/osd_private_connections/modules b/osd_private_connections/modules new file mode 120000 index 0000000000..464b823aca --- /dev/null +++ b/osd_private_connections/modules @@ -0,0 +1 @@ +../modules \ No newline at end of file diff --git a/osd_private_connections/private-cluster.adoc b/osd_private_connections/private-cluster.adoc new file mode 100644 index 0000000000..bf3798fb0e --- /dev/null +++ b/osd_private_connections/private-cluster.adoc @@ -0,0 +1,18 @@ +[id="private-cluster"] += Configuring a private cluster +:context: private-cluster +include::modules/attributes-openshift-dedicated.adoc[] + +toc::[] + +[role="_abstract"] +An {product-title} cluster can be made private so that internal applications can be hosted inside a corporate network. In addition, private clusters can be configured to have only internal API endpoints for increased security. + +{product-title} administrators can choose between public and private cluster configuration from within the *OpenShift Cluster Manager* (OCM). Privacy settings can be configured during cluster creation or after a cluster is established. + + +include::modules/enable-private-cluster-new.adoc[leveloffset=+1] + +include::modules/enable-private-cluster-existing.adoc[leveloffset=+1] + +include::modules/enable-public-cluster.adoc[leveloffset=+1] diff --git a/osd_quickstart/images b/osd_quickstart/images new file mode 120000 index 0000000000..5e67573196 --- /dev/null +++ b/osd_quickstart/images @@ -0,0 +1 @@ +../images \ No newline at end of file diff --git a/osd_quickstart/modules b/osd_quickstart/modules new file mode 120000 index 0000000000..464b823aca --- /dev/null +++ b/osd_quickstart/modules @@ -0,0 +1 @@ +../modules \ No newline at end of file diff --git a/osd_quickstart/osd-quickstart.adoc b/osd_quickstart/osd-quickstart.adoc new file mode 100644 index 0000000000..f296d37a99 --- /dev/null +++ b/osd_quickstart/osd-quickstart.adoc @@ -0,0 +1,19 @@ +[id="osd-quickstart"] += Quick start +include::modules/attributes-openshift-dedicated.adoc[] +:context: osd-quickstart + +toc::[] + +// Can the assembly title be more detailed? Plus this uses "Quick start" vs "Quickstart in nav menu" + +[role="_abstract"] +Use this quick start to create and provision a cluster, add users, deploy your first application, and learn how to scale and delete your cluster. 
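+
+If you prefer to follow along from a terminal after your cluster and user exist, the workflow roughly maps to the following `oc` commands. This is only an illustrative sketch; the API URL, user name, and sample application are placeholders, and the detailed steps are covered in the modules below:
+
+[source,terminal]
+----
+$ oc login https://api.<cluster_domain>:6443 --username=<idp_user>
+$ oc new-project my-first-app
+$ oc new-app https://github.com/sclorg/django-ex.git
+$ oc get pods
+----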
+ +include::modules/create-cluster.adoc[leveloffset=+1] +include::modules/config-idp.adoc[leveloffset=+1] +include::modules/add-user.adoc[leveloffset=+1] +include::modules/access-cluster.adoc[leveloffset=+1] +include::modules/deploy-app.adoc[leveloffset=+1] +include::modules/scaling-cluster.adoc[leveloffset=+1] +include::modules/deleting-cluster.adoc[leveloffset=+1] diff --git a/rosa_architecture/images b/rosa_architecture/images new file mode 120000 index 0000000000..5e67573196 --- /dev/null +++ b/rosa_architecture/images @@ -0,0 +1 @@ +../images \ No newline at end of file diff --git a/rosa_architecture/modules b/rosa_architecture/modules new file mode 120000 index 0000000000..464b823aca --- /dev/null +++ b/rosa_architecture/modules @@ -0,0 +1 @@ +../modules \ No newline at end of file diff --git a/rosa_architecture/rosa-architecture-models.adoc b/rosa_architecture/rosa-architecture-models.adoc new file mode 100644 index 0000000000..b43364e85b --- /dev/null +++ b/rosa_architecture/rosa-architecture-models.adoc @@ -0,0 +1,15 @@ +[id="rosa-architecture-models"] += Architecture models +include::modules/attributes-openshift-dedicated.adoc[] +:context: rosa-architecture-models + +toc::[] + +ROSA architecture supports the following network configuration types: + +* Public network +* Private network +* AWS PrivateLink + +include::modules/rosa-architecture.adoc[leveloffset=+1] +include::modules/osd-aws-privatelink-architecture.adoc[leveloffset=+1] diff --git a/rosa_architecture/rosa-basic-architecture-concepts.adoc b/rosa_architecture/rosa-basic-architecture-concepts.adoc new file mode 100644 index 0000000000..910cb91ede --- /dev/null +++ b/rosa_architecture/rosa-basic-architecture-concepts.adoc @@ -0,0 +1,11 @@ +[id="rosa-basic-architecture-concepts"] += Architecture concepts +include::modules/attributes-openshift-dedicated.adoc[] +:context: rosa-basic-architecture-concepts +toc::[] + +Learn about OpenShift and container basic concepts used in {product-title} architecture. + +include::modules/rosa-openshift-concepts.adoc[leveloffset=+1] +include::modules/rosa-kubernetes-concept.adoc[leveloffset=+1] +include::modules/rosa-containers-concept.adoc[leveloffset=+1] diff --git a/rosa_architecture/rosa-understanding.adoc b/rosa_architecture/rosa-understanding.adoc new file mode 100644 index 0000000000..4cab35fcfa --- /dev/null +++ b/rosa_architecture/rosa-understanding.adoc @@ -0,0 +1,10 @@ +[id="rosa-understanding"] += Introduction to ROSA +include::modules/attributes-openshift-dedicated.adoc[] +:context: rosa-understanding +toc::[] + +Learn about {product-title} (ROSA) access, supported consoles, consumption experience, and integration with Amazon Web Services (AWS) services. 
+
+include::modules/rosa-understanding.adoc[leveloffset=+1]
+include::modules/rosa-using-sts.adoc[leveloffset=+2]
diff --git a/rosa_cli/images b/rosa_cli/images
new file mode 120000
index 0000000000..5e67573196
--- /dev/null
+++ b/rosa_cli/images
@@ -0,0 +1 @@
+../images
\ No newline at end of file
diff --git a/rosa_cli/modules b/rosa_cli/modules
new file mode 120000
index 0000000000..464b823aca
--- /dev/null
+++ b/rosa_cli/modules
@@ -0,0 +1 @@
+../modules
\ No newline at end of file
diff --git a/rosa_cli/rosa-checking-acct-version-cli.adoc b/rosa_cli/rosa-checking-acct-version-cli.adoc
new file mode 100644
index 0000000000..b1a53318bd
--- /dev/null
+++ b/rosa_cli/rosa-checking-acct-version-cli.adoc
@@ -0,0 +1,8 @@
+include::modules/attributes-openshift-dedicated.adoc[]
+[id="rosa-acct-version-cli"]
+= Checking account and version information with the rosa CLI
+:context: rosa-checking-acct-version-cli
+
+toc::[]
+
+include::modules/rosa-checking-account-version-info-cli.adoc[leveloffset=+1]
diff --git a/rosa_cli/rosa-checking-logs-cli.adoc b/rosa_cli/rosa-checking-logs-cli.adoc
new file mode 100644
index 0000000000..d139a36564
--- /dev/null
+++ b/rosa_cli/rosa-checking-logs-cli.adoc
@@ -0,0 +1,8 @@
+include::modules/attributes-openshift-dedicated.adoc[]
+[id="rosa-checking-logs-cli"]
+= Checking logs with the rosa CLI
+:context: rosa-checking-logs-cli
+
+toc::[]
+
+include::modules/rosa-logs.adoc[leveloffset=+1]
diff --git a/rosa_cli/rosa-get-started-cli.adoc b/rosa_cli/rosa-get-started-cli.adoc
new file mode 100644
index 0000000000..6fa26d5cdc
--- /dev/null
+++ b/rosa_cli/rosa-get-started-cli.adoc
@@ -0,0 +1,13 @@
+include::modules/attributes-openshift-dedicated.adoc[]
+[id="rosa-get-started-cli"]
+= Getting started with the rosa CLI
+:context: rosa-getting-started-cli
+toc::[]
+
+This section describes how to set up the `rosa` CLI and covers its basic usage.
+
+include::modules/rosa-about.adoc[leveloffset=+1]
+include::modules/rosa-setting-up-cli.adoc[leveloffset=+1]
+include::modules/rosa-configure.adoc[leveloffset=+1]
+include::modules/rosa-initialize.adoc[leveloffset=+1]
+include::modules/rosa-using-bash-script.adoc[leveloffset=+1]
diff --git a/rosa_cli/rosa-manage-objects-cli.adoc b/rosa_cli/rosa-manage-objects-cli.adoc
new file mode 100644
index 0000000000..2ad2698e5f
--- /dev/null
+++ b/rosa_cli/rosa-manage-objects-cli.adoc
@@ -0,0 +1,17 @@
+include::modules/attributes-openshift-dedicated.adoc[]
+[id="rosa-managing-objects-cli"]
+= Managing objects with the rosa CLI
+:context: rosa-managing-objects-cli
+
+toc::[]
+
+You can manage objects with the `rosa` CLI, such as adding `dedicated-admin` users, managing clusters, and scheduling cluster upgrades.
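+
+For example, granting a user the `dedicated-admin` role and then listing cluster users are each one-line operations. The cluster and user names below are placeholders; confirm the current syntax with `rosa grant user --help`:
+
+[source,terminal]
+----
+$ rosa grant user dedicated-admin --user=<idp_user_name> --cluster=<cluster_name>
+$ rosa list users --cluster=<cluster_name>
+----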
+ +include::modules/rosa-common-commands.adoc[leveloffset=+1] +include::modules/rosa-parent-commands.adoc[leveloffset=+1] +include::modules/rosa-create-objects.adoc[leveloffset=+1] +include::modules/rosa-edit-objects.adoc[leveloffset=+1] +include::modules/rosa-delete-objects.adoc[leveloffset=+1] +include::modules/rosa-install-uninstall-addon.adoc[leveloffset=+1] +include::modules/rosa-list-objects.adoc[leveloffset=+1] +include::modules/rosa-upgrade-cluster-cli.adoc[leveloffset=+1] diff --git a/rosa_getting_started/images b/rosa_getting_started/images new file mode 120000 index 0000000000..5e67573196 --- /dev/null +++ b/rosa_getting_started/images @@ -0,0 +1 @@ +../images \ No newline at end of file diff --git a/rosa_getting_started/modules b/rosa_getting_started/modules new file mode 120000 index 0000000000..464b823aca --- /dev/null +++ b/rosa_getting_started/modules @@ -0,0 +1 @@ +../modules \ No newline at end of file diff --git a/rosa_getting_started/rosa-accessing-cluster.adoc b/rosa_getting_started/rosa-accessing-cluster.adoc new file mode 100644 index 0000000000..48b1eecc52 --- /dev/null +++ b/rosa_getting_started/rosa-accessing-cluster.adoc @@ -0,0 +1,23 @@ +include::modules/attributes-openshift-dedicated.adoc[] +[id="rosa-accessing-cluster"] += Accessing a ROSA cluster +:context: rosa-accessing-cluster + +toc::[] + +As a best practice, access your {product-title} (ROSA) cluster using an identity provider (IDP) account. However, the cluster administrator who created the cluster can access it using the quick access procedure. + +This document describes how to access a cluster and set up an IDP using the `rosa` CLI. Alternatively, you can set up an IDP account using the OpenShift Cluster Manager (OCM) console. + +include::modules/rosa-accessing-your-cluster-quick.adoc[leveloffset=+1] + +include::modules/rosa-accessing-your-cluster.adoc[leveloffset=+1] + +include::modules/rosa-create-cluster-admins.adoc[leveloffset=+1] + +include::modules/rosa-create-dedicated-cluster-admins.adoc[leveloffset=+1] + +[id="additional-resources-cluster-access"] +== Additional resources +* xref:../rosa_getting_started/rosa-config-identity-providers.adoc#rosa-config-identity-providers[Configuring identity providers using the OCM console] +* xref:../rosa_getting_started/rosa-getting-started-workflow.adoc#rosa-getting-started-workflow[Getting started workflow] diff --git a/rosa_getting_started/rosa-aws-prereqs.adoc b/rosa_getting_started/rosa-aws-prereqs.adoc new file mode 100644 index 0000000000..7b6da6e783 --- /dev/null +++ b/rosa_getting_started/rosa-aws-prereqs.adoc @@ -0,0 +1,27 @@ +include::modules/attributes-openshift-dedicated.adoc[] +:context: prerequisites + +[id="prerequisites"] += AWS prerequisites for ROSA + +toc::[] + +{product-title} (ROSA) provides a model that allows Red Hat to deploy clusters into a customer’s existing Amazon Web Service (AWS) account. + +You must ensure that the prerequisites are met before installing ROSA. This requirements document does not apply to AWS Security Token Service (STS). If you are using STS, see the link:https://docs.openshift.com/rosa/rosa_getting_started_sts/rosa-sts-aws-prereqs.html[STS-specific requirements]. 
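+
+After the prerequisites below are in place, you can sanity-check the AWS account from a workstation. The following is a hedged sketch that assumes the `aws` and `rosa` CLIs are installed and configured with credentials for the target account; the exact checks performed by `rosa verify` can vary between releases:
+
+[source,terminal]
+----
+$ aws sts get-caller-identity
+$ rosa verify permissions
+$ rosa verify quota
+----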
+ +include::modules/rosa-aws-understand.adoc[leveloffset=+1] +include::modules/rosa-aws-requirements.adoc[leveloffset=+1] +include::modules/rosa-aws-procedure.adoc[leveloffset=+1] +include::modules/rosa-aws-scp.adoc[leveloffset=+1] +include::modules/rosa-aws-iam.adoc[leveloffset=+1] +include::modules/rosa-aws-provisioned.adoc[leveloffset=+1] +include::modules/osd-aws-privatelink-firewall-prerequisites.adoc[leveloffset=+1] + +== Next steps +xref:../rosa_getting_started/rosa-required-aws-service-quotas.adoc#rosa-required-aws-service-quotas[Review the required AWS service quotas] + +== Additional resources +* See xref:../rosa_planning/rosa-limits-scalability.adoc#initial-planning-considerations_rosa-limits-scalability[Intial Planning Considerations] for guidance on worker node count. +* See xref:../rosa_policy/rosa-policy-process-security.adoc#rosa-policy-sre-access_rosa-policy-process-security[SRE access to all Red Hat OpenShift Service on AWS clusters] for information about how Red Hat site reliability engineering accesses ROSA clusters. +* xref:../rosa_getting_started/rosa-getting-started-workflow.adoc#rosa-getting-started-workflow[Getting started workflow] diff --git a/rosa_getting_started/rosa-aws-privatelink-creating-cluster.adoc b/rosa_getting_started/rosa-aws-privatelink-creating-cluster.adoc new file mode 100644 index 0000000000..3ecc38ae38 --- /dev/null +++ b/rosa_getting_started/rosa-aws-privatelink-creating-cluster.adoc @@ -0,0 +1,22 @@ +include::modules/attributes-openshift-dedicated.adoc[] +:context: rosa-getting-started +[id="rosa-aws-privatelink-creating-cluster"] += Creating an AWS PrivateLink cluster on ROSA + +toc::[] + +This document describes how to create a ROSA cluster using AWS PrivateLink. Alternatively, you can create a ROSA cluster without AWS PrivateLink. + +include::modules/osd-aws-privatelink-about.adoc[leveloffset=+1] +include::modules/osd-aws-privatelink-required-resources.adoc[leveloffset=+1] +include::modules/rosa-aws-privatelink-create-cluster.adoc[leveloffset=+1] +include::modules/osd-aws-privatelink-config-dns-forwarding.adoc[leveloffset=+1] + +== Next steps +xref:../rosa_getting_started/rosa-config-identity-providers.adoc#rosa-config-identity-providers[Configure identity providers] + +== Additional resources +* xref:../rosa_getting_started/rosa-aws-prereqs.adoc#osd-aws-privatelink-firewall-prerequisites[AWS PrivateLink firewall prerequisites] +* xref:../rosa_getting_started/rosa-getting-started-workflow.adoc#rosa-getting-started-workflow[Getting started workflow] +* xref:../rosa_getting_started/rosa-deleting-cluster.adoc#rosa-deleting-cluster[Deleting a ROSA cluster] +* xref:../rosa_architecture/rosa-architecture-models.adoc#rosa-architecture-models[ROSA architecture] diff --git a/rosa_getting_started/rosa-config-aws-account.adoc b/rosa_getting_started/rosa-config-aws-account.adoc new file mode 100644 index 0000000000..bf0d7ef521 --- /dev/null +++ b/rosa_getting_started/rosa-config-aws-account.adoc @@ -0,0 +1,22 @@ +[id="rosa-config-aws-account"] += Configuring your AWS account +include::modules/attributes-openshift-dedicated.adoc[] +:context: rosa-config-aws-account + +toc::[] + +After you complete the AWS prerequisites, configure your AWS account and enable the {product-title} (ROSA) service. 
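+
+At a high level, the account configuration in the module below centers on a few `rosa` commands. The token is a placeholder that you obtain from the OpenShift Cluster Manager, and this outline is not a substitute for the full procedure:
+
+[source,terminal]
+----
+$ rosa login --token=<offline_access_token>
+$ rosa whoami
+$ rosa init
+----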
+ +include::modules/rosa-configuring-aws-account.adoc[leveloffset=+1] + +[id="next-steps_rosa-config-aws-account"] +== Next steps + +* xref:../rosa_getting_started/rosa-installing-rosa.adoc#rosa-installing-rosa[Install ROSA] + +[id="additional-resources"] +== Additional resources + +* xref:../rosa_getting_started/rosa-aws-prereqs.adoc#prerequisites[AWS prerequisites] +* xref:../rosa_getting_started/rosa-required-aws-service-quotas.adoc#rosa-required-aws-service-quotas[Required AWS service quotas and requesting increases] +* xref:../rosa_getting_started/rosa-getting-started-workflow.adoc#rosa-getting-started-workflow[Getting started workflow] diff --git a/rosa_getting_started/rosa-config-identity-providers.adoc b/rosa_getting_started/rosa-config-identity-providers.adoc new file mode 100644 index 0000000000..f3d36f2a1e --- /dev/null +++ b/rosa_getting_started/rosa-config-identity-providers.adoc @@ -0,0 +1,22 @@ +include::modules/attributes-openshift-dedicated.adoc[] +[id="rosa-config-identity-providers"] += Configuring identity providers +:context: rosa-config-identity-providers + +toc::[] + +After your {product-title} (ROSA) cluster is created, you must configure identity providers to determine how users log in to access the cluster. + +The following topics describe how to configure an identity provider using the OpenShift Cluster Manager (OCM) console. Alternatively, you can use the `rosa` CLI to create an identity provider and access the cluster. + +include::modules/understanding-idp.adoc[leveloffset=+1] +include::modules/config-github-idp.adoc[leveloffset=+1] +include::modules/config-gitlab-idp.adoc[leveloffset=+1] +include::modules/config-google-idp.adoc[leveloffset=+1] +include::modules/config-ldap-idp.adoc[leveloffset=+1] +include::modules/config-openid-idp.adoc[leveloffset=+1] + +[id="additional-resources-idps"] +== Additional resources +* xref:../rosa_getting_started/rosa-accessing-cluster.adoc#rosa-accessing-cluster[Accessing a cluster] +* xref:../rosa_getting_started/rosa-getting-started-workflow.adoc#rosa-getting-started-workflow[Getting started workflow] diff --git a/rosa_getting_started/rosa-creating-cluster.adoc b/rosa_getting_started/rosa-creating-cluster.adoc new file mode 100644 index 0000000000..c724f35c5d --- /dev/null +++ b/rosa_getting_started/rosa-creating-cluster.adoc @@ -0,0 +1,22 @@ +include::modules/attributes-openshift-dedicated.adoc[] +:context: rosa-getting-started +[id="rosa-creating-cluster"] += Creating a ROSA cluster + +toc::[] + +After you set up your environment and install {product-title} (ROSA), create a cluster. + +This document describes how to set up a ROSA cluster. Alternatively, you can create a ROSA cluster with AWS PrivateLink. 
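+
+For reference, cluster creation with the default options reduces to a single `rosa` command. The following is a minimal sketch only; `rosa create cluster`, `rosa logs install`, and `rosa describe cluster` are existing `rosa` CLI commands, but the values shown are placeholders and the detailed procedure is in the module included below:
+
+[source,terminal]
+----
+# Create a cluster with the default settings (placeholder cluster name)
+$ rosa create cluster --cluster-name=<cluster_name>
+
+# Follow the installation logs until the cluster is ready
+$ rosa logs install --cluster=<cluster_name> --watch
+
+# Check the cluster status and console URL
+$ rosa describe cluster --cluster=<cluster_name>
+----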
+ +include::modules/rosa-creating-cluster.adoc[leveloffset=+1] + +== Next steps +xref:../rosa_getting_started/rosa-config-identity-providers.adoc#rosa-config-identity-providers[Configure identity providers] + + +== Additional resources + +* xref:../rosa_getting_started/rosa-getting-started-workflow.adoc#rosa-getting-started-workflow[Getting started workflow] +* xref:../rosa_getting_started/rosa-deleting-cluster.adoc#rosa-deleting-cluster[Deleting a ROSA cluster] +* xref:../rosa_architecture/rosa-architecture-models.adoc#rosa-architecture-models[ROSA architecture] diff --git a/rosa_getting_started/rosa-deleting-access-cluster.adoc b/rosa_getting_started/rosa-deleting-access-cluster.adoc new file mode 100644 index 0000000000..527d433cb1 --- /dev/null +++ b/rosa_getting_started/rosa-deleting-access-cluster.adoc @@ -0,0 +1,12 @@ +include::modules/attributes-openshift-dedicated.adoc[] +[id="rosa-deleting-access-cluster"] += Deleting access to a ROSA cluster +:context: rosa-deleting-access-cluster + +toc::[] + +Delete access to a {product-title} (ROSA) cluster using the `rosa` command-line. + +include::modules/rosa-delete-dedicated-admins.adoc[leveloffset=+1] + +include::modules/rosa-delete-cluster-admins.adoc[leveloffset=+1] diff --git a/rosa_getting_started/rosa-deleting-cluster.adoc b/rosa_getting_started/rosa-deleting-cluster.adoc new file mode 100644 index 0000000000..ad027540cb --- /dev/null +++ b/rosa_getting_started/rosa-deleting-cluster.adoc @@ -0,0 +1,10 @@ +include::modules/attributes-openshift-dedicated.adoc[] +[id="rosa-deleting-cluster"] += Deleting a ROSA cluster +:context: rosa-deleting-cluster + +toc::[] + +Delete a {product-title} (ROSA) cluster using the `rosa` command-line. + +include::modules/rosa-deleting-cluster.adoc[leveloffset=+1] diff --git a/rosa_getting_started/rosa-getting-started-workflow.adoc b/rosa_getting_started/rosa-getting-started-workflow.adoc new file mode 100644 index 0000000000..1652e6616b --- /dev/null +++ b/rosa_getting_started/rosa-getting-started-workflow.adoc @@ -0,0 +1,22 @@ +include::modules/attributes-openshift-dedicated.adoc[] + +[id="rosa-getting-started-workflow"] += Getting started workflow +:context: rosa-getting-started-workflow + +toc::[] + +Follow this workflow to set up and access {product-title} (ROSA) clusters: + +. xref:../rosa_getting_started/rosa-aws-prereqs.adoc#prerequisites[Perform the AWS prerequisites]. +. xref:../rosa_getting_started/rosa-required-aws-service-quotas.adoc#rosa-required-aws-service-quotas[Review the required AWS service quotas]. +. xref:../rosa_getting_started/rosa-config-aws-account.adoc#rosa-config-aws-account[Configure your AWS account]. +. xref:../rosa_getting_started/rosa-installing-rosa.adoc#rosa-installing-rosa[Install ROSA]. +. xref:../rosa_getting_started/rosa-creating-cluster.adoc#rosa-creating-cluster[Create a ROSA cluster] or xref:../rosa_getting_started/rosa-aws-privatelink-creating-cluster.adoc#rosa-aws-privatelink-creating-cluster[Create a ROSA cluster using AWS PrivateLink]. +. xref:../rosa_getting_started/rosa-accessing-cluster.adoc#rosa-accessing-cluster[Access a cluster]. 
+
+== Additional resources
+* xref:../rosa_getting_started/rosa-config-identity-providers.adoc#rosa-config-identity-providers[Configuring identity providers using the OCM console]
+* xref:../rosa_getting_started/rosa-deleting-cluster.adoc#rosa-deleting-cluster[Deleting a cluster]
+* xref:../rosa_getting_started/rosa-deleting-access-cluster.adoc#rosa-deleting-access-cluster[Deleting access to a cluster]
+* xref:../rosa_getting_started/rosa-quickstart.adoc#rosa-getting-started[Command quick reference for creating clusters and users]
diff --git a/rosa_getting_started/rosa-installing-rosa.adoc b/rosa_getting_started/rosa-installing-rosa.adoc
new file mode 100644
index 0000000000..e9813fd277
--- /dev/null
+++ b/rosa_getting_started/rosa-installing-rosa.adoc
@@ -0,0 +1,22 @@
+[id="rosa-installing-rosa"]
+= Installing ROSA
+include::modules/attributes-openshift-dedicated.adoc[]
+:context: rosa-installing-rosa
+
+toc::[]
+
+After you configure your AWS account, install {product-title} (ROSA).
+
+include::modules/rosa-installing.adoc[leveloffset=+1]
+
+[id="next-steps_rosa-installing-rosa"]
+== Next steps
+
+* xref:../rosa_getting_started/rosa-creating-cluster.adoc#rosa-creating-cluster[Create a ROSA cluster] or xref:../rosa_getting_started/rosa-aws-privatelink-creating-cluster.adoc#rosa-aws-privatelink-creating-cluster[Create an AWS PrivateLink cluster on ROSA].
+
+[id="additional-resources-installing"]
+== Additional resources
+
+* xref:../rosa_getting_started/rosa-aws-prereqs.adoc#prerequisites[AWS Prerequisites]
+* xref:../rosa_getting_started/rosa-required-aws-service-quotas.adoc#rosa-required-aws-service-quotas[Required AWS service quotas and requesting increases]
+* xref:../rosa_getting_started/rosa-getting-started-workflow.adoc#rosa-getting-started-workflow[Getting started workflow]
diff --git a/rosa_getting_started/rosa-quickstart.adoc b/rosa_getting_started/rosa-quickstart.adoc
new file mode 100644
index 0000000000..5d4a78164c
--- /dev/null
+++ b/rosa_getting_started/rosa-quickstart.adoc
@@ -0,0 +1,12 @@
+include::modules/attributes-openshift-dedicated.adoc[]
+
+[id="rosa-getting-started"]
+= Command quick reference for creating clusters and users
+:context: rosa-getting-started
+
+toc::[]
+
+include::modules/rosa-quickstart-instructions.adoc[leveloffset=+1]
+
+== Additional resources
+* xref:../rosa_getting_started/rosa-getting-started-workflow.adoc#rosa-getting-started-workflow[Getting started workflow]
diff --git a/rosa_getting_started/rosa-required-aws-service-quotas.adoc b/rosa_getting_started/rosa-required-aws-service-quotas.adoc
new file mode 100644
index 0000000000..2fc54e9186
--- /dev/null
+++ b/rosa_getting_started/rosa-required-aws-service-quotas.adoc
@@ -0,0 +1,17 @@
+include::modules/attributes-openshift-dedicated.adoc[]
+[id="rosa-required-aws-service-quotas"]
+= Required AWS service quotas
+:context: rosa-required-aws-service-quotas
+
+toc::[]
+
+Review this list of the Amazon Web Services (AWS) service quotas that are required to run an {product-title} cluster.
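+
+For reference, you can inspect and verify these quotas from the command line. This is a sketch only; `rosa verify quota` and the AWS CLI `service-quotas` commands exist, but the quota code and values are placeholders and the table in the module below is the authoritative list:
+
+[source,terminal]
+----
+# Verify that the AWS quotas in a region meet the ROSA minimums
+$ rosa verify quota --region=<aws_region>
+
+# List the current EC2 service quotas for comparison against the table below
+$ aws service-quotas list-service-quotas --service-code ec2
+
+# Request an increase for a specific quota if it is too low
+$ aws service-quotas request-service-quota-increase \
+    --service-code ec2 --quota-code <quota_code> --desired-value <value>
+----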
+ +include::modules/rosa-required-aws-service-quotas.adoc[leveloffset=+1] + +== Next steps +* xref:../rosa_getting_started/rosa-config-aws-account.adoc#rosa-config-aws-account[Configure your AWS account] + +== Additional resources + +* xref:../rosa_getting_started/rosa-getting-started-workflow.adoc#rosa-getting-started-workflow[Getting started workflow] diff --git a/rosa_getting_started_sts/images b/rosa_getting_started_sts/images new file mode 120000 index 0000000000..5e67573196 --- /dev/null +++ b/rosa_getting_started_sts/images @@ -0,0 +1 @@ +../images \ No newline at end of file diff --git a/rosa_getting_started_sts/modules b/rosa_getting_started_sts/modules new file mode 120000 index 0000000000..464b823aca --- /dev/null +++ b/rosa_getting_started_sts/modules @@ -0,0 +1 @@ +../modules \ No newline at end of file diff --git a/rosa_getting_started_sts/rosa-sts-accessing-cluster.adoc b/rosa_getting_started_sts/rosa-sts-accessing-cluster.adoc new file mode 100644 index 0000000000..22e50c0ca3 --- /dev/null +++ b/rosa_getting_started_sts/rosa-sts-accessing-cluster.adoc @@ -0,0 +1,19 @@ +include::modules/attributes-openshift-dedicated.adoc[] +[id="rosa-sts-accessing-cluster"] += Accessing a ROSA cluster +:context: rosa-sts-accessing-cluster + +toc::[] + +It is recommended that you access your {product-title} (ROSA) cluster using an identity provider (IDP) account. However, the cluster administrator who created the cluster can access it using the quick access procedure. + +This document describes how to access a cluster and set up an IDP using the `rosa` CLI. Alternatively, you can create an IDP account using the OpenShift Cluster Manager (OCM) console. + +include::modules/rosa-accessing-your-cluster-quick.adoc[leveloffset=+1] +include::modules/rosa-accessing-your-cluster.adoc[leveloffset=+1] +include::modules/rosa-create-cluster-admins.adoc[leveloffset=+1] +include::modules/rosa-create-dedicated-cluster-admins.adoc[leveloffset=+1] + +== Additional resources +* xref:../rosa_getting_started_sts/rosa-sts-config-identity-providers.adoc#rosa-sts-config-identity-providers[Configuring identity providers using the OCM console] +* xref:../rosa_getting_started_sts/rosa-sts-getting-started-workflow.adoc#rosa-sts-getting-started-workflow[Getting started workflow] diff --git a/rosa_getting_started_sts/rosa-sts-aws-prereqs.adoc b/rosa_getting_started_sts/rosa-sts-aws-prereqs.adoc new file mode 100644 index 0000000000..31008f0a79 --- /dev/null +++ b/rosa_getting_started_sts/rosa-sts-aws-prereqs.adoc @@ -0,0 +1,25 @@ +include::modules/attributes-openshift-dedicated.adoc[] +:context: rosa-sts-aws-prerequisites + +[id="rosa-sts-aws-prerequisites"] += AWS prerequisites for ROSA with STS + +toc::[] + +{product-title} (ROSA) provides a model that allows Red Hat to deploy clusters into a customer’s existing Amazon Web Service (AWS) account. + +Ensure that the following AWS prerequisites are met before installing ROSA with STS. 
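+
+As a quick check before you review the requirements below, you can confirm that your AWS credentials resolve to the intended account and, if your account needs it, switch to version 2 STS tokens. Both commands exist in the AWS CLI, but whether the token version change applies depends on your account; the module on setting the AWS Security Token Service version included below is the authoritative procedure:
+
+[source,terminal]
+----
+# Confirm the AWS account and principal behind your current credentials
+$ aws sts get-caller-identity
+
+# Optionally switch the account to version 2 STS tokens (see the module below)
+$ aws iam set-security-token-service-preferences \
+    --global-endpoint-token-version v2Token
+----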
+
+include::modules/rosa-aws-understand.adoc[leveloffset=+1]
+include::modules/rosa-sts-aws-requirements.adoc[leveloffset=+1]
+include::modules/rosa-requirements-deploying-in-opt-in-regions.adoc[leveloffset=+1]
+include::modules/rosa-setting-the-aws-security-token-version.adoc[leveloffset=+2]
+include::modules/rosa-sts-aws-iam.adoc[leveloffset=+1]
+include::modules/rosa-aws-provisioned.adoc[leveloffset=+1]
+
+== Next steps
+xref:../rosa_getting_started_sts/rosa-sts-required-aws-service-quotas.adoc#rosa-sts-required-aws-service-quotas[Review the required AWS service quotas]
+
+== Additional resources
+* See xref:../rosa_planning/rosa-limits-scalability.adoc#initial-planning-considerations_rosa-limits-scalability[Initial Planning Considerations] for guidance on worker node count.
+* See xref:../rosa_policy/rosa-policy-process-security.adoc#rosa-policy-sre-access_rosa-policy-process-security[SRE access to all Red Hat OpenShift Service on AWS clusters] for information about how Red Hat site reliability engineering accesses ROSA clusters.
diff --git a/rosa_getting_started_sts/rosa-sts-config-identity-providers.adoc b/rosa_getting_started_sts/rosa-sts-config-identity-providers.adoc
new file mode 100644
index 0000000000..de2ae5bf3a
--- /dev/null
+++ b/rosa_getting_started_sts/rosa-sts-config-identity-providers.adoc
@@ -0,0 +1,22 @@
+include::modules/attributes-openshift-dedicated.adoc[]
+[id="rosa-sts-config-identity-providers"]
+= Configuring identity providers for STS
+:context: rosa-sts-config-identity-providers
+
+toc::[]
+
+After your {product-title} (ROSA) cluster is created, you must configure identity providers to determine how users log in to access the cluster.
+
+The following topics describe how to configure an identity provider using the OpenShift Cluster Manager (OCM) console. Alternatively, you can use the `rosa` CLI to configure an identity provider and access the cluster.
+
+include::modules/understanding-idp.adoc[leveloffset=+1]
+include::modules/config-github-idp.adoc[leveloffset=+1]
+include::modules/config-gitlab-idp.adoc[leveloffset=+1]
+include::modules/config-google-idp.adoc[leveloffset=+1]
+include::modules/config-ldap-idp.adoc[leveloffset=+1]
+include::modules/config-openid-idp.adoc[leveloffset=+1]
+
+[id="additional-resources-cluster-access-sts"]
+== Additional resources
+* xref:../rosa_getting_started_sts/rosa-sts-accessing-cluster.adoc#rosa-sts-accessing-cluster[Accessing a cluster]
+* xref:../rosa_getting_started_sts/rosa-sts-getting-started-workflow.adoc#rosa-sts-getting-started-workflow[Getting started workflow]
diff --git a/rosa_getting_started_sts/rosa-sts-deleting-access-cluster.adoc b/rosa_getting_started_sts/rosa-sts-deleting-access-cluster.adoc
new file mode 100644
index 0000000000..9340b5590b
--- /dev/null
+++ b/rosa_getting_started_sts/rosa-sts-deleting-access-cluster.adoc
@@ -0,0 +1,12 @@
+include::modules/attributes-openshift-dedicated.adoc[]
+[id="rosa-sts-deleting-access-cluster"]
+= Deleting access to a ROSA cluster
+:context: rosa-sts-deleting-access-cluster
+
+toc::[]
+
+Delete access to a {product-title} (ROSA) cluster by using the `rosa` CLI.
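+
+For quick reference, revoking access typically maps to the `rosa revoke user` command. A minimal sketch with placeholder values follows; the modules included below describe the full procedures:
+
+[source,terminal]
+----
+# Revoke dedicated-admin access from an identity provider user
+$ rosa revoke user dedicated-admin --user=<idp_user_name> --cluster=<cluster_name>
+
+# Revoke cluster-admin access from an identity provider user
+$ rosa revoke user cluster-admin --user=<idp_user_name> --cluster=<cluster_name>
+----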
+
+include::modules/rosa-delete-dedicated-admins.adoc[leveloffset=+1]
+
+include::modules/rosa-delete-cluster-admins.adoc[leveloffset=+1]
diff --git a/rosa_getting_started_sts/rosa-sts-deleting-cluster.adoc b/rosa_getting_started_sts/rosa-sts-deleting-cluster.adoc
new file mode 100644
index 0000000000..052f5a5cba
--- /dev/null
+++ b/rosa_getting_started_sts/rosa-sts-deleting-cluster.adoc
@@ -0,0 +1,12 @@
+include::modules/attributes-openshift-dedicated.adoc[]
+[id="rosa-sts-deleting-cluster"]
+= Deleting a ROSA cluster
+:context: rosa-sts-deleting-cluster
+
+toc::[]
+
+Delete a {product-title} (ROSA) cluster by using the `rosa` CLI.
+
+include::modules/rosa-deleting-cluster.adoc[leveloffset=+1]
+include::modules/rosa-deleting-aws-resources-cli.adoc[leveloffset=+1]
+include::modules/rosa-deleting-aws-resources-aws-console.adoc[leveloffset=+1]
diff --git a/rosa_getting_started_sts/rosa-sts-getting-started-workflow.adoc b/rosa_getting_started_sts/rosa-sts-getting-started-workflow.adoc
new file mode 100644
index 0000000000..64b03c95c6
--- /dev/null
+++ b/rosa_getting_started_sts/rosa-sts-getting-started-workflow.adoc
@@ -0,0 +1,26 @@
+include::modules/attributes-openshift-dedicated.adoc[]
+
+[id="rosa-sts-getting-started-workflow"]
+= Getting started using STS workflow
+:context: rosa-sts-getting-started-workflow
+
+toc::[]
+
+[id="rosa-getting-started-rosa-sts"]
+== Getting started with ROSA using STS
+
+The Amazon Web Services (AWS) Security Token Service (STS) is a global web service that provides short-term credentials for IAM or federated users. You can use AWS STS with {product-title} (ROSA) to allocate temporary, limited-privilege credentials for component-specific IAM roles. The service enables cluster components to make AWS API calls using secure cloud resource management practices.
+
+Follow this workflow to set up and access {product-title} (ROSA) clusters that use the AWS Security Token Service (STS).
+
+. xref:../rosa_getting_started_sts/rosa-sts-aws-prereqs.adoc#rosa-sts-aws-prerequisites[Complete the AWS prerequisites for ROSA with STS].
+. xref:../rosa_getting_started_sts/rosa-sts-required-aws-service-quotas.adoc#rosa-sts-required-aws-service-quotas[Review the required AWS service quotas].
+. xref:../rosa_getting_started_sts/rosa-sts-setting-up-environment.adoc#rosa-sts-setting-up-environment[Set up the environment and install ROSA using STS].
+. xref:../rosa_getting_started_sts/rosa_creating_a_cluster_with_sts/rosa-sts-creating-a-cluster-quickly.adoc#rosa-sts-creating-a-cluster-quickly[Create a ROSA cluster with STS quickly] or xref:../rosa_getting_started_sts/rosa_creating_a_cluster_with_sts/rosa-sts-creating-a-cluster-with-customizations.adoc#rosa-sts-creating-a-cluster-with-customizations[create a cluster using customizations].
+. xref:../rosa_getting_started_sts/rosa-sts-accessing-cluster.adoc#rosa-sts-accessing-cluster[Access a cluster].
+ +[id="additional_resources_rosa-sts-getting-started-workflow"] +== Additional resources +* xref:../rosa_getting_started_sts/rosa-sts-config-identity-providers.adoc#rosa-sts-config-identity-providers[Configure identity providers using the OCM console] +* xref:../rosa_getting_started_sts/rosa-sts-deleting-cluster.adoc#rosa-sts-deleting-cluster[Deleting a cluster] +* xref:../rosa_getting_started_sts/rosa-sts-deleting-access-cluster.adoc#rosa-sts-deleting-access-cluster[Deleting access to a cluster] diff --git a/rosa_getting_started_sts/rosa-sts-required-aws-service-quotas.adoc b/rosa_getting_started_sts/rosa-sts-required-aws-service-quotas.adoc new file mode 100644 index 0000000000..14359fa597 --- /dev/null +++ b/rosa_getting_started_sts/rosa-sts-required-aws-service-quotas.adoc @@ -0,0 +1,13 @@ +include::modules/attributes-openshift-dedicated.adoc[] +[id="rosa-sts-required-aws-service-quotas"] += Required AWS service quotas +:context: rosa-sts-required-aws-service-quotas + +toc::[] + +Review this list of the required Amazon Web Service (AWS) service quotas that are required to run an {product-title} cluster. + +include::modules/rosa-required-aws-service-quotas.adoc[leveloffset=+1] + +== Next steps +* xref:../rosa_getting_started_sts/rosa-sts-setting-up-environment.adoc#rosa-sts-setting-up-environment[Set up the environment and install ROSA] diff --git a/rosa_getting_started_sts/rosa-sts-setting-up-environment.adoc b/rosa_getting_started_sts/rosa-sts-setting-up-environment.adoc new file mode 100644 index 0000000000..af131ea6f3 --- /dev/null +++ b/rosa_getting_started_sts/rosa-sts-setting-up-environment.adoc @@ -0,0 +1,21 @@ +[id="rosa-sts-setting-up-environment"] += Setting up the environment for using STS +include::modules/attributes-openshift-dedicated.adoc[] +:context: rosa-sts-setting-up-environment + +toc::[] + +After you meet the AWS prerequisites, set up your environment and install {product-title} (ROSA). + +include::modules/rosa-sts-setting-up-environment.adoc[leveloffset=+1] + +[id="next-steps_rosa-sts-setting-up-environment"] +== Next steps + +* xref:../rosa_getting_started_sts/rosa_creating_a_cluster_with_sts/rosa-sts-creating-a-cluster-quickly.adoc#rosa-sts-creating-a-cluster-quickly[Create a ROSA cluster with STS quickly] or xref:../rosa_getting_started_sts/rosa_creating_a_cluster_with_sts/rosa-sts-creating-a-cluster-with-customizations.adoc#rosa-sts-creating-a-cluster-with-customizations[create a cluster using customizations]. 
+ +[id="additional-resources"] +== Additional resources + +* xref:../rosa_getting_started_sts/rosa-sts-aws-prereqs.adoc#rosa-sts-aws-prerequisites[AWS Prerequisites] +* xref:../rosa_getting_started_sts/rosa-sts-required-aws-service-quotas.adoc#rosa-sts-required-aws-service-quotas[Required AWS service quotas and increase requests] diff --git a/rosa_getting_started_sts/rosa_creating_a_cluster_with_sts/images b/rosa_getting_started_sts/rosa_creating_a_cluster_with_sts/images new file mode 120000 index 0000000000..5fa6987088 --- /dev/null +++ b/rosa_getting_started_sts/rosa_creating_a_cluster_with_sts/images @@ -0,0 +1 @@ +../../images \ No newline at end of file diff --git a/rosa_getting_started_sts/rosa_creating_a_cluster_with_sts/modules b/rosa_getting_started_sts/rosa_creating_a_cluster_with_sts/modules new file mode 120000 index 0000000000..8b0e854007 --- /dev/null +++ b/rosa_getting_started_sts/rosa_creating_a_cluster_with_sts/modules @@ -0,0 +1 @@ +../../modules \ No newline at end of file diff --git a/rosa_getting_started_sts/rosa_creating_a_cluster_with_sts/rosa-sts-about-iam-resources.adoc b/rosa_getting_started_sts/rosa_creating_a_cluster_with_sts/rosa-sts-about-iam-resources.adoc new file mode 100644 index 0000000000..600ecf80fe --- /dev/null +++ b/rosa_getting_started_sts/rosa_creating_a_cluster_with_sts/rosa-sts-about-iam-resources.adoc @@ -0,0 +1,25 @@ +[id="rosa-sts-about-iam-resources"] += About IAM resources for ROSA clusters that use STS +include::modules/attributes-openshift-dedicated.adoc[] +:context: rosa-sts-about-iam-resources + +toc::[] + +include::modules/rosa-sts-about-iam-resources.adoc[leveloffset=+1] + +.Additional resources + +* For steps to quickly create a ROSA cluster with STS, including the AWS IAM resources, see xref:../../rosa_getting_started_sts/rosa_creating_a_cluster_with_sts/rosa-sts-creating-a-cluster-quickly.adoc#rosa-sts-creating-a-cluster-quickly[Creating a ROSA cluster with STS quickly]. +* For steps to create a ROSA cluster with STS using customizations, including the AWS IAM resources, see xref:../../rosa_getting_started_sts/rosa_creating_a_cluster_with_sts/rosa-sts-creating-a-cluster-with-customizations.adoc#rosa-sts-creating-a-cluster-with-customizations[Creating a ROSA cluster with STS using customizations]. + +include::modules/rosa-sts-account-wide-roles-and-policies.adoc[leveloffset=+1] + +.Additional resources + +* For a definition of OpenShift major, minor, and patch versions, see xref:../../rosa_policy/rosa-life-cycle.adoc#rosa-life-cycle-definitions_rosa-life-cycle[the {product-title} update life cycle]. 
+
+include::modules/rosa-sts-account-wide-role-and-policy-commands.adoc[leveloffset=+2]
+include::modules/rosa-sts-operator-roles.adoc[leveloffset=+1]
+include::modules/rosa-sts-operator-role-commands.adoc[leveloffset=+2]
+include::modules/rosa-sts-oidc-provider.adoc[leveloffset=+1]
+include::modules/rosa-sts-oidc-provider-command.adoc[leveloffset=+2]
diff --git a/rosa_getting_started_sts/rosa_creating_a_cluster_with_sts/rosa-sts-creating-a-cluster-quickly.adoc b/rosa_getting_started_sts/rosa_creating_a_cluster_with_sts/rosa-sts-creating-a-cluster-quickly.adoc
new file mode 100644
index 0000000000..bfda0082b5
--- /dev/null
+++ b/rosa_getting_started_sts/rosa_creating_a_cluster_with_sts/rosa-sts-creating-a-cluster-quickly.adoc
@@ -0,0 +1,23 @@
+[id="rosa-sts-creating-a-cluster-quickly"]
+= Creating a ROSA cluster with STS quickly
+include::modules/attributes-openshift-dedicated.adoc[]
+:context: rosa-sts-creating-a-cluster-quickly
+
+toc::[]
+
+Use the {product-title} CLI (`rosa`) with the default options to quickly create an OpenShift cluster that uses the AWS Security Token Service (STS).
+
+include::modules/rosa-sts-creating-a-cluster-quickly.adoc[leveloffset=+1]
+
+[id="next-steps_{context}"]
+== Next steps
+
+* xref:../../rosa_getting_started_sts/rosa-sts-accessing-cluster.adoc#rosa-sts-accessing-cluster[Accessing a ROSA cluster]
+
+[id="additional-resources_{context}"]
+== Additional resources
+
+* For more information about the AWS Identity and Access Management (IAM) resources required to deploy {product-title} with STS, see xref:../../rosa_getting_started_sts/rosa_creating_a_cluster_with_sts/rosa-sts-about-iam-resources.adoc#rosa-sts-about-iam-resources[About IAM resources for clusters that use STS].
+* For information about the prerequisites to installing ROSA with STS, see xref:../../rosa_getting_started_sts/rosa-sts-aws-prereqs.adoc#rosa-sts-aws-prerequisites[AWS prerequisites for ROSA with STS].
+* For more information about using OpenID Connect (OIDC) identity providers in AWS IAM, see link:https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_create_oidc.html[Creating OpenID Connect (OIDC) identity providers] in the AWS documentation.
+* For more information about troubleshooting ROSA cluster deployments, see xref:../../rosa_support/rosa-troubleshooting-deployments.adoc#rosa-troubleshooting-cluster-deployments[Troubleshooting cluster deployments].
diff --git a/rosa_getting_started_sts/rosa_creating_a_cluster_with_sts/rosa-sts-creating-a-cluster-with-customizations.adoc b/rosa_getting_started_sts/rosa_creating_a_cluster_with_sts/rosa-sts-creating-a-cluster-with-customizations.adoc
new file mode 100644
index 0000000000..0daf1b19ec
--- /dev/null
+++ b/rosa_getting_started_sts/rosa_creating_a_cluster_with_sts/rosa-sts-creating-a-cluster-with-customizations.adoc
@@ -0,0 +1,25 @@
+[id="rosa-sts-creating-a-cluster-with-customizations"]
+= Creating a ROSA cluster with STS using customizations
+include::modules/attributes-openshift-dedicated.adoc[]
+:context: rosa-sts-creating-a-cluster-with-customizations
+
+toc::[]
+
+Use the {product-title} CLI (`rosa`) to create an OpenShift cluster with the AWS Security Token Service (STS) using customizations.
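+
+For orientation, a customized STS cluster creation typically chains several `rosa` commands. The following sketch uses placeholder values and only a subset of the available flags; the support considerations and the full procedure are in the modules included below:
+
+[source,terminal]
+----
+# Create the cluster with custom options, in manual mode, across multiple AZs
+$ rosa create cluster --cluster-name=<cluster_name> --sts --mode=manual \
+    --multi-az --region=<aws_region>
+
+# Create the cluster-specific Operator roles and the OIDC provider
+$ rosa create operator-roles --cluster=<cluster_name> --mode=auto
+$ rosa create oidc-provider --cluster=<cluster_name> --mode=auto
+----
+
+Adding `--interactive` to `rosa create cluster` prompts for each option instead; the interactive cluster creation mode reference describes those prompts.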
+
+include::modules/rosa-sts-support-considerations.adoc[leveloffset=+1]
+include::modules/rosa-sts-creating-a-cluster-with-customizations.adoc[leveloffset=+1]
+
+[id="next-steps_{context}"]
+== Next steps
+
+* xref:../../rosa_getting_started_sts/rosa-sts-accessing-cluster.adoc#rosa-sts-accessing-cluster[Accessing a ROSA cluster]
+
+[id="additional-resources_{context}"]
+== Additional resources
+
+* For more information about the AWS Identity and Access Management (IAM) resources required to deploy {product-title} with STS, see xref:../../rosa_getting_started_sts/rosa_creating_a_cluster_with_sts/rosa-sts-about-iam-resources.adoc#rosa-sts-about-iam-resources[About IAM resources for clusters that use STS].
+* For an overview of the options that are presented when you create a cluster using interactive mode, see xref:../../rosa_getting_started_sts/rosa_creating_a_cluster_with_sts/rosa-sts-interactive-mode-reference.adoc#rosa-sts-interactive-mode-reference[Interactive cluster creation mode reference].
+* For information about the prerequisites to installing ROSA with STS, see xref:../../rosa_getting_started_sts/rosa-sts-aws-prereqs.adoc#rosa-sts-aws-prerequisites[AWS prerequisites for ROSA with STS].
+* For more information about using OpenID Connect (OIDC) identity providers in AWS IAM, see link:https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_create_oidc.html[Creating OpenID Connect (OIDC) identity providers] in the AWS documentation.
+* For more information about troubleshooting ROSA cluster deployments, see xref:../../rosa_support/rosa-troubleshooting-deployments.adoc#rosa-troubleshooting-cluster-deployments[Troubleshooting cluster deployments].
diff --git a/rosa_getting_started_sts/rosa_creating_a_cluster_with_sts/rosa-sts-interactive-mode-reference.adoc b/rosa_getting_started_sts/rosa_creating_a_cluster_with_sts/rosa-sts-interactive-mode-reference.adoc
new file mode 100644
index 0000000000..8854c52fe7
--- /dev/null
+++ b/rosa_getting_started_sts/rosa_creating_a_cluster_with_sts/rosa-sts-interactive-mode-reference.adoc
@@ -0,0 +1,16 @@
+[id="rosa-sts-interactive-mode-reference"]
+= Interactive cluster creation mode reference
+include::modules/attributes-openshift-dedicated.adoc[]
+:context: rosa-sts-interactive-mode-reference
+
+toc::[]
+
+This section provides an overview of the options that are presented when you use the interactive mode to create a cluster through the `rosa` CLI.
+
+include::modules/rosa-sts-interactive-mode-reference.adoc[leveloffset=+1]
+
+[id="additional-resources_{context}"]
+== Additional resources
+
+* For detailed steps to quickly create a ROSA cluster with STS, including the AWS IAM resources, see xref:../../rosa_getting_started_sts/rosa_creating_a_cluster_with_sts/rosa-sts-creating-a-cluster-quickly.adoc#rosa-sts-creating-a-cluster-quickly[Creating a ROSA cluster with STS quickly].
+* For detailed steps to create a ROSA cluster with STS using customizations, including the AWS IAM resources, see xref:../../rosa_getting_started_sts/rosa_creating_a_cluster_with_sts/rosa-sts-creating-a-cluster-with-customizations.adoc#rosa-sts-creating-a-cluster-with-customizations[Creating a ROSA cluster with STS using customizations].
diff --git a/rosa_planning/images b/rosa_planning/images new file mode 120000 index 0000000000..5e67573196 --- /dev/null +++ b/rosa_planning/images @@ -0,0 +1 @@ +../images \ No newline at end of file diff --git a/rosa_planning/modules b/rosa_planning/modules new file mode 120000 index 0000000000..464b823aca --- /dev/null +++ b/rosa_planning/modules @@ -0,0 +1 @@ +../modules \ No newline at end of file diff --git a/rosa_planning/rosa-limits-scalability.adoc b/rosa_planning/rosa-limits-scalability.adoc new file mode 100644 index 0000000000..939011ec47 --- /dev/null +++ b/rosa_planning/rosa-limits-scalability.adoc @@ -0,0 +1,11 @@ +include::modules/attributes-openshift-dedicated.adoc[] + +[id="rosa-limits-scalability"] += Limits and scalability +:context: rosa-limits-scalability + +toc::[] + +include::modules/rosa-planning-considerations.adoc[leveloffset=+1] +include::modules/rosa-planning-cluster-maximums.adoc[leveloffset=+1] +include::modules/rosa-planning-cluster-maximums-environment.adoc[leveloffset=+1] diff --git a/rosa_planning/rosa-planning-environment.adoc b/rosa_planning/rosa-planning-environment.adoc new file mode 100644 index 0000000000..57a1b6e354 --- /dev/null +++ b/rosa_planning/rosa-planning-environment.adoc @@ -0,0 +1,10 @@ +include::modules/attributes-openshift-dedicated.adoc[] + +[id="rosa-planning-environment"] += Planning your environment +:context: rosa-planning-environment + +toc::[] + +include::modules/rosa-planning-environment-cluster-max.adoc[leveloffset=+1] +include::modules/rosa-planning-environment-application-reqs.adoc[leveloffset=+1] diff --git a/rosa_policy/images b/rosa_policy/images new file mode 120000 index 0000000000..5e67573196 --- /dev/null +++ b/rosa_policy/images @@ -0,0 +1 @@ +../images \ No newline at end of file diff --git a/rosa_policy/modules b/rosa_policy/modules new file mode 120000 index 0000000000..464b823aca --- /dev/null +++ b/rosa_policy/modules @@ -0,0 +1 @@ +../modules \ No newline at end of file diff --git a/rosa_policy/rosa-life-cycle.adoc b/rosa_policy/rosa-life-cycle.adoc new file mode 100644 index 0000000000..11e33776b5 --- /dev/null +++ b/rosa_policy/rosa-life-cycle.adoc @@ -0,0 +1,27 @@ +include::modules/attributes-openshift-dedicated.adoc[] +:context: rosa-life-cycle +[id="rosa-life-cycle"] += {product-title} update life cycle + +toc::[] + +include::modules/life-cycle-overview.adoc[leveloffset=+1] + +.Additional resources + +* xref:../rosa_policy/rosa-service-definition.adoc#rosa-service-definition[{product-title} service definition] + +include::modules/life-cycle-definitions.adoc[leveloffset=+1] +include::modules/life-cycle-major-versions.adoc[leveloffset=+1] +include::modules/life-cycle-minor-versions.adoc[leveloffset=+1] + +.Additional resources + +* xref:../rosa_policy/rosa-life-cycle.adoc#rosa-limited-support_rosa-life-cycle[{product-title} limited support status] + +include::modules/life-cycle-patch-versions.adoc[leveloffset=+1] +include::modules/life-cycle-limited-support.adoc[leveloffset=+1] +include::modules/life-cycle-supported-versions.adoc[leveloffset=+1] +include::modules/life-cycle-install.adoc[leveloffset=+1] +include::modules/life-cycle-mandatory-upgrades.adoc[leveloffset=+1] +include::modules/life-cycle-dates.adoc[leveloffset=+1] diff --git a/rosa_policy/rosa-policy-process-security.adoc b/rosa_policy/rosa-policy-process-security.adoc new file mode 100644 index 0000000000..99dd3ef7fa --- /dev/null +++ b/rosa_policy/rosa-policy-process-security.adoc @@ -0,0 +1,33 @@ 
+include::modules/attributes-openshift-dedicated.adoc[] +:context: rosa-policy-process-security +[id="rosa-policy-process-security"] += Understanding process and security for {product-title} + +toc::[] + +This document details the Red Hat responsibilities for the managed {product-title} (ROSA). + +.Acronyms and terms + +* *AWS* - Amazon Web Services +* *CEE* - Customer Experience and Engagement (Red Hat Support) +* *CI/CD* - Continuous Integration / Continuous Delivery +* *CVE* - Common Vulnerabilities and Exposures +* *OCM* - OpenShift Cluster Manager +* *PVs* - Persistent Volumes +* *ROSA* - {product-title} +* *SRE* - Red Hat Site Reliability Engineering +* *VPC* - Virtual Private Cloud + +include::modules/rosa-policy-incident.adoc[leveloffset=+1] +include::modules/rosa-policy-change-management.adoc[leveloffset=+1] +include::modules/rosa-policy-identity-access-management.adoc[leveloffset=+1] +include::modules/rosa-policy-security-regulation-compliance.adoc[leveloffset=+1] +include::modules/rosa-policy-disaster-recovery.adoc[leveloffset=+1] + + +== Additional resources + +* For more information about customer or shared responsibilities, see the xref:../rosa_policy/rosa-policy-responsibility-matrix.adoc#rosa-policy-responsibilities_rosa-policy-responsibility-matrix[ROSA Responsibilities] document. + +* For more information about ROSA and its components, see the xref:../rosa_policy/rosa-service-definition.adoc#rosa-service-definition[ROSA Service Definition]. diff --git a/rosa_policy/rosa-policy-responsibility-matrix.adoc b/rosa_policy/rosa-policy-responsibility-matrix.adoc new file mode 100644 index 0000000000..20f5776a7f --- /dev/null +++ b/rosa_policy/rosa-policy-responsibility-matrix.adoc @@ -0,0 +1,12 @@ +include::modules/attributes-openshift-dedicated.adoc[] +:context: rosa-policy-responsibility-matrix +[id="rosa-policy-responsibility-matrix"] += Responsibility assignment matrix + +toc::[] + +This documentation outlines Red Hat, cloud provider, and customer responsibilities for the {product-title} (ROSA) managed service. + +include::modules/rosa-policy-responsibilities.adoc[leveloffset=+1] +include::modules/rosa-policy-shared-responsibility.adoc[leveloffset=+1] +include::modules/rosa-policy-customer-responsibility.adoc[leveloffset=+1] diff --git a/rosa_policy/rosa-policy-understand-availability.adoc b/rosa_policy/rosa-policy-understand-availability.adoc new file mode 100644 index 0000000000..198fca4ba3 --- /dev/null +++ b/rosa_policy/rosa-policy-understand-availability.adoc @@ -0,0 +1,10 @@ +include::modules/attributes-openshift-dedicated.adoc[] +[id="rosa-policy-understand-availability"] +:context: rosa-policy-understand-availability += About availability for {product-title} + +toc::[] + +Availability and disaster avoidance are extremely important aspects of any application platform. Although {product-title} (ROSA) provides many protections against failures at several levels, customer-deployed applications must be appropriately configured for high availability. To account for outages that might occur with cloud providers, additional options are available such as deploying a cluster across multiple availability zones and maintaining multiple clusters with failover mechanisms. 
+ +include::modules/rosa-policy-failure-points.adoc[leveloffset=+1] diff --git a/rosa_policy/rosa-service-definition.adoc b/rosa_policy/rosa-service-definition.adoc new file mode 100644 index 0000000000..3265b25267 --- /dev/null +++ b/rosa_policy/rosa-service-definition.adoc @@ -0,0 +1,22 @@ +include::modules/attributes-openshift-dedicated.adoc[] +:context: rosa-service-definition +[id="rosa-service-definition"] += {product-title} service definition + +toc::[] + +This documentation outlines the service definition for the {product-title} (ROSA) managed service. + +include::modules/rosa-sdpolicy-account-management.adoc[leveloffset=+1] +include::modules/rosa-sdpolicy-logging.adoc[leveloffset=+1] +include::modules/rosa-sdpolicy-monitoring.adoc[leveloffset=+1] +include::modules/rosa-sdpolicy-networking.adoc[leveloffset=+1] +include::modules/rosa-sdpolicy-storage.adoc[leveloffset=+1] +include::modules/rosa-sdpolicy-platform.adoc[leveloffset=+1] +include::modules/rosa-sdpolicy-security.adoc[leveloffset=+1] + + +== Additional resources + +* See xref:../rosa_policy/rosa-policy-process-security.adoc#rosa-policy-process-security[Understanding process and security for ROSA] for the latest compliance information. +* See xref:../rosa_policy/rosa-life-cycle.adoc#rosa-life-cycle[ROSA life cycle] diff --git a/rosa_support/images b/rosa_support/images new file mode 120000 index 0000000000..5e67573196 --- /dev/null +++ b/rosa_support/images @@ -0,0 +1 @@ +../images \ No newline at end of file diff --git a/rosa_support/modules b/rosa_support/modules new file mode 120000 index 0000000000..464b823aca --- /dev/null +++ b/rosa_support/modules @@ -0,0 +1 @@ +../modules \ No newline at end of file diff --git a/rosa_support/rosa-getting-support.adoc b/rosa_support/rosa-getting-support.adoc new file mode 100644 index 0000000000..23649b04a2 --- /dev/null +++ b/rosa_support/rosa-getting-support.adoc @@ -0,0 +1,9 @@ +[id="rosa-getting-support"] += Getting support for {product-title} +include::modules/attributes-openshift-dedicated.adoc[] +:context: rosa-getting-support +toc::[] + +Get support for {product-title} (ROSA). 
+
+include::modules/rosa-getting-support.adoc[leveloffset=+1]
diff --git a/rosa_support/rosa-troubleshooting-deployments.adoc b/rosa_support/rosa-troubleshooting-deployments.adoc
new file mode 100644
index 0000000000..2b204655bc
--- /dev/null
+++ b/rosa_support/rosa-troubleshooting-deployments.adoc
@@ -0,0 +1,7 @@
+include::modules/attributes-openshift-dedicated.adoc[]
+[id="rosa-troubleshooting-cluster-deployments"]
+= Troubleshooting cluster deployments
+:context: rosa-troubleshooting-cluster-deployments
+toc::[]
+
+include::modules/rosa-troubleshooting-deployment.adoc[leveloffset=+1]
diff --git a/rosa_support/rosa-troubleshooting-expired-tokens.adoc b/rosa_support/rosa-troubleshooting-expired-tokens.adoc
new file mode 100644
index 0000000000..a9167016ab
--- /dev/null
+++ b/rosa_support/rosa-troubleshooting-expired-tokens.adoc
@@ -0,0 +1,7 @@
+include::modules/attributes-openshift-dedicated.adoc[]
+[id="rosa-troubleshooting-expired-tokens"]
+= Troubleshooting expired tokens
+:context: rosa-troubleshooting-expired-tokens
+toc::[]
+
+include::modules/rosa-troubleshooting-expired-token.adoc[leveloffset=+1]
diff --git a/rosa_support/rosa-troubleshooting-installations.adoc b/rosa_support/rosa-troubleshooting-installations.adoc
new file mode 100644
index 0000000000..6a61bba710
--- /dev/null
+++ b/rosa_support/rosa-troubleshooting-installations.adoc
@@ -0,0 +1,8 @@
+include::modules/attributes-openshift-dedicated.adoc[]
+[id="rosa-troubleshooting-installations"]
+= Troubleshooting installations
+:context: rosa-troubleshooting-installations
+
+toc::[]
+
+include::modules/rosa-troubleshooting-installing.adoc[leveloffset=+1]
diff --git a/storage/persistent_storage/osd-persistent-storage-aws.adoc b/storage/persistent_storage/osd-persistent-storage-aws.adoc
new file mode 100644
index 0000000000..2de2bb26cc
--- /dev/null
+++ b/storage/persistent_storage/osd-persistent-storage-aws.adoc
@@ -0,0 +1,39 @@
+[id="osd-persistent-storage-aws"]
+= Setting up AWS EFS for {product-title}
+include::modules/attributes-openshift-dedicated.adoc[]
+:context: osd-persistent-storage-aws
+
+toc::[]
+
+Amazon Web Services Elastic File System (AWS EFS) provides Network File System (NFS) storage that can be provisioned on {product-title} clusters. AWS also provides and supports a CSI EFS driver for Kubernetes that allows Kubernetes workloads to use this shared file storage.
+
+This document describes the basic steps needed to set up your AWS account and prepare EFS for use with {product-title}. For more information about AWS EFS, see the link:https://docs.aws.amazon.com/efs/index.html[AWS EFS documentation].
+
+[IMPORTANT]
+====
+Red Hat does not provide official support for this feature, including backup and recovery. The customer is responsible for backing up the EFS data and recovering it in the event of an outage or data loss.
+====
+
+The high-level process to enable EFS on a cluster is as follows:
+
+. Create an AWS EFS in the AWS account used by the cluster.
+. Install the AWS EFS Operator from OperatorHub.
+. Create `SharedVolume` custom resources.
+. Use the generated persistent volume claims in pod `spec.volumes`, as shown in the sketch after this list.
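+
+The following is a minimal sketch of the `SharedVolume` and generated persistent volume claim steps. The API group, field names, and generated claim name shown here are illustrative assumptions; the console and CLI modules included below document the exact resources:
+
+[source,terminal]
+----
+# Create a SharedVolume that points at an existing EFS file system and access point
+$ cat <<EOF | oc apply -f -
+apiVersion: aws-efs.managed.openshift.io/v1alpha1
+kind: SharedVolume
+metadata:
+  name: sv1
+  namespace: my-project
+spec:
+  accessPointID: <efs_access_point_id>
+  fileSystemID: <efs_file_system_id>
+EOF
+
+# The Operator generates a PersistentVolumeClaim (for example, pvc-sv1) that pods
+# can reference in spec.volumes as a persistentVolumeClaim
+$ oc get pvc -n my-project
+----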
+ +== Prerequisites +ifdef::openshift-dedicated[] +* Customer Cloud Subscription (CCS) for a {product-title} cluster +endif::[] + +ifdef::openshift-rosa[] +* A {product-title} cluster +endif::[] +* Administrator access to the AWS account of that cluster + +include::modules/osd-storage-pv-aws-config-account.adoc[leveloffset=+1] +include::modules/osd-storage-pv-aws-install-efs.adoc[leveloffset=+1] +include::modules/osd-storage-pv-aws-create-sharedvolumes-console.adoc[leveloffset=+1] +include::modules/osd-storage-pv-aws-create-sharedvolumes-cli.adoc[leveloffset=+1] +include::modules/osd-storage-pv-aws-connect-pods.adoc[leveloffset=+1] +include::modules/osd-storage-pv-aws-uninstall-efs.adoc[leveloffset=+1] diff --git a/support/osd-managed-resources.adoc b/support/osd-managed-resources.adoc new file mode 100644 index 0000000000..3c57dfe53d --- /dev/null +++ b/support/osd-managed-resources.adoc @@ -0,0 +1,53 @@ +[id="osd-managed-resources"] += {product-title} managed resources +include::modules/attributes-openshift-dedicated.adoc[] +:context: osd-managed-resources + +toc::[] + +[id="osd-managed-resources-overview"] +== Overview + +The following covers all resources managed or protected by the Service Reliability Engineering Platform (SRE-P) Team. Customers should not attempt to modify these resources because doing so can lead to cluster instability. + +[id="osd-managed-resources-all"] +== Hive managed resources + +The following list displays the {product-title} resources managed by OpenShift Hive, the centralized fleet configuration management system. These resources are in addition to the OpenShift Container Platform resources created during installation. OpenShift Hive continually attempts to maintain consistency across all {product-title} clusters. Changes to {product-title} resources should be made through OCM so that OCM and Hive are synchronized. Contact ocm-feedback@redhat.com if OCM does not support modifying the resources in question. + +.List of Hive managed resources +[%collapsible] +==== +[source,yaml] +---- +include::https://raw.githubusercontent.com/openshift/managed-cluster-config/master/resources/managed/all-osd-resources.yaml[] +---- +==== + +[id="osd-add-on-managed-namespaces"] +== {product-title} add-on namespaces + +{product-title} add-ons are services available for installation after cluster installation. These additional services include AWS CloudWatch, Red Hat CodeReady Workspaces, Red Hat OpenShift API Management, and Cluster Logging Operator. Any changes to resources within the following namespaces might be overridden by the add-on during upgrades, which can lead to unsupported configurations for the add-on functionality. + +.List of add-on managed namespaces +[%collapsible] +==== +[source,yaml] +---- +include::https://raw.githubusercontent.com/openshift/managed-cluster-config/master/resources/addons-namespaces/main.yaml[] +---- +==== + +[id="osd-validating-webhooks"] +== {product-title} validating webhooks + +{product-title} validating webhooks are a set of dynamic admission controls maintained by the OpenShift SRE team. These HTTP callbacks, also known as webhooks, are called for various types of requests to ensure cluster stability. Upon request the webhooks accept or reject the request. The following list describes the various webhooks with rules containing the registered operations and resources that are controlled. Any attempt to circumvent these validating webhooks could affect the stability and supportability of the cluster. 
+
+.List of validating webhooks
+[%collapsible]
+====
+[source,json]
+----
+include::https://raw.githubusercontent.com/openshift/managed-cluster-validating-webhooks/master/docs/webhooks.json[]
+----
+====
diff --git a/upgrading/images b/upgrading/images
new file mode 120000
index 0000000000..5e67573196
--- /dev/null
+++ b/upgrading/images
@@ -0,0 +1 @@
+../images
\ No newline at end of file
diff --git a/upgrading/modules b/upgrading/modules
new file mode 120000
index 0000000000..464b823aca
--- /dev/null
+++ b/upgrading/modules
@@ -0,0 +1 @@
+../modules
\ No newline at end of file
diff --git a/upgrading/osd-upgrades.adoc b/upgrading/osd-upgrades.adoc
new file mode 100644
index 0000000000..e29dc11aa8
--- /dev/null
+++ b/upgrading/osd-upgrades.adoc
@@ -0,0 +1,19 @@
+[id="osd-upgrades"]
+= {product-title} cluster upgrades
+:context: osd-upgrades
+include::modules/attributes-openshift-dedicated.adoc[]
+
+toc::[]
+
+[role="_abstract"]
+You can schedule automatic or manual upgrade policies to update the version of your {product-title} clusters. You can upgrade {product-title} clusters through the {OCM} or the OCM CLI.
+
+include::modules/upgrade.adoc[leveloffset=+1]
+
+include::modules/upgrade-auto.adoc[leveloffset=+1]
+
+include::modules/upgrade-manual.adoc[leveloffset=+1]
+
+// Per Will Gordon: Right now, the OCM CLI isn't productized, so I'm not sure if we should be referring to it in documentation just yet, but I absolutely think it will be useful eventually!
+
+//include::modules/upgrade-cli.adoc[leveloffset=+1]
diff --git a/upgrading/rosa-upgrading-sts.adoc b/upgrading/rosa-upgrading-sts.adoc
new file mode 100644
index 0000000000..609ba276ba
--- /dev/null
+++ b/upgrading/rosa-upgrading-sts.adoc
@@ -0,0 +1,29 @@
+[id="rosa-upgrading-sts"]
+= Upgrading ROSA clusters with STS
+include::modules/attributes-openshift-dedicated.adoc[]
+:context: rosa-upgrading-sts
+
+toc::[]
+
+[id="rosa-lifecycle-policy_{context}"]
+== Life cycle policies and planning
+
+To plan an upgrade, review the xref:../rosa_policy/rosa-life-cycle.adoc#rosa-life-cycle[{product-title} update life cycle]. The life cycle page includes release definitions, support and upgrade requirements, installation policy information and life cycle dates.
+
+include::modules/rosa-upgrading-preparing-4-7-to-4-8.adoc[leveloffset=+1]
+
+[id="rosa-sts-upgrading-a-cluster-with-sts"]
+== Upgrading a ROSA cluster that uses STS
+
+There are two methods to upgrade {product-title} (ROSA) clusters that use the AWS Security Token Service (STS):
+
+* Manual upgrades through the `rosa` CLI
+* Manual upgrades through the {cloud-redhat-com} console
+
+[NOTE]
+====
+For steps to upgrade a ROSA cluster that does not use the AWS Security Token Service (STS), see xref:../upgrading/rosa-upgrading.adoc#rosa-upgrading[Upgrading ROSA clusters].
+====
+
+include::modules/rosa-upgrading-cli-tutorial.adoc[leveloffset=+2]
+include::modules/rosa-upgrading-manual-ocm.adoc[leveloffset=+2]
diff --git a/upgrading/rosa-upgrading.adoc b/upgrading/rosa-upgrading.adoc
new file mode 100644
index 0000000000..bbabdde5cd
--- /dev/null
+++ b/upgrading/rosa-upgrading.adoc
@@ -0,0 +1,28 @@
+[id="rosa-upgrading"]
+= Upgrading ROSA clusters
+include::modules/attributes-openshift-dedicated.adoc[]
+:context: rosa-upgrading
+
+toc::[]
+
+[id="rosa-lifecycle-policy_{context}"]
+== Life cycle policies and planning
+
+To plan an upgrade, review the xref:../rosa_policy/rosa-life-cycle.adoc#rosa-life-cycle[{product-title} update life cycle].
The life cycle page includes release definitions, support and upgrade requirements, installation policy information and life cycle dates. + +[id="rosa-sts-upgrading-a-cluster"] +== Upgrading a ROSA cluster +There are three methods to upgrade {product-title} (ROSA) clusters: + +* Manual upgrades through the `rosa` CLI +* Manual upgrades through the {cloud-redhat-com} console +* Automatic upgrades through the {cloud-redhat-com} console + +[NOTE] +==== +For steps to upgrade a ROSA cluster that uses the AWS Security Token Service (STS), see xref:../upgrading/rosa-upgrading-sts.adoc#rosa-upgrading-sts[Upgrading ROSA clusters with STS]. +==== + +include::modules/rosa-upgrading-cli-tutorial.adoc[leveloffset=+2] +include::modules/rosa-upgrading-manual-ocm.adoc[leveloffset=+2] +include::modules/rosa-upgrading-automatic-ocm.adoc[leveloffset=+2]
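+
+For quick reference, a manual upgrade through the `rosa` CLI typically looks like the following sketch. The `rosa list upgrades` and `rosa upgrade cluster` commands exist, but the values are placeholders and the CLI tutorial module included above is the authoritative procedure:
+
+[source,terminal]
+----
+# List the available upgrade versions for the cluster
+$ rosa list upgrades --cluster=<cluster_name>
+
+# Schedule an upgrade to a specific available version
+$ rosa upgrade cluster --cluster=<cluster_name> --version=<version>
+
+# Check the upgrade status
+$ rosa describe cluster --cluster=<cluster_name>
+----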