diff --git a/_attributes/common-attributes.adoc b/_attributes/common-attributes.adoc index 5a3519b5aa..3b44a74f31 100644 --- a/_attributes/common-attributes.adoc +++ b/_attributes/common-attributes.adoc @@ -40,11 +40,8 @@ endif::[] :oadp-full: OpenShift API for Data Protection :oadp-short: OADP :oadp-version: 1.4.1 -<<<<<<< HEAD :oadp-version-1-3: 1.3.3 :oadp-version-1-4: 1.4.1 -======= ->>>>>>> c17ffd7cec (Adding the Security HCP cherrypick) :oc-first: pass:quotes[OpenShift CLI (`oc`)] :product-registry: OpenShift image registry :product-mirror-registry: Mirror registry for Red Hat OpenShift diff --git a/_topic_maps/_topic_map.yml b/_topic_maps/_topic_map.yml index c0ab7e7d91..065de09e4f 100644 --- a/_topic_maps/_topic_map.yml +++ b/_topic_maps/_topic_map.yml @@ -135,22 +135,22 @@ Topics: - Name: Updating a cluster in a disconnected environment Dir: updating Topics: - - Name: About cluster updates in a disconnected environment - File: index - - Name: Mirroring OpenShift Container Platform images - File: mirroring-image-repository - - Name: Updating a cluster in a disconnected environment using OSUS - File: disconnected-update-osus - Distros: openshift-enterprise - - Name: Updating a cluster in a disconnected environment without OSUS - File: disconnected-update - Distros: openshift-enterprise - - Name: Updating a cluster in a disconnected environment by using the CLI - File: disconnected-update - Distros: openshift-origin - - Name: Uninstalling OSUS from a cluster - File: uninstalling-osus - Distros: openshift-enterprise + - Name: About cluster updates in a disconnected environment + File: index + - Name: Mirroring OpenShift Container Platform images + File: mirroring-image-repository + - Name: Updating a cluster in a disconnected environment using OSUS + File: disconnected-update-osus + Distros: openshift-enterprise + - Name: Updating a cluster in a disconnected environment without OSUS + File: disconnected-update + Distros: openshift-enterprise + - Name: Updating a cluster in a disconnected environment by using the CLI + File: disconnected-update + Distros: openshift-origin + - Name: Uninstalling OSUS from a cluster + File: uninstalling-osus + Distros: openshift-enterprise --- Name: Installing Dir: installing @@ -2521,17 +2521,11 @@ Topics: File: hcp-destroy-virt - Name: Destroying a hosted cluster on IBM Z File: hcp-destroy-ibmz -<<<<<<< HEAD - Name: Destroying a hosted cluster on IBM Power File: hcp-destroy-ibmpower - Name: Destroying a hosted cluster on non-bare metal agent machines File: hcp-destroy-non-bm - Name: Manually importing a hosted cluster -======= - - Name: Destroying a hosted cluster on non-bare metal agent machines - File: hcp-destroy-non-bm -- Name: Manually importing a hosted control plane cluster ->>>>>>> c17ffd7cec (Adding the Security HCP cherrypick) File: hcp-import --- Name: Nodes diff --git a/_topic_maps/_topic_map_ms.yml b/_topic_maps/_topic_map_ms.yml index 1fe846f9a2..5e201bf18f 100644 --- a/_topic_maps/_topic_map_ms.yml +++ b/_topic_maps/_topic_map_ms.yml @@ -33,13 +33,8 @@ Name: Red Hat build of MicroShift release notes Dir: microshift_release_notes Distros: microshift Topics: -<<<<<<< HEAD - Name: Red Hat build of MicroShift 4.18 release notes File: microshift-4-18-release-notes -======= -- Name: Red Hat build of MicroShift 4.17 release notes - File: microshift-4-17-release-notes ->>>>>>> c17ffd7cec (Adding the Security HCP cherrypick) --- Name: Getting ready to install MicroShift Dir: microshift_install_get_ready @@ -115,11 +110,7 @@ Dir: 
microshift_configuring Distros: microshift Topics: - Name: Using the MicroShift configuration file -<<<<<<< HEAD File: microshift-using-config-yaml -======= - File: microshift-using-config-tools ->>>>>>> c17ffd7cec (Adding the Security HCP cherrypick) - Name: Configuring IPv6 networking File: microshift-nw-ipv6-config - Name: Cluster access with kubeconfig @@ -130,11 +121,8 @@ Topics: File: microshift-greenboot-checking-status - Name: Configuring audit logging policies File: microshift-audit-logs-config -<<<<<<< HEAD - Name: Disabling LVMS CSI provider and CSI snapshot File: microshift-disable-lvms-csi-provider-csi-snapshot -======= ->>>>>>> c17ffd7cec (Adding the Security HCP cherrypick) - Name: Configuring low latency Dir: microshift_low_latency Topics: diff --git a/_topic_maps/_topic_map_rosa.yml b/_topic_maps/_topic_map_rosa.yml index 437d83c89e..bdf669133e 100644 --- a/_topic_maps/_topic_map_rosa.yml +++ b/_topic_maps/_topic_map_rosa.yml @@ -903,6 +903,8 @@ Topics: File: configuring-registry-operator - Name: Accessing the registry File: accessing-the-registry +# - Name: Exposing the registry +# File: securing-exposing-registry --- Name: Operators Dir: operators diff --git a/_topic_maps/_topic_map_rosa_hcp.yml b/_topic_maps/_topic_map_rosa_hcp.yml index 8fc3ce692c..f1c9dfec46 100644 --- a/_topic_maps/_topic_map_rosa_hcp.yml +++ b/_topic_maps/_topic_map_rosa_hcp.yml @@ -101,8 +101,6 @@ Distros: openshift-rosa-hcp Topics: - Name: Tutorials overview File: index -# - Name: ROSA prerequisites -# File: rosa-mobb-prerequisites-tutorial - Name: ROSA with HCP activation and account linking File: cloud-experts-rosa-hcp-activation-and-account-linking-tutorial - Name: ROSA with HCP private offer acceptance and sharing @@ -129,12 +127,6 @@ Topics: # File: cloud-experts-dynamic-certificate-custom-domain # - Name: Assigning consistent egress IP for external traffic # File: cloud-experts-consistent-egress-ip -- Name: Getting started with ROSA - Dir: cloud-experts-getting-started - Distros: openshift-rosa-hcp - Topics: - - Name: Obtaining support - File: cloud-experts-getting-started-support - Name: Deploying an application Dir: cloud-experts-deploying-application Distros: openshift-rosa-hcp @@ -145,29 +137,6 @@ Topics: File: cloud-experts-deploying-application-prerequisites - Name: Lab Overview File: cloud-experts-deploying-application-lab-overview -# --- -# Name: Prepare your environment -# Dir: rosa_planning -# Distros: openshift-rosa-hcp -# Topics: -# - Name: Prerequisites checklist for deploying ROSA using STS -# File: rosa-cloud-expert-prereq-checklist -# - Name: Detailed requirements for deploying ROSA using STS -# File: rosa-sts-aws-prereqs -# - Name: ROSA IAM role resources -# File: rosa-sts-ocm-role -# - Name: Limits and scalability -# File: rosa-limits-scalability -#- Name: ROSA with HCP limits and scalability -# File: rosa-hcp-limits-scalability -# - Name: Planning your environment -# File: rosa-planning-environment -# - Name: Required AWS service quotas -# File: rosa-sts-required-aws-service-quotas -# - Name: Setting up your environment -# File: rosa-sts-setting-up-environment -# - Name: Preparing Terraform to install ROSA clusters -# File: rosa-understanding-terraform --- Name: Install ROSA with HCP clusters Dir: rosa_hcp @@ -185,329 +154,11 @@ Topics: File: rosa-hcp-sts-creating-a-cluster-ext-auth - Name: Using the Node Tuning Operator on ROSA with HCP File: rosa-tuning-config -# --- -# Name: Install ROSA Classic clusters -# Dir: rosa_install_access_delete_clusters -# Distros: 
openshift-rosa-hcp -# Topics: -# - Name: Creating a ROSA cluster with STS using the default options -# File: rosa-sts-creating-a-cluster-quickly -# - Name: Creating a ROSA cluster with STS using customizations -# File: rosa-sts-creating-a-cluster-with-customizations -# - Name: Creating a ROSA cluster with STS using Terraform -# Dir: terraform -# Distros: openshift-rosa-hcp -# Topics: -# - Name: Creating a default ROSA Classic cluster using Terraform -# File: rosa-sts-creating-a-cluster-quickly-terraform -# - Name: Customizing a ROSA cluster with Terraform -# File: rosa-sts-creating-a-cluster-with-customizations-terraform -# - Name: Interactive cluster creation mode reference -# File: rosa-sts-interactive-mode-reference -# - Name: Creating an AWS PrivateLink cluster on ROSA -# File: rosa-aws-privatelink-creating-cluster -# - Name: Configuring a shared virtual private cloud for ROSA clusters -# File: rosa-shared-vpc-config -# - Name: Accessing a ROSA cluster -# File: rosa-sts-accessing-cluster -# - Name: Configuring identity providers using Red Hat OpenShift Cluster Manager -# File: rosa-sts-config-identity-providers -# - Name: Revoking access to a ROSA cluster -# File: rosa-sts-deleting-access-cluster -# - Name: Deleting a ROSA cluster -# File: rosa-sts-deleting-cluster -# - Name: Deploying ROSA without AWS STS -# Dir: rosa_getting_started_iam -# Distros: openshift-rosa-hcp -# Topics: -# - Name: AWS prerequisites for ROSA -# File: rosa-aws-prereqs -# - Name: Understanding the ROSA deployment workflow -# File: rosa-getting-started-workflow -# - Name: Required AWS service quotas -# File: rosa-required-aws-service-quotas -# - Name: Configuring your AWS account -# File: rosa-config-aws-account -# - Name: Installing the ROSA CLI -# File: rosa-installing-rosa -# - Name: Creating a ROSA cluster without AWS STS -# File: rosa-creating-cluster -# - Name: Configuring a private cluster -# File: rosa-private-cluster -# - Name: Creating a ROSA cluster using the web console -# File: rosa-creating-cluster-console -# - Name: Accessing a ROSA cluster -# File: rosa-accessing-cluster -# - Name: Configuring identity providers using the Red Hat OpenShift Cluster Manager -# File: rosa-config-identity-providers -# - Name: Deleting access to a ROSA cluster -# File: rosa-deleting-access-cluster -# - Name: Deleting a ROSA cluster -# File: rosa-deleting-cluster -# - Name: Command quick reference for creating clusters and users -# File: rosa-quickstart -# -- -# Name: Support -# Dir: support -# Distros: openshift-rosa-hcp -# Topics: -# - Name: Support overview -# File: index -# - Name: Managing your cluster resources -# File: managing-cluster-resources -# - Name: Getting support -# File: getting-support -# Distros: openshift-rosa-hcp -# - Name: Remote health monitoring with connected clusters -# Dir: remote_health_monitoring -# Distros: openshift-rosa-hcp -# Topics: -# - Name: About remote health monitoring -# File: about-remote-health-monitoring -# - Name: Showing data collected by remote health monitoring -# File: showing-data-collected-by-remote-health-monitoring -# cannot get resource "secrets" in API group "" in the namespace "openshift-config" -# - Name: Opting out of remote health reporting -# File: opting-out-of-remote-health-reporting -# cannot get resource "secrets" in API group "" in the namespace "openshift-config" -# - Name: Enabling remote health reporting -# File: enabling-remote-health-reporting -# - Name: Using Insights to identify issues with your cluster -# File: 
using-insights-to-identify-issues-with-your-cluster -# - Name: Using Insights Operator -# File: using-insights-operator -# Not supported per Michael McNeill -# - Name: Using remote health reporting in a restricted network -# File: remote-health-reporting-from-restricted-network -# cannot list resource "secrets" in API group "" in the namespace "openshift-config" -# - Name: Importing simple content access entitlements with Insights Operator -# File: insights-operator-simple-access -# must-gather not supported for customers, per Dustin Row, cannot create resource "namespaces" -# - Name: Gathering data about your cluster -# File: gathering-cluster-data -# Distros: openshift-rosa-hcp -# - Name: Summarizing cluster specifications -# File: summarizing-cluster-specifications -# Distros: openshift-rosa-hcp -# - Name: Troubleshooting -# Dir: troubleshooting -# Distros: openshift-rosa-hcp -# Topics: -# rosa has own troubleshooting installations -# - Name: Troubleshooting installations -# File: troubleshooting-installations -# - Name: Troubleshooting ROSA installations -# File: rosa-troubleshooting-installations -# - Name: Troubleshooting networking -# File: rosa-troubleshooting-networking -# - Name: Verifying node health -# File: verifying-node-health -# cannot create resource "namespaces", cannot patch resource "nodes" -# - Name: Troubleshooting CRI-O container runtime issues -# File: troubleshooting-crio-issues -# requires ostree, butane, and other plug-ins -# - Name: Troubleshooting operating system issues -# File: troubleshooting-operating-system-issues -# Distros: openshift-rosa-hcp -# cannot patch resource "nodes", "nodes/proxy", "namespaces" -# - Name: Troubleshooting network issues -# File: troubleshooting-network-issues -# Distros: openshift-rosa-hcp -# - Name: Troubleshooting Operator issues -# File: troubleshooting-operator-issues -# - Name: Investigating pod issues -# File: investigating-pod-issues -# Hiding from ROSA and OSD until it is decided who should port the Build book -# - Name: Troubleshooting the Source-to-Image process -# File: troubleshooting-s2i -# - Name: Troubleshooting storage issues -# File: troubleshooting-storage-issues -# Not supported per WINC team -# - Name: Troubleshooting Windows container workload issues -# File: troubleshooting-windows-container-workload-issues -# - Name: Investigating monitoring issues -# File: investigating-monitoring-issues -# - Name: Diagnosing OpenShift CLI (oc) issues -# File: diagnosing-oc-issues -# - Name: Troubleshooting expired offline access tokens -# File: rosa-troubleshooting-expired-tokens -# Distros: openshift-rosa-hcp -# - Name: Troubleshooting IAM roles -# File: rosa-troubleshooting-iam-resources -# Distros: openshift-rosa-hcp -# - Name: Troubleshooting cluster deployments -# File: rosa-troubleshooting-deployments -# Distros: openshift-rosa-hcp -# - Name: Red Hat OpenShift Service on AWS managed resources -# File: sd-managed-resources -# Distros: openshift-rosa-hcp -# --- -# Name: Web console -# Dir: web_console -# Distros: openshift-rosa-hcp -# Topics: -# - Name: Web console overview -# File: web-console-overview -# - Name: Accessing the web console -# File: web-console -# - Name: Viewing cluster information -# File: using-dashboard-to-get-cluster-information -# - Name: Adding user preferences -# File: adding-user-preferences -# Distros: openshift-enterprise,openshift-origin -# cannot patch resource "consoles", insufficient permissions to read any Cluster configuration -# - Name: Configuring the web console -# File: 
configuring-web-console -# Distros: openshift-rosa-hcp -# - Name: Customizing the web console -# File: customizing-the-web-console -# Distros: openshift-rosa-hcp -# - Name: Dynamic plugins -# Dir: dynamic-plugin -# Distros: openshift-rosa-hcp -# Topics: -# - Name: Overview of dynamic plugins -# File: overview-dynamic-plugin -# - Name: Getting started with dynamic plugins -# File: dynamic-plugins-get-started -# - Name: Deploy your plugin on a cluster -# File: deploy-plugin-cluster -# - Name: Dynamic plugin example -# File: dynamic-plugin-example -# - Name: Dynamic plugin reference -# File: dynamic-plugins-reference -# - Name: Web terminal -# Dir: web_terminal -# Distros: openshift-rosa-hcp -# Topics: -# - Name: Installing the web terminal -# File: installing-web-terminal -# Do not have sufficient permissions to read any cluster configuration. -# - Name: Configuring the web terminal -# File: configuring-web-terminal -# - Name: Using the web terminal -# File: odc-using-web-terminal -# - Name: Troubleshooting the web terminal -# File: troubleshooting-web-terminal -# - Name: Uninstalling the web terminal -# File: uninstalling-web-terminal -# - Name: About quick start tutorials -# File: creating-quick-start-tutorials -# Distros: openshift-rosa-hcp -# --- -# Name: CLI tools -# Dir: cli_reference -# Distros: openshift-rosa-hcp -# Topics: -# - Name: CLI tools overview -# File: index -# - Name: OpenShift CLI (oc) -# Dir: openshift_cli -# Topics: -# - Name: Getting started with the OpenShift CLI -# File: getting-started-cli -# - Name: Configuring the OpenShift CLI -# File: configuring-cli -# - Name: Usage of oc and kubectl commands -# File: usage-oc-kubectl -# - Name: Managing CLI profiles -# File: managing-cli-profiles -# - Name: Extending the OpenShift CLI with plugins -# File: extending-cli-plugins -# # - Name: Managing CLI plugins with Krew -# # File: managing-cli-plugins-krew -# # Distros: openshift-rosa-hcp -# - Name: OpenShift CLI developer command reference -# File: developer-cli-commands -# - Name: OpenShift CLI administrator command reference -# File: administrator-cli-commands -# Distros: openshift-rosa-hcp -# - Name: Developer CLI (odo) -# File: odo-important-update -# # Dir: developer_cli_odo -# Distros: openshift-rosa-hcp -# # Topics: -# # - Name: odo release notes -# # File: odo-release-notes -# # - Name: Understanding odo -# # File: understanding-odo -# # - Name: Installing odo -# # File: installing-odo -# # - Name: Configuring the odo CLI -# # File: configuring-the-odo-cli -# # - Name: odo CLI reference -# # File: odo-cli-reference -# - Name: Knative CLI (kn) for use with OpenShift Serverless -# File: kn-cli-tools -# Distros: openshift-rosa-hcp -# - Name: Pipelines CLI (tkn) -# Dir: tkn_cli -# Distros: openshift-rosa-hcp -# Topics: -# - Name: Installing tkn -# File: installing-tkn -# - Name: Configuring tkn -# File: op-configuring-tkn -# - Name: Basic tkn commands -# File: op-tkn-reference -# - Name: opm CLI -# Dir: opm -# Distros: openshift-rosa-hcp -# Topics: -# - Name: Installing the opm CLI -# File: cli-opm-install -# - Name: opm CLI reference -# File: cli-opm-ref -# - Name: Operator SDK -# Dir: osdk -# Distros: openshift-rosa-hcp -# Topics: -# - Name: Installing the Operator SDK CLI -# File: cli-osdk-install -# - Name: Operator SDK CLI reference -# File: cli-osdk-ref -# - Name: ROSA CLI -# Dir: rosa_cli -# Distros: openshift-rosa-hcp -# Topics: -# # - Name: CLI and web console -# # File: rosa-cli-openshift-console -# - Name: Getting started with the ROSA CLI -# File: 
rosa-get-started-cli -# - Name: Managing objects with the ROSA CLI -# File: rosa-manage-objects-cli -# - Name: Checking account and version information with the ROSA CLI -# File: rosa-checking-acct-version-cli -# - Name: Checking logs with the ROSA CLI -# File: rosa-checking-logs-cli -# - Name: Least privilege permissions for ROSA CLI commands -# File: rosa-cli-permission-examples -# -# --- -# Name: Red Hat OpenShift Cluster Manager -# Dir: ocm -# Distros: openshift-rosa-hcp -# Topics: -# - Name: Red Hat OpenShift Cluster Manager -# File: ocm-overview -# - Name: Red Hat OpenShift Cluster Manager -# File: ocm-overview -# - Name: Using the OpenShift web console -# File: rosa-using-openshift-console --- Name: Cluster administration Dir: rosa_cluster_admin Distros: openshift-rosa-hcp Topics: -# - Name: Cluster configurations -# File: rosa-cluster-config -# - Name: Cluster authentication -# File: rosa-cluster-auth -# - Name: Authorization and RBAC -# File: rosa-auth-rbac -- Name: Cluster notifications - File: rosa-cluster-notifications - Distros: openshift-rosa-hcp - Name: Cluster configurations File: rosa-cluster-notifications --- @@ -518,65 +169,6 @@ Topics: - Name: Adding additional constraints for IP-based AWS role assumption File: rosa-adding-additional-constraints-for-ip-based-aws-role-assumption # --- -# Name: Cluster administration -# Dir: rosa_cluster_admin -# Distros: openshift-rosa-hcp -# Topics: -# - Name: Cluster configurations -# File: rosa-cluster-config -# - Name: Cluster authentication -# File: rosa-cluster-auth -# - Name: Authorization and RBAC -# File: rosa-auth-rbac -- Name: Cluster notifications - File: rosa-cluster-notifications - Distros: openshift-rosa-hcp -# - Name: Cluster notifications -# File: rosa-cluster-notifications -# Distros: openshift-rosa-hcp -# - Name: Configuring private connections -# Dir: cloud_infrastructure_access -# Distros: openshift-rosa-hcp -# Topics: -# - Name: Configuring private connections -# File: rosa-configuring-private-connections -# - Name: Configuring AWS VPC peering -# File: dedicated-aws-peering -# - Name: Configuring AWS VPN -# File: dedicated-aws-vpn -# - Name: Configuring AWS Direct Connect -# File: dedicated-aws-dc -# - Name: Cluster autoscaling -# File: rosa-cluster-autoscaling -# - Name: Manage nodes using machine pools -# Dir: rosa_nodes -# Distros: openshift-rosa-hcp -# Topics: -# - Name: About machine pools -# File: rosa-nodes-machinepools-about -# - Name: Managing compute nodes -# File: rosa-managing-worker-nodes -# - Name: Configuring machine pools in Local Zones -# File: rosa-nodes-machinepools-configuring -# Distros: openshift-rosa-hcp -# - Name: About autoscaling nodes on a cluster -# File: rosa-nodes-about-autoscaling-nodes -# - Name: Configuring cluster memory to meet container memory and risk requirements -# File: nodes-cluster-resource-configure -- Name: Configuring PID limits - File: rosa-configuring-pid-limits - Distros: openshift-rosa -# - Name: Configuring PID limits -# File: rosa-configuring-pid-limits -# Distros: openshift-rosa-hcp ---- -Name: Security and compliance -Dir: security -Distros: openshift-rosa-hcp -Topics: -- Name: Adding additional constraints for IP-based AWS role assumption - File: rosa-adding-additional-constraints-for-ip-based-aws-role-assumption -# --- # - Name: Security # File: rosa-security # - Name: Application and cluster compliance @@ -661,237 +253,12 @@ Topics: # - Name: Manual mode with short-term credentials for components # File: cco-short-term-creds --- -# --- -# Name: Authentication 
and authorization -# Dir: authentication -# Distros: openshift-rosa-hcp -# Topics: -# - Name: Authentication and authorization overview -# File: index -# - Name: Understanding authentication -# File: understanding-authentication -# - Name: Configuring the internal OAuth server -# File: configuring-internal-oauth -# - Name: Configuring OAuth clients -# File: configuring-oauth-clients -# - Name: Managing user-owned OAuth access tokens -# File: managing-oauth-access-tokens -# - Name: Understanding identity provider configuration -# File: understanding-identity-provider -# - Name: Configuring identity providers -# File: sd-configuring-identity-providers -# - Name: Configuring identity providers -# Dir: identity_providers -# Topics: -# - Name: Configuring an htpasswd identity provider -# File: configuring-htpasswd-identity-provider -# - Name: Configuring a Keystone identity provider -# File: configuring-keystone-identity-provider -# - Name: Configuring an LDAP identity provider -# File: configuring-ldap-identity-provider -# - Name: Configuring a basic authentication identity provider -# File: configuring-basic-authentication-identity-provider -# - Name: Configuring a request header identity provider -# File: configuring-request-header-identity-provider -# - Name: Configuring a GitHub or GitHub Enterprise identity provider -# File: configuring-github-identity-provider -# - Name: Configuring a GitLab identity provider -# File: configuring-gitlab-identity-provider -# - Name: Configuring a Google identity provider -# File: configuring-google-identity-provider -# - Name: Configuring an OpenID Connect identity provider -# File: configuring-oidc-identity-provider -# - Name: Using RBAC to define and apply permissions -# File: using-rbac -# - Name: Removing the kubeadmin user -# File: remove-kubeadmin -# - Name: Configuring LDAP failover -# File: configuring-ldap-failover -# - Name: Understanding and creating service accounts -# File: understanding-and-creating-service-accounts -# - Name: Using service accounts in applications -# File: using-service-accounts-in-applications -# - Name: Using a service account as an OAuth client -# File: using-service-accounts-as-oauth-client -# - Name: Assuming an AWS IAM role for a service account -# File: assuming-an-aws-iam-role-for-a-service-account -# - Name: Scoping tokens -# File: tokens-scoping -# - Name: Using bound service account tokens -# File: bound-service-account-tokens -# - Name: Managing security context constraints -# File: managing-security-context-constraints -# - Name: Understanding and managing pod security admission -# File: understanding-and-managing-pod-security-admission -# - Name: Impersonating the system:admin user -# File: impersonating-system-admin -# - Name: Syncing LDAP groups -# File: ldap-syncing -# - Name: Managing cloud provider credentials -# Dir: managing_cloud_provider_credentials -# Topics: -# - Name: About the Cloud Credential Operator -# File: about-cloud-credential-operator -# - Name: Mint mode -# File: cco-mode-mint -# - Name: Passthrough mode -# File: cco-mode-passthrough -# - Name: Manual mode with long-term credentials for components -# File: cco-mode-manual -# - Name: Manual mode with short-term credentials for components -# File: cco-short-term-creds -#--- ---- -#--- Name: Upgrading Dir: upgrading Distros: openshift-rosa-hcp Topics: -# - Name: Preparing to upgrade ROSA to 4.9 -# File: rosa-upgrading-cluster-prepare -# Distros: openshift-rosa-hcp -# - Name: Upgrading ROSA with STS -# File: rosa-upgrading-sts -# - Name: 
Upgrading ROSA -# File: rosa-upgrading - Name: Upgrading ROSA with HCP File: rosa-hcp-upgrading -# --- -# Name: CI/CD -# Dir: cicd -# Distros: openshift-rosa -# Topics: -# - Name: CI/CD overview -# File: index -# This can be included when Shipwright is ported. -# - Name: Builds using Shipwright -# Dir: builds_using_shipwright -# Topics: -# - Name: Overview of Builds -# File: overview-openshift-builds -# - Name: Builds using BuildConfig -# Dir: builds -# Topics: -# - Name: Understanding image builds -# File: understanding-image-builds -# - Name: Understanding build configurations -# File: understanding-buildconfigs -# - Name: Creating build inputs -# File: creating-build-inputs -# - Name: Managing build output -# File: managing-build-output -# - Name: Using build strategies -# File: build-strategies -# - Name: Custom image builds with Buildah -# File: custom-builds-buildah -# - Name: Performing and configuring basic builds -# File: basic-build-operations -# - Name: Triggering and modifying builds -# File: triggering-builds-build-hooks -# - Name: Performing advanced builds -# File: advanced-build-operations -# - Name: Using Red Hat subscriptions in builds -# File: running-entitled-builds -# Dedicated-admin cannot secure builds by strategy -# - Name: Securing builds by strategy -# File: securing-builds-by-strategy -# Dedicated-admin cannot edit build configuration resources -# - Name: Build configuration resources -# File: build-configuration -# - Name: Troubleshooting builds -# File: troubleshooting-builds -# - Name: Setting up additional trusted certificate authorities for builds -# File: setting-up-trusted-ca -# This can be included when Pipelines is ported. -# - Name: Pipelines -# Dir: pipelines -# Topics: -# - Name: About OpenShift Pipelines -# File: about-pipelines -# This can be included when GitOps is ported. 
-# - Name: GitOps -# Dir: gitops -# Topics: -# - Name: About OpenShift GitOps -# File: about-redhat-openshift-gitops -# - Name: Jenkins -# Dir: jenkins -# Topics: -# - Name: Configuring Jenkins images -# File: images-other-jenkins -# - Name: Jenkins agent -# File: images-other-jenkins-agent -# Include this when Pipelines is ported: -# - Name: Migrating from Jenkins to OpenShift Pipelines -# File: migrating-from-jenkins-to-openshift-pipelines -# - Name: Important changes to OpenShift Jenkins images -# File: important-changes-to-openshift-jenkins-images -# --- -# Name: Images -# Dir: openshift_images -# Distros: openshift-rosa-hcp -# Topics: -# - Name: Overview of images -# File: index -# replaced Configuring the Cluster Samples Operator name, cannot configure the operator -# - Name: Overview of the Cluster Samples Operator -# File: configuring-samples-operator -# Distros: openshift-rosa-hcp -# - Name: Using the Cluster Samples Operator with an alternate registry -# File: samples-operator-alt-registry -# Distros: openshift-rosa-hcp -# - Name: Creating images -# File: create-images -# - Name: Managing images -# Dir: managing_images -# Topics: -# - Name: Managing images overview -# File: managing-images-overview -# - Name: Tagging images -# File: tagging-images -# - Name: Image pull policy -# File: image-pull-policy -# - Name: Using image pull secrets -# File: using-image-pull-secrets -# - Name: Managing image streams -# File: image-streams-manage -# Distros: openshift-rosa-hcp -# - Name: Using image streams with Kubernetes resources -# File: using-imagestreams-with-kube-resources -# Distros: openshift-rosa-hcp -# - Name: Triggering updates on image stream changes -# File: triggering-updates-on-imagestream-changes -# Distros: openshift-rosa-hcp -# - Name: Image configuration resources (Classic) -# File: image-configuration -# - Name: Image configuration resources (HCP) -# File: image-configuration-classic-hcp -# - Name: Image configuration resources -# File: image-configuration -# Distros: openshift-rosa-hcp -# - Name: Using templates -# File: using-templates -# - Name: Using Ruby on Rails -# File: templates-using-ruby-on-rails -# - Name: Using images -# Dir: using_images -# Distros: openshift-rosa-hcp -# Topics: -# - Name: Using images overview -# File: using-images-overview -# - Name: Source-to-image -# File: using-s21-images -# - Name: Customizing source-to-image images -# File: customizing-s2i-images -# --- -# Name: Add-on services -# Dir: adding_service_cluster -# Distros: openshift-rosa-hcp -# Topics: -# - Name: Adding services to a cluster -# File: adding-service -# - Name: Available services -# File: rosa-available-services --- Name: Storage Dir: storage @@ -924,369 +291,12 @@ Topics: - Name: Dynamic provisioning File: dynamic-provisioning # --- -# Name: Registry -# Dir: registry # Name: Backing up and restoring applications # Dir: rosa_backing_up_and_restoring_applications -# --- -# Name: Storage -# Dir: storage # Distros: openshift-rosa-hcp # Topics: -# - Name: Storage overview -# File: index -# - Name: Understanding ephemeral storage -# File: understanding-ephemeral-storage -# - Name: Understanding persistent storage -# File: understanding-persistent-storage -# - Name: Configuring persistent storage -# Dir: persistent_storage -# Topics: -# - Name: Persistent storage using AWS Elastic Block Store -# File: persistent-storage-aws -# - Name: Using Container Storage Interface (CSI) -# Dir: container_storage_interface -# Topics: -# - Name: Configuring CSI volumes -# File: 
persistent-storage-csi -# - Name: Managing the default storage class -# File: persistent-storage-csi-sc-manage -# - Name: AWS Elastic Block Store CSI Driver Operator -# File: persistent-storage-csi-ebs -# - Name: AWS Elastic File Service CSI Driver Operator -# File: persistent-storage-csi-aws-efs -# - Name: Generic ephemeral volumes -# File: generic-ephemeral-vols -# - Name: Dynamic provisioning -# File: dynamic-provisioning -# # --- -# Name: Registry -# Dir: registry -# Distros: openshift-rosa-hcp -# Topics: -# - Name: Registry overview -# File: index -# - Name: Image Registry Operator in Red Hat OpenShift Service on AWS -# File: configuring-registry-operator -# - Name: Accessing the registry -# File: accessing-the-registry -# - Name: Exposing the registry -# File: securing-exposing-registry -# --- -# Name: Operators -# Dir: operators -# Distros: openshift-rosa-hcp -# Topics: -# - Name: Operators overview -# File: index -# - Name: Understanding Operators -# Dir: understanding -# Topics: -# - Name: What are Operators? -# File: olm-what-operators-are -# - Name: Packaging format -# File: olm-packaging-format -# - Name: Common terms -# File: olm-common-terms -# - Name: Operator Lifecycle Manager (OLM) -# Dir: olm -# Topics: -# - Name: Concepts and resources -# File: olm-understanding-olm -# - Name: Architecture -# File: olm-arch -# - Name: Workflow -# File: olm-workflow -# - Name: Dependency resolution -# File: olm-understanding-dependency-resolution -# - Name: Operator groups -# File: olm-understanding-operatorgroups -# - Name: Multitenancy and Operator colocation -# File: olm-colocation -# - Name: Operator conditions -# File: olm-operatorconditions -# - Name: Metrics -# File: olm-understanding-metrics -# - Name: Webhooks -# File: olm-webhooks -# - Name: OperatorHub -# File: olm-understanding-operatorhub -# - Name: Red Hat-provided Operator catalogs -# File: olm-rh-catalogs -# - Name: Operators in multitenant clusters -# File: olm-multitenancy -# - Name: CRDs -# Dir: crds -# Topics: -# - Name: Managing resources from CRDs -# File: crd-managing-resources-from-crds -# - Name: User tasks -# Dir: user -# Topics: -# - Name: Creating applications from installed Operators -# File: olm-creating-apps-from-installed-operators -# - Name: Administrator tasks -# Dir: admin -# Topics: -# - Name: Adding Operators to a cluster -# File: olm-adding-operators-to-cluster -# - Name: Updating installed Operators -# File: olm-upgrading-operators -# - Name: Deleting Operators from a cluster -# File: olm-deleting-operators-from-cluster -# - Name: Configuring proxy support -# File: olm-configuring-proxy-support -# - Name: Viewing Operator status -# File: olm-status -# - Name: Managing Operator conditions -# File: olm-managing-operatorconditions -# - Name: Managing custom catalogs -# File: olm-managing-custom-catalogs -# - Name: Catalog source pod scheduling -# File: olm-cs-podsched -# - Name: Troubleshooting Operator issues -# File: olm-troubleshooting-operator-issues -# - Name: Developing Operators -# Dir: operator_sdk -# Topics: -# - Name: About the Operator SDK -# File: osdk-about -# - Name: Installing the Operator SDK CLI -# File: osdk-installing-cli -# - Name: Go-based Operators -# Dir: golang -# Topics: -# Quick start excluded, because it requires cluster-admin permissions. 
-# - Name: Getting started -# File: osdk-golang-quickstart -# - Name: Tutorial -# File: osdk-golang-tutorial -# - Name: Project layout -# File: osdk-golang-project-layout -# - Name: Updating Go-based projects -# File: osdk-golang-updating-projects -# - Name: Ansible-based Operators -# Dir: ansible -# Topics: -# Quick start excluded, because it requires cluster-admin permissions. -# - Name: Getting started -# File: osdk-ansible-quickstart -# - Name: Tutorial -# File: osdk-ansible-tutorial -# - Name: Project layout -# File: osdk-ansible-project-layout -# - Name: Updating Ansible-based projects -# File: osdk-ansible-updating-projects -# - Name: Ansible support -# File: osdk-ansible-support -# - Name: Kubernetes Collection for Ansible -# File: osdk-ansible-k8s-collection -# - Name: Using Ansible inside an Operator -# File: osdk-ansible-inside-operator -# - Name: Custom resource status management -# File: osdk-ansible-cr-status -# - Name: Helm-based Operators -# Dir: helm -# Topics: -# Quick start excluded, because it requires cluster-admin permissions. -# - Name: Getting started -# File: osdk-helm-quickstart -# - Name: Tutorial -# File: osdk-helm-tutorial -# - Name: Project layout -# File: osdk-helm-project-layout -# - Name: Updating Helm-based projects -# File: osdk-helm-updating-projects -# - Name: Helm support -# File: osdk-helm-support -# - Name: Hybrid Helm Operator <= Tech Preview -# File: osdk-hybrid-helm -# - Name: Updating Hybrid Helm-based projects <= Tech Preview -# File: osdk-hybrid-helm-updating-projects -# - Name: Java-based Operators <= Tech Preview -# Dir: java -# Topics: -# - Name: Getting started -# File: osdk-java-quickstart -# - Name: Tutorial -# File: osdk-java-tutorial -# - Name: Project layout -# File: osdk-java-project-layout -# - Name: Updating Java-based projects -# File: osdk-java-updating-projects -# - Name: Defining cluster service versions (CSVs) -# File: osdk-generating-csvs -# - Name: Working with bundle images -# File: osdk-working-bundle-images -# - Name: Complying with pod security admission -# File: osdk-complying-with-psa -# - Name: Validating Operators using the scorecard -# File: osdk-scorecard -# - Name: Validating Operator bundles -# File: osdk-bundle-validate -# - Name: High-availability or single-node cluster detection and support -# File: osdk-ha-sno -# - Name: Configuring built-in monitoring with Prometheus -# File: osdk-monitoring-prometheus -# - Name: Configuring leader election -# File: osdk-leader-election -# - Name: Object pruning utility -# File: osdk-pruning-utility -# - Name: Migrating package manifest projects to bundle format -# File: osdk-pkgman-to-bundle -# - Name: Operator SDK CLI reference -# File: osdk-cli-ref -# - Name: Migrating to Operator SDK v0.1.0 -# File: osdk-migrating-to-v0-1-0 -# ROSA customers can't configure/edit the cluster Operators -# - Name: Cluster Operators reference -# File: operator-reference -# --- -# Name: Networking -# Dir: networking -# Distros: openshift-rosa-hcp -# Topics: -# - Name: Understanding the DNS Operator -# File: dns-operator -# - Name: Understanding the Ingress Operator -# File: ingress-operator -# - Name: AWS Load Balancer Operator -# File: aws-load-balancer-operator -# - Name: OpenShift SDN default CNI network provider -# Dir: openshift_sdn -# Topics: -# - Name: Enabling multicast for a project -# File: enabling-multicast -# - Name: Network verification -# File: network-verification -# - Name: Configuring a cluster-wide proxy during installation -# File: configuring-cluster-wide-proxy -# - Name: 
CIDR range definitions -# File: cidr-range-definitions -# - Name: Network policy -# Dir: network_policy -# Topics: -# - Name: About network policy -# File: about-network-policy -# - Name: Creating a network policy -# File: creating-network-policy -# - Name: Viewing a network policy -# File: viewing-network-policy -# - Name: Deleting a network policy -# File: deleting-network-policy -# - Name: Configuring multitenant isolation with network policy -# File: multitenant-network-policy -# - Name: OVN-Kubernetes network plugin -# Dir: ovn_kubernetes_network_provider -# Topics: -# - Name: Configuring an egress IP address -# File: configuring-egress-ips-ovn -# - Name: Configuring Routes -# Dir: routes -# Topics: -# - Name: Route configuration -# File: route-configuration -# - Name: Secured routes -# File: secured-routes -# --- -# Name: Building applications -# Dir: applications -# Distros: openshift-rosa-hcp -# Topics: -# - Name: Building applications overview -# File: index -# - Name: Projects -# Dir: projects -# Topics: -# - Name: Working with projects -# File: working-with-projects -# cannot impersonate resource "users" in API group -# - Name: Creating a project as another user -# File: creating-project-other-user -# - Name: Configuring project creation -# File: configuring-project-creation -# - Name: Creating applications -# Dir: creating_applications -# Topics: -# - Name: Creating applications using the Developer perspective -# File: odc-creating-applications-using-developer-perspective -# - Name: Creating applications from installed Operators -# File: creating-apps-from-installed-operators -# - Name: Creating applications using the CLI -# File: creating-applications-using-cli -# - Name: Viewing application composition using the Topology view -# File: odc-viewing-application-composition-using-topology-view -# cannot create required namespace -# - Name: Exporting applications -# File: odc-exporting-applications -# - Name: Working with Helm charts -# Dir: working_with_helm_charts -# Topics: -# - Name: Understanding Helm -# File: understanding-helm -# - Name: Installing Helm -# File: installing-helm -# - Name: Configuring custom Helm chart repositories -# File: configuring-custom-helm-chart-repositories -# - Name: Working with Helm releases -# File: odc-working-with-helm-releases -# - Name: Deployments -# Dir: deployments -# Topics: -# - Name: Custom domains for applications -# File: rosa-config-custom-domains-applications -# - Name: Understanding Deployments and DeploymentConfigs -# File: what-deployments-are -# - Name: Managing deployment processes -# File: managing-deployment-processes -# - Name: Using deployment strategies -# File: deployment-strategies -# - Name: Using route-based deployment strategies -# File: route-based-deployment-strategies -# - Name: Quotas -# Dir: quotas -# Topics: -# - Name: Resource quotas per project -# File: quotas-setting-per-project -# - Name: Resource quotas across multiple projects -# File: quotas-setting-across-multiple-projects -# - Name: Using config maps with applications -# File: config-maps -# - Name: Monitoring project and application metrics using the Developer perspective -# File: odc-monitoring-project-and-application-metrics-using-developer-perspective -# - Name: Monitoring application health -# File: application-health -# - Name: Editing applications -# File: odc-editing-applications -# - Name: Working with quotas -# File: working-with-quotas -# - Name: Pruning objects to reclaim resources -# File: pruning-objects -# - Name: Idling applications -# 
File: idling-applications -# - Name: Deleting applications -# File: odc-deleting-applications -# - Name: Using the Red Hat Marketplace -# File: red-hat-marketplace -# - Name: Application GitOps workflows -# File: rosa-app-gitops-workflows -# - Name: Application logging -# File: rosa-app-logging -# - Name: Applications -# File: rosa-apps -# - Name: Application metrics and alerts -# File: rosa-app-metrics and alerts -# - Name: Projects -# File: rosa-projects -# - Name: Using the internal registry -# File: rosa-using-internal-registry ---- -Name: Backing up and restoring applications -Dir: rosa_backing_up_and_restoring_applications -Distros: openshift-rosa-hcp -Topics: -- Name: Installing OADP on ROSA with STS - File: backing-up-applications +# - Name: Backing up applications +# File: backing-up-applications --- Name: Registry Dir: registry @@ -1305,752 +315,3 @@ Distros: openshift-rosa-hcp Topics: - Name: Overview of nodes File: index -# - Name: Working with pods -# Dir: pods -# Topics: -# - Name: About pods -# File: nodes-pods-using -# - Name: Viewing pods -# File: nodes-pods-viewing -# - Name: Configuring a cluster for pods -# File: nodes-pods-configuring -# Distros: openshift-rosa-hcp -# Cannot create namespace to install VPA; revisit after Operator book converted -# - Name: Automatically adjust pod resource levels with the vertical pod autoscaler -# File: nodes-pods-vertical-autoscaler -# - Name: Providing sensitive data to pods -# File: nodes-pods-secrets -# - Name: Creating and using config maps -# File: nodes-pods-configmaps -# Cannot create required kubeletconfigs -# - Name: Using Device Manager to make devices available to nodes -# File: nodes-pods-plugins -# Distros: openshift-rosa-hcp -# - Name: Including pod priority in pod scheduling decisions -# File: nodes-pods-priority -# Distros: openshift-rosa-hcp -# - Name: Placing pods on specific nodes using node selectors -# File: nodes-pods-node-selectors -# Distros: openshift-rosa-hcp -# Cannot create namespace to install Run Once; revisit after Operator book converted -# - Name: Run Once Duration Override Operator -# Dir: run_once_duration_override -# Distros: openshift-rosa-hcp -# Topics: -# - Name: Run Once Duration Override Operator overview -# File: index -# - Name: Run Once Duration Override Operator release notes -# File: run-once-duration-override-release-notes -# - Name: Overriding the active deadline for run-once pods -# File: run-once-duration-override-install -# - Name: Uninstalling the Run Once Duration Override Operator -# File: run-once-duration-override-uninstall -# - Name: Automatically scaling pods with the Custom Metrics Autoscaler Operator -# Dir: cma -# Distros: openshift-rosa-hcp -# Topics: -# - Name: Custom Metrics Autoscaler Operator overview -# File: nodes-cma-autoscaling-custom -# - Name: Custom Metrics Autoscaler Operator release notes -# File: nodes-cma-autoscaling-custom-rn -# - Name: Installing the custom metrics autoscaler -# File: nodes-cma-autoscaling-custom-install -# - Name: Understanding the custom metrics autoscaler triggers -# File: nodes-cma-autoscaling-custom-trigger -# - Name: Understanding the custom metrics autoscaler trigger authentications -# File: nodes-cma-autoscaling-custom-trigger-auth -# - Name: Pausing the custom metrics autoscaler -# File: nodes-cma-autoscaling-custom-pausing -# - Name: Gathering audit logs -# File: nodes-cma-autoscaling-custom-audit-log -# - Name: Gathering debugging data -# File: nodes-cma-autoscaling-custom-debugging -# - Name: Viewing Operator metrics -# File: 
nodes-cma-autoscaling-custom-metrics -# - Name: Understanding how to add custom metrics autoscalers -# File: nodes-cma-autoscaling-custom-adding -# - Name: Removing the Custom Metrics Autoscaler Operator -# File: nodes-cma-autoscaling-custom-removing -# - Name: Controlling pod placement onto nodes (scheduling) -# Dir: scheduling -# Distros: openshift-rosa-hcp -# Topics: -# - Name: About pod placement using the scheduler -# File: nodes-scheduler-about -# - Name: Placing pods relative to other pods using pod affinity and anti-affinity rules -# File: nodes-scheduler-pod-affinity -# - Name: Controlling pod placement on nodes using node affinity rules -# File: nodes-scheduler-node-affinity -# - Name: Placing pods onto overcommited nodes -# File: nodes-scheduler-overcommit -# - Name: Controlling pod placement using node taints -# File: nodes-scheduler-taints-tolerations -# - Name: Placing pods on specific nodes using node selectors -# File: nodes-scheduler-node-selectors -# - Name: Controlling pod placement using pod topology spread constraints -# File: nodes-scheduler-pod-topology-spread-constraints -# - Name: Placing a pod on a specific node by name -# File: nodes-scheduler-node-names -# - Name: Placing a pod in a specific project -# File: nodes-scheduler-node-projects -# - Name: Keeping your cluster balanced using the descheduler -# File: nodes-scheduler-descheduler -# Cannot create namespace to install Desceduler Operator; revisit after Operator book converted -# - Name: Evicting pods using the descheduler -# File: nodes-descheduler -# Cannot create namespace to install Secondary Scheduler Operator; revisit after Operator book converted -# - Name: Secondary scheduler -# Dir: secondary_scheduler -# Distros: openshift-enterprise -# Topics: -# - Name: Secondary scheduler overview -# File: index -# - Name: Secondary Scheduler Operator release notes -# File: nodes-secondary-scheduler-release-notes -# - Name: Scheduling pods using a secondary scheduler -# File: nodes-secondary-scheduler-configuring -# - Name: Uninstalling the Secondary Scheduler Operator -# File: nodes-secondary-scheduler-uninstalling -# - Name: Using Jobs and DaemonSets -# Dir: jobs -# Topics: -# - Name: Running background tasks on nodes automatically with daemonsets -# File: nodes-pods-daemonsets -# Distros: openshift-rosa-hcp -# - Name: Running tasks in pods using jobs -# File: nodes-nodes-jobs -# - Name: Working with nodes -# Dir: nodes -# Distros: openshift-rosa-hcp -# Topics: -# - Name: Viewing and listing the nodes in your cluster -# File: nodes-nodes-viewing -# cannot use oc adm cordon; cannot patch resource "machinesets"; cannot patch resource "nodes" -# - Name: Working with nodes -# File: nodes-nodes-working -# cannot create resource "kubeletconfigs", "schedulers", "machineconfigs", "kubeletconfigs" -# - Name: Managing nodes -# File: nodes-nodes-managing -# cannot create resource "kubeletconfigs" -# - Name: Managing graceful node shutdown -# File: nodes-nodes-graceful-shutdown -# cannot create resource "kubeletconfigs" -# - Name: Managing the maximum number of pods per node -# File: nodes-nodes-managing-max-pods -# - Name: Using the Node Tuning Operator -# File: nodes-node-tuning-operator -# - Name: Remediating, fencing, and maintaining nodes -# File: nodes-remediating-fencing-maintaining-rhwa -# Cannot create namespace needed to oc debug and reboot; revisit after Operator book converted -# - Name: Understanding node rebooting -# File: nodes-nodes-rebooting -# cannot create resource "kubeletconfigs" -# - Name: Freeing 
node resources using garbage collection -# File: nodes-nodes-garbage-collection -# cannot create resource "kubeletconfigs" -# - Name: Allocating resources for nodes -# File: nodes-nodes-resources-configuring -# cannot create resource "kubeletconfigs" -# - Name: Allocating specific CPUs for nodes in a cluster -# File: nodes-nodes-resources-cpus -# cannot create resource "kubeletconfigs" -# - Name: Configuring the TLS security profile for the kubelet -# File: nodes-nodes-tls -# Distros: openshift-rosa-hcp -# - Name: Monitoring for problems in your nodes -# File: nodes-nodes-problem-detector -# - Name: Machine Config Daemon metrics -# File: nodes-nodes-machine-config-daemon-metrics -# cannot patch resource "nodes" -# - Name: Creating infrastructure nodes -# File: nodes-nodes-creating-infrastructure-nodes -# - Name: Working with containers -# Dir: containers -# Topics: -# - Name: Understanding containers -# File: nodes-containers-using -# - Name: Using Init Containers to perform tasks before a pod is deployed -# File: nodes-containers-init -# Distros: openshift-rosa-hcp -# - Name: Using volumes to persist container data -# File: nodes-containers-volumes -# - Name: Mapping volumes using projected volumes -# File: nodes-containers-projected-volumes -# - Name: Allowing containers to consume API objects -# File: nodes-containers-downward-api -# - Name: Copying files to or from a container -# File: nodes-containers-copying-files -# - Name: Executing remote commands in a container -# File: nodes-containers-remote-commands -# - Name: Using port forwarding to access applications in a container -# File: nodes-containers-port-forwarding -# cannot patch resource "configmaps" -# - Name: Using sysctls in containers -# File: nodes-containers-sysctls -# - Name: Working with clusters -# Dir: clusters -# Topics: -# - Name: Viewing system event information in a cluster -# File: nodes-containers-events -# - Name: Analyzing cluster resource levels -# File: nodes-cluster-resource-levels -# Distros: openshift-rosa-hcp -# - Name: Setting limit ranges -# File: nodes-cluster-limit-ranges -# - Name: Configuring cluster memory to meet container memory and risk requirements -# File: nodes-cluster-resource-configure -# Distros: openshift-rosa-hcp -# - Name: Configuring your cluster to place pods on overcommited nodes -# File: nodes-cluster-overcommit -# Distros: openshift-rosa-hcp -# - Name: Configuring the Linux cgroup version on your nodes -# File: nodes-cluster-cgroups-2 -# Distros: openshift-enterprise -# - Name: Configuring the Linux cgroup version on your nodes -# File: nodes-cluster-cgroups-okd -# Distros: openshift-origin -# The TechPreviewNoUpgrade Feature Gate is not allowed -# - Name: Enabling features using FeatureGates -# File: nodes-cluster-enabling-features -# Distros: openshift-rosa-hcp -# Error: nodes.config.openshift.io "cluster" could not be patched -# - Name: Improving cluster stability in high latency environments using worker latency profiles -# File: nodes-cluster-worker-latency-profiles -# Not supported per Michael McNeill -# - Name: Remote worker nodes on the network edge -# Dir: edge -# Topics: -# - Name: Using remote worker node at the network edge -# File: nodes-edge-remote-workers -# Not supported per Michael McNeill -# - Name: Worker nodes for single-node OpenShift clusters -# Dir: nodes -# Distros: openshift-rosa-hcp -# Topics: -# - Name: Adding worker nodes to single-node OpenShift clusters -# File: nodes-sno-worker-nodes -# --- -# Name: Logging -# Dir: logging -# Distros: 
openshift-rosa-hcp -# Topics: -# - Name: Release notes -# Dir: logging_release_notes -# Topics: -# - Name: Logging 5.9 -# File: logging-5-9-release-notes -# - Name: Logging 5.8 -# File: logging-5-8-release-notes -# - Name: Logging 5.7 -# File: logging-5-7-release-notes -# - Name: Support -# File: cluster-logging-support -# - Name: Troubleshooting logging -# Dir: troubleshooting -# Topics: -# - Name: Viewing Logging status -# File: cluster-logging-cluster-status -# - Name: Troubleshooting log forwarding -# File: log-forwarding-troubleshooting -# - Name: Troubleshooting logging alerts -# File: troubleshooting-logging-alerts -# - Name: Viewing the status of the Elasticsearch log store -# File: cluster-logging-log-store-status -# - Name: About Logging -# File: cluster-logging -# - Name: Installing Logging -# File: cluster-logging-deploying -# - Name: Updating Logging -# File: cluster-logging-upgrading -# - Name: Visualizing logs -# Dir: log_visualization -# Topics: -# - Name: About log visualization -# File: log-visualization -# - Name: Log visualization with the web console -# File: log-visualization-ocp-console -# - Name: Viewing cluster dashboards -# File: cluster-logging-dashboards -# - Name: Log visualization with Kibana -# File: logging-kibana -# - Name: Accessing the service logs -# File: sd-accessing-the-service-logs -# - Name: Viewing cluster logs in the AWS Console -# File: rosa-viewing-logs -# - Name: Configuring your Logging deployment -# Dir: config -# Topics: -# - Name: Configuring CPU and memory limits for Logging components -# File: cluster-logging-memory -# #- Name: Configuring systemd-journald and Fluentd -# # File: cluster-logging-systemd -# - Name: Log collection and forwarding -# Dir: log_collection_forwarding -# Topics: -# - Name: About log collection and forwarding -# File: log-forwarding -# - Name: Log output types -# File: logging-output-types -# - Name: Enabling JSON log forwarding -# File: cluster-logging-enabling-json-logging -# - Name: Configuring log forwarding -# File: configuring-log-forwarding -# - Name: Configuring the logging collector -# File: cluster-logging-collector -# - Name: Collecting and storing Kubernetes events -# File: cluster-logging-eventrouter -# - Name: Log storage -# Dir: log_storage -# Topics: -# - Name: About log storage -# File: about-log-storage -# - Name: Installing log storage -# File: installing-log-storage -# - Name: Configuring the LokiStack log store -# File: cluster-logging-loki -# - Name: Configuring the Elasticsearch log store -# File: logging-config-es-store -# - Name: Logging alerts -# Dir: logging_alerts -# Topics: -# - Name: Default logging alerts -# File: default-logging-alerts -# - Name: Custom logging alerts -# File: custom-logging-alerts -# - Name: Performance and reliability tuning -# Dir: performance_reliability -# Topics: -# - Name: Flow control mechanisms -# File: logging-flow-control-mechanisms -# - Name: Filtering logs by content -# File: logging-content-filtering -# - Name: Filtering logs by metadata -# File: logging-input-spec-filtering -# - Name: Scheduling resources -# Dir: scheduling_resources -# Topics: -# - Name: Using node selectors to move logging resources -# File: logging-node-selectors -# - Name: Using tolerations to control logging pod placement -# File: logging-taints-tolerations -# - Name: Uninstalling Logging -# File: cluster-logging-uninstall -# - Name: Exported fields -# File: cluster-logging-exported-fields -# - Name: API reference -# Dir: api_reference -# Topics: -# - Name: 5.8 Logging API 
reference -# File: logging-5-8-reference -# - Name: 5.7 Logging API reference -# File: logging-5-7-reference -# - Name: 5.6 Logging API reference -# File: logging-5-6-reference -# - Name: Glossary -# File: logging-common-terms -# --- -# Name: Observability -# Dir: observability -# Distros: openshift-rosa-hcp -# Topics: -# - Name: Monitoring -# Dir: monitoring -# Distros: openshift-rosa-hcp -# Topics: -# - Name: Monitoring overview -# File: monitoring-overview -# - Name: Accessing monitoring for user-defined projects -# File: sd-accessing-monitoring-for-user-defined-projects -# - Name: Configuring the monitoring stack -# File: configuring-the-monitoring-stack -# - Name: Disabling monitoring for user-defined projects -# File: sd-disabling-monitoring-for-user-defined-projects -# - Name: Enabling alert routing for user-defined projects -# File: enabling-alert-routing-for-user-defined-projects -# - Name: Managing metrics -# File: managing-metrics -# - Name: Managing alerts -# File: managing-alerts -# - Name: Reviewing monitoring dashboards -# File: reviewing-monitoring-dashboards -# - Name: Accessing third-party monitoring APIs -# File: accessing-third-party-monitoring-apis -# - Name: Troubleshooting monitoring issues -# File: troubleshooting-monitoring-issues -# - Name: Config map reference for the Cluster Monitoring Operator -# File: config-map-reference-for-the-cluster-monitoring-operator -# --- -# Name: Service Mesh -# Dir: service_mesh -# Distros: openshift-rosa-hcp -# Topics: -# - Name: Service Mesh 2.x -# Dir: v2x -# Topics: -# - Name: About OpenShift Service Mesh -# File: ossm-about -# - Name: Service Mesh 2.x release notes -# File: servicemesh-release-notes -# - Name: Service Mesh architecture -# File: ossm-architecture -# - Name: Service Mesh deployment models -# File: ossm-deployment-models -# - Name: Service Mesh and Istio differences -# File: ossm-vs-community -# - Name: Preparing to install Service Mesh -# File: preparing-ossm-installation -# - Name: Installing the Operators -# File: installing-ossm -# - Name: Creating the ServiceMeshControlPlane -# File: ossm-create-smcp -# - Name: Adding workloads to a service mesh -# File: ossm-create-mesh -# - Name: Enabling sidecar injection -# File: prepare-to-deploy-applications-ossm -# - Name: Upgrading Service Mesh -# File: upgrading-ossm -# - Name: Managing users and profiles -# File: ossm-profiles-users -# - Name: Security -# File: ossm-security -# - Name: Traffic management -# File: ossm-traffic-manage -# - Name: Metrics, logs, and traces -# File: ossm-observability -# - Name: Performance and scalability -# File: ossm-performance-scalability -# - Name: Deploying to production -# File: ossm-deploy-production -# - Name: Federation -# File: ossm-federation -# - Name: Extensions -# File: ossm-extensions -# - Name: 3scale WebAssembly for 2.1 -# File: ossm-threescale-webassembly-module -# - Name: 3scale Istio adapter for 2.0 -# File: threescale-adapter -# - Name: Troubleshooting Service Mesh -# File: ossm-troubleshooting-istio -# - Name: Control plane configuration reference -# File: ossm-reference-smcp -# - Name: Kiali configuration reference -# File: ossm-reference-kiali -# - Name: Jaeger configuration reference -# File: ossm-reference-jaeger -# - Name: Uninstalling Service Mesh -# File: removing-ossm -# Service Mesh 1.x is tech preview -# - Name: Service Mesh 1.x -# Dir: v1x -# Topics: -# - Name: Service Mesh 1.x release notes -# File: servicemesh-release-notes -# - Name: Service Mesh architecture -# File: ossm-architecture -# - Name: 
Service Mesh and Istio differences -# File: ossm-vs-community -# - Name: Preparing to install Service Mesh -# File: preparing-ossm-installation -# - Name: Installing Service Mesh -# File: installing-ossm -# - Name: Security -# File: ossm-security -# - Name: Traffic management -# File: ossm-traffic-manage -# - Name: Deploying applications on Service Mesh -# File: prepare-to-deploy-applications-ossm -# - Name: Data visualization and observability -# File: ossm-observability -# - Name: Custom resources -# File: ossm-custom-resources -# - Name: 3scale Istio adapter for 1.x -# File: threescale-adapter -# - Name: Removing Service Mesh -# File: removing-ossm -# --- -# Name: Serverless -# Dir: serverless -# Distros: openshift-rosa-hcp -# Topics: -# - Name: About Serverless -# Dir: about -# Topics: -# - Name: Serverless overview -# File: about-serverless -# --- -# Name: Virtualization -# Dir: virt -# Distros: openshift-rosa-hcp -# Topics: -# - Name: About -# Dir: about_virt -# Topics: -# - Name: About OpenShift Virtualization -# File: about-virt -# Distros: openshift-rosa-hcp -# - Name: About OKD Virtualization -# File: about-virt -# Distros: openshift-origin -# - Name: Security policies -# File: virt-security-policies -# - Name: Architecture -# File: virt-architecture -# Distros: openshift-rosa-hcp -# - Name: Release notes -# Dir: release_notes -# Topics: -# - Name: OpenShift Virtualization release notes -# File: virt-release-notes-placeholder -# Distros: openshift-rosa-hcp -# - Name: Getting started -# Dir: getting_started -# Topics: -# - Name: Getting started with OpenShift Virtualization -# File: virt-getting-started -# Distros: openshift-rosa-hcp -# - Name: Getting started with OKD Virtualization -# File: virt-getting-started -# Distros: openshift-origin -# - Name: virtctl and libguestfs -# File: virt-using-the-cli-tools -# - Name: Installing -# Dir: install -# Topics: -# - Name: Preparing your cluster -# File: preparing-cluster-for-virt -# - Name: Installing OpenShift Virtualization -# File: installing-virt -# - Name: Uninstalling OpenShift Virtualization -# File: uninstalling-virt -# - Name: Post-installation configuration -# Dir: post_installation_configuration -# Topics: -# - Name: Post-installation configuration -# File: virt-post-install-config -# - Name: Node placement rules -# File: virt-node-placement-virt-components -# - Name: Network configuration -# File: virt-post-install-network-config -# - Name: Storage configuration -# File: virt-post-install-storage-config -# - Name: Updating -# Dir: updating -# Topics: -# - Name: Updating OpenShift Virtualization -# File: upgrading-virt -# Distros: openshift-rosa-hcp -# - Name: Virtual machines -# Dir: virtual_machines -# Topics: -# - Name: Creating VMs from Red Hat images -# Dir: creating_vms_rh -# Topics: -# - Name: Creating VMs from Red Hat images overview -# File: virt-creating-vms-from-rh-images-overview -# - Name: Creating VMs from templates -# File: virt-creating-vms-from-templates -# - Name: Creating VMs from instance types -# File: virt-creating-vms-from-instance-types -# - Name: Creating VMs from the CLI -# File: virt-creating-vms-from-cli -# - Name: Creating VMs from custom images -# Dir: creating_vms_custom -# Topics: -# - Name: Creating VMs from custom images overview -# File: virt-creating-vms-from-custom-images-overview -# - Name: Creating VMs by using container disks -# File: virt-creating-vms-from-container-disks -# - Name: Creating VMs by importing images from web pages -# File: virt-creating-vms-from-web-images -# - Name: 
Creating VMs by uploading images -# File: virt-creating-vms-uploading-images -# - Name: Cloning VMs -# File: virt-cloning-vms -# - Name: Creating VMs by cloning PVCs -# File: virt-creating-vms-by-cloning-pvcs -# - Name: Installing the QEMU guest agent and VirtIO drivers -# File: virt-installing-qemu-guest-agent -# - Name: Connecting to VM consoles -# File: virt-accessing-vm-consoles -# - Name: Configuring SSH access to VMs -# File: virt-accessing-vm-ssh -# - Name: Editing virtual machines -# File: virt-edit-vms -# - Name: Editing boot order -# File: virt-edit-boot-order -# - Name: Deleting virtual machines -# File: virt-delete-vms -# - Name: Exporting virtual machines -# File: virt-exporting-vms -# - Name: Managing virtual machine instances -# File: virt-manage-vmis -# - Name: Controlling virtual machine states -# File: virt-controlling-vm-states -# - Name: Using virtual Trusted Platform Module devices -# File: virt-using-vtpm-devices -# - Name: Managing virtual machines with OpenShift Pipelines -# File: virt-managing-vms-openshift-pipelines -# - Name: Advanced virtual machine management -# Dir: advanced_vm_management -# Topics: -# Advanced virtual machine configuration -# - Name: Working with resource quotas for virtual machines -# File: virt-working-with-resource-quotas-for-vms -# - Name: Specifying nodes for virtual machines -# File: virt-specifying-nodes-for-vms -# - Name: Configuring certificate rotation -# File: virt-configuring-certificate-rotation -# - Name: Configuring the default CPU model -# File: virt-configuring-default-cpu-model -# - Name: UEFI mode for virtual machines -# File: virt-uefi-mode-for-vms -# - Name: Configuring PXE booting for virtual machines -# File: virt-configuring-pxe-booting -# Huge pages not supported in ROSA -# - Name: Using huge pages with virtual machines -# File: virt-using-huge-pages-with-vms -# CPU Manager not supported in ROSA -# - Name: Enabling dedicated resources for a virtual machine -# File: virt-dedicated-resources-vm -# - Name: Scheduling virtual machines -# File: virt-schedule-vms -# Cannot create required machine config in ROSA as required -# - Name: Configuring PCI passthrough -# File: virt-configuring-pci-passthrough -# Cannot create required machine config in ROSA as required -# - Name: Configuring virtual GPUs -# File: virt-configuring-virtual-gpus -# Feature is TP, thus not supported in ROSA -# - Name: Enabling descheduler evictions on virtual machines -# File: virt-enabling-descheduler-evictions -# - Name: About high availability for virtual machines -# File: virt-high-availability-for-vms -# - Name: Control plane tuning -# File: virt-vm-control-plane-tuning -# - Name: VM disks -# Dir: virtual_disks -# Topics: -# - Name: Hot-plugging VM disks -# File: virt-hot-plugging-virtual-disks -# - Name: Expanding VM disks -# File: virt-expanding-vm-disks -# - Name: Networking -# Dir: vm_networking -# Topics: -# - Name: Networking configuration overview -# File: virt-networking-overview -# - Name: Connecting a VM to the default pod network -# File: virt-connecting-vm-to-default-pod-network -# - Name: Exposing a VM by using a service -# File: virt-exposing-vm-with-service -# Not supported in ROSA/OSD -# - Name: Connecting a VM to a Linux bridge network -# File: virt-connecting-vm-to-linux-bridge -# - Name: Connecting a VM to an SR-IOV network -# File: virt-connecting-vm-to-sriov -# - Name: Using DPDK with SR-IOV -# File: virt-using-dpdk-with-sriov -# - Name: Connecting a VM to an OVN-Kubernetes secondary network -# File: 
virt-connecting-vm-to-ovn-secondary-network -# - Name: Hot plugging secondary network interfaces -# File: virt-hot-plugging-network-interfaces -# - Name: Connecting a VM to a service mesh -# File: virt-connecting-vm-to-service-mesh -# - Name: Configuring a dedicated network for live migration -# File: virt-dedicated-network-live-migration -# - Name: Configuring and viewing IP addresses -# File: virt-configuring-viewing-ips-for-vms -# Tech Preview features not supported in ROSA/OSD -# - Name: Accessing a VM by using the cluster FQDN -# File: virt-accessing-vm-secondary-network-fqdn -# - Name: Managing MAC address pools for network interfaces -# File: virt-using-mac-address-pool-for-vms -# - Name: Storage -# Dir: storage -# Topics: -# - Name: Storage configuration overview -# File: virt-storage-config-overview -# - Name: Configuring storage profiles -# File: virt-configuring-storage-profile -# - Name: Managing automatic boot source updates -# File: virt-automatic-bootsource-updates -# - Name: Reserving PVC space for file system overhead -# File: virt-reserving-pvc-space-fs-overhead -# - Name: Configuring local storage by using HPP -# File: virt-configuring-local-storage-with-hpp -# - Name: Enabling user permissions to clone data volumes across namespaces -# File: virt-enabling-user-permissions-to-clone-datavolumes -# - Name: Configuring CDI to override CPU and memory quotas -# File: virt-configuring-cdi-for-namespace-resourcequota -# - Name: Preparing CDI scratch space -# File: virt-preparing-cdi-scratch-space -# - Name: Using preallocation for data volumes -# File: virt-using-preallocation-for-datavolumes -# - Name: Managing data volume annotations -# File: virt-managing-data-volume-annotations -# Virtual machine live migration -# - Name: Live migration -# Dir: live_migration -# Topics: -# - Name: About live migration -# File: virt-about-live-migration -# - Name: Configuring live migration -# File: virt-configuring-live-migration -# - Name: Initiating and canceling live migration -# File: virt-initiating-live-migration -# Node maintenance mode -# - Name: Nodes -# Dir: nodes -# Topics: -# - Name: Node maintenance -# File: virt-node-maintenance -# - Name: Managing node labeling for obsolete CPU models -# File: virt-managing-node-labeling-obsolete-cpu-models -# - Name: Preventing node reconciliation -# File: virt-preventing-node-reconciliation -# Hiding in ROSA as user cannot cordon and drain nodes -# - Name: Deleting a failed node to trigger VM failover -# File: virt-triggering-vm-failover-resolving-failed-node -# - Name: Monitoring -# Dir: monitoring -# Topics: -# - Name: Monitoring overview -# File: virt-monitoring-overview -# Hiding in ROSA/OSD as TP not supported -# - Name: Cluster checkup framework -# File: virt-running-cluster-checkups -# - Name: Prometheus queries for virtual resources -# File: virt-prometheus-queries -# - Name: Virtual machine custom metrics -# File: virt-exposing-custom-metrics-for-vms -# - Name: Virtual machine health checks -# File: virt-monitoring-vm-health -# - Name: Runbooks -# File: virt-runbooks -# - Name: Support -# Dir: support -# Topics: -# - Name: Support overview -# File: virt-support-overview -# - Name: Collecting data for Red Hat Support -# File: virt-collecting-virt-data -# Distros: openshift-rosa-hcp -# - Name: Troubleshooting -# File: virt-troubleshooting -# - Name: Backup and restore -# Dir: backup_restore -# Topics: -# - Name: Backup and restore by using VM snapshots -# File: virt-backup-restore-snapshots -# - Name: Installing and configuring OADP 
-# File: virt-installing-configuring-oadp -# - Name: Backing up and restoring virtual machines -# File: virt-backup-restore-overview -# - Name: Backing up virtual machines -# File: virt-backing-up-vms -# - Name: Restoring virtual machines -# File: virt-restoring-vms -# - Name: Collecting OKD Virtualization data for community report -# File: virt-collecting-virt-data -# Distros: openshift-origin -# Distros: openshift-origin \ No newline at end of file diff --git a/backup_and_restore/application_backup_and_restore/release-notes/oadp-1-4-release-notes.adoc b/backup_and_restore/application_backup_and_restore/release-notes/oadp-1-4-release-notes.adoc index feee9a31b6..f53184e964 100644 --- a/backup_and_restore/application_backup_and_restore/release-notes/oadp-1-4-release-notes.adoc +++ b/backup_and_restore/application_backup_and_restore/release-notes/oadp-1-4-release-notes.adoc @@ -14,10 +14,7 @@ The release notes for {oadp-first} describe new features and enhancements, depre For additional information about {oadp-short}, see link:https://access.redhat.com/articles/5456281[{oadp-first} FAQs] ==== -<<<<<<< HEAD include::modules/oadp-1-4-1-release-notes.adoc[leveloffset=+1] -======= ->>>>>>> c17ffd7cec (Adding the Security HCP cherrypick) include::modules/oadp-1-4-0-release-notes.adoc[leveloffset=+1] include::modules/oadp-backing-up-dpa-configuration-1-4-0.adoc[leveloffset=+3] include::modules/oadp-upgrading-oadp-operator-1-4-0.adoc[leveloffset=+3] @@ -31,8 +28,4 @@ include::modules/oadp-upgrading-oadp-operator-1-4-0.adoc[leveloffset=+3] To upgrade from OADP 1.3 to 1.4, no Data Protection Application (DPA) changes are required. -<<<<<<< HEAD -include::modules/oadp-verifying-upgrade-1-4-0.adoc[leveloffset=+2] -======= -include::modules/oadp-verifying-upgrade-1-4-0.adoc[leveloffset=+2] ->>>>>>> c17ffd7cec (Adding the Security HCP cherrypick) +include::modules/oadp-verifying-upgrade-1-4-0.adoc[leveloffset=+2] \ No newline at end of file diff --git a/canary.txt b/canary.txt deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/edge_computing/image_based_upgrade/cnf-understanding-image-based-upgrade.adoc b/edge_computing/image_based_upgrade/cnf-understanding-image-based-upgrade.adoc index 614d692bee..1f45ad0fcb 100644 --- a/edge_computing/image_based_upgrade/cnf-understanding-image-based-upgrade.adoc +++ b/edge_computing/image_based_upgrade/cnf-understanding-image-based-upgrade.adoc @@ -76,11 +76,8 @@ include::modules/cnf-image-based-upgrade.adoc[leveloffset=+1] * xref:../../edge_computing/image_based_upgrade/cnf-image-based-upgrade-base.adoc#cnf-image-based-upgrade[Performing an image-based upgrade for {sno} clusters with {lcao}] -<<<<<<< HEAD * xref:../../edge_computing/image_based_upgrade/ztp-image-based-upgrade.adoc#ztp-image-based-upgrade[Performing an image-based upgrade for {sno} clusters using {ztp}] -======= ->>>>>>> c17ffd7cec (Adding the Security HCP cherrypick) include::modules/cnf-image-based-upgrade-guidelines.adoc[leveloffset=+1] [role="_additional-resources"] diff --git a/edge_computing/image_based_upgrade/ztp-image-based-upgrade.adoc b/edge_computing/image_based_upgrade/ztp-image-based-upgrade.adoc index 1932dc8bd3..09c4d789ed 100644 --- a/edge_computing/image_based_upgrade/ztp-image-based-upgrade.adoc +++ b/edge_computing/image_based_upgrade/ztp-image-based-upgrade.adoc @@ -6,33 +6,17 @@ include::_attributes/common-attributes.adoc[] toc::[] -<<<<<<< HEAD // Lifecycle Agent (LCA) You can use a single resource on the hub cluster, the `ImageBasedGroupUpgrade` custom 
resource (CR), to manage an image-based upgrade on a selected group of managed clusters through all stages. {cgu-operator-first} reconciles the `ImageBasedGroupUpgrade` CR and creates the underlying resources to complete the defined stage transitions, either in a manually controlled or a fully automated upgrade flow. For more information about the image-based upgrade, see "Understanding the image-based upgrade for single-node OpenShift clusters". -======= -You can use a single resource on the hub cluster, the `ImageBasedGroupUpgrade` custom resource (CR), to manage an imaged-based upgrade on a selected group of managed clusters through all stages. -{cgu-operator-first} reconciles the `ImageBasedGroupUpgrade` CR and creates the underlying resources to complete the defined stage transitions, either in a manually controlled or a fully automated upgrade flow. - -// Lifecycle Agent (LCA) - -include::modules/ztp-image-based-upgrade-concept.adoc[leveloffset=+1] ->>>>>>> c17ffd7cec (Adding the Security HCP cherrypick) [role="_additional-resources"] .Additional resources -<<<<<<< HEAD * xref:../../edge_computing/image_based_upgrade/cnf-understanding-image-based-upgrade.adoc#cnf-understanding-image-based-upgrade[Understanding the image-based upgrade for single-node OpenShift clusters] -======= -* xref:../../backup_and_restore/control_plane_backup_and_restore/disaster_recovery/scenario-3-expired-certs.adoc#dr-scenario-3-recovering-expired-certs_dr-recovering-expired-certs[Recovering from expired control plane certificates] - -//// -* xref:../../edge_computing/ztp-preparing-the-hub-cluster.adoc#ztp-preparing-the-ztp-git-repository-ver-ind_ztp-preparing-the-hub-cluster[Preparing the {ztp} site configuration repository for version independence] ->>>>>>> c17ffd7cec (Adding the Security HCP cherrypick) include::modules/ztp-image-based-upgrade-concept.adoc[leveloffset=+1] @@ -50,7 +34,6 @@ include::modules/ztp-image-based-upgrade-procedure-steps.adoc[leveloffset=+1] * xref:../../backup_and_restore/application_backup_and_restore/backing_up_and_restoring/oadp-creating-backup-cr.adoc#oadp-creating-backup-cr-doc[Creating a Backup CR] * xref:../../backup_and_restore/application_backup_and_restore/backing_up_and_restoring/restoring-applications.adoc#oadp-creating-restore-cr_restoring-applications[Creating a Restore CR] -<<<<<<< HEAD * xref:../../edge_computing/image_based_upgrade/ztp-image-based-upgrade.adoc#ztp-image-based-upgrade-supported-combinations_ztp-gitops[Supported action combinations] @@ -64,9 +47,6 @@ include::modules/ztp-image-based-upgrade-procedure-cancel.adoc[leveloffset=+1] * xref:../../edge_computing/image_based_upgrade/ztp-image-based-upgrade.adoc#ztp-image-based-upgrade-supported-combinations_ztp-gitops[Supported action combinations] include::modules/ztp-image-based-upgrade-procedure-rollback.adoc[leveloffset=+1] -======= -//// ->>>>>>> c17ffd7cec (Adding the Security HCP cherrypick) [role="_additional-resources"] .Additional resources diff --git a/hosted_control_planes/hcp-deploy/hcp-deploy-non-bm.adoc b/hosted_control_planes/hcp-deploy/hcp-deploy-non-bm.adoc index c6c04913ec..6f41e5f072 100644 --- a/hosted_control_planes/hcp-deploy/hcp-deploy-non-bm.adoc +++ b/hosted_control_planes/hcp-deploy/hcp-deploy-non-bm.adoc @@ -58,11 +58,7 @@ include::modules/hcp-non-bm-hc.adoc[leveloffset=+1] [role="_additional-resources"] .Additional resources -<<<<<<< HEAD * xref:../../hosted_control_planes/hcp-import.adoc#hcp-import-manual_hcp-import[Manually importing a hosted cluster] -======= -* 
xref:../../hosted_control_planes/hcp-import.adoc#hcp-import-manual_hcp-import[Manually importing a hosted control plane cluster] ->>>>>>> c17ffd7cec (Adding the Security HCP cherrypick) include::modules/hcp-non-bm-hc-console.adoc[leveloffset=+2] diff --git a/installing/installing_openstack/installing-openstack-three-node.adoc b/installing/installing_openstack/installing-openstack-three-node.adoc index ccafc40e08..89b1a2e1e2 100644 --- a/installing/installing_openstack/installing-openstack-three-node.adoc +++ b/installing/installing_openstack/installing-openstack-three-node.adoc @@ -8,18 +8,9 @@ toc::[] In {product-title} version {product-version}, you can install a three-node cluster on {rh-openstack-first}. A three-node cluster consists of three control plane machines, which also act as compute machines. This type of cluster provides a smaller, more resource efficient cluster, for cluster administrators and developers to use for testing, development, and production. -<<<<<<< HEAD You can install a three-node cluster on installer-provisioned infrastructure only. -======= -You can install a three-node cluster by using either installer-provisioned or user-provisioned infrastructure. ->>>>>>> c17ffd7cec (Adding the Security HCP cherrypick) include::modules/installation-three-node-cluster-cloud-provider.adoc[leveloffset=+1] == Next steps -<<<<<<< HEAD -* xref:../../installing/installing_openstack/installing-openstack-installer-custom.adoc#installing-openstack-installer-custom[Installing a cluster on OpenStack with customizations] -======= -* xref:../../installing/installing_openstack/installing-openstack-installer-custom.adoc#installing-openstack-installer-custom[Installing a cluster on OpenStack with customizations] -* xref:../../installing/installing_openstack/installing-openstack-user.adoc#installing-openstack-user[Installing a cluster on OpenStack on your own infrastructure] ->>>>>>> c17ffd7cec (Adding the Security HCP cherrypick) +* xref:../../installing/installing_openstack/installing-openstack-installer-custom.adoc#installing-openstack-installer-custom[Installing a cluster on OpenStack with customizations] \ No newline at end of file diff --git a/microshift_welcome/index.adoc b/microshift_welcome/index.adoc index 13c965f2eb..3fe3870cec 100644 --- a/microshift_welcome/index.adoc +++ b/microshift_welcome/index.adoc @@ -21,11 +21,7 @@ To browse the {microshift-short} {product-version} documentation, use one of the To get started with {microshift-short}, use the following links: //text is in main assembly for the sake of cross references -<<<<<<< HEAD * xref:../microshift_release_notes/microshift-4-18-release-notes.adoc#microshift-4-18-release-notes[{product-title} release notes] -======= -* xref:../microshift_release_notes/microshift-4-17-release-notes.adoc#microshift-4-17-release-notes[{product-title} release notes] ->>>>>>> c17ffd7cec (Adding the Security HCP cherrypick) * xref:../microshift_install_rpm/microshift-install-rpm.adoc#microshift-install-rpm[Installing {product-title}] For related information, use the following links: diff --git a/modules/about-developer-perspective.adoc b/modules/about-developer-perspective.adoc index b15c427c0c..22cea552cb 100644 --- a/modules/about-developer-perspective.adoc +++ b/modules/about-developer-perspective.adoc @@ -31,8 +31,4 @@ The *Developer* perspective provides workflows specific to developer use cases, You can use the *Topology* view to display applications, components, and workloads of your project. 
If you have no workloads in the project, the *Topology* view will show some links to create or import them. You can also use the *Quick Search* to import components directly. .Additional resources -<<<<<<< HEAD See link:https://docs.openshift.com/container-platform/4.17/applications/odc-viewing-application-composition-using-topology-view.html[Viewing application composition using the Topology] view for more information on using the *Topology* view in *Developer* perspective. -======= -See link:https://docs.openshift.com/container-platform/4.16/applications/odc-viewing-application-composition-using-topology-view.html[Viewing application composition using the Topology] view for more information on using the *Topology* view in *Developer* perspective. ->>>>>>> c17ffd7cec (Adding the Security HCP cherrypick) diff --git a/modules/cnf-image-based-upgrade-configure-container-storage-image-cleanup.adoc b/modules/cnf-image-based-upgrade-configure-container-storage-image-cleanup.adoc index f240d752d9..55a5ee6c0b 100644 --- a/modules/cnf-image-based-upgrade-configure-container-storage-image-cleanup.adoc +++ b/modules/cnf-image-based-upgrade-configure-container-storage-image-cleanup.adoc @@ -9,11 +9,7 @@ Configure the minimum threshold for available storage space through annotations. .Prerequisites -<<<<<<< HEAD * You have created an `ImageBasedUpgrade` CR. -======= -* Create an `ImageBasedUpgrade` CR. ->>>>>>> c17ffd7cec (Adding the Security HCP cherrypick) .Procedure diff --git a/modules/dedicated-aws-vpc-verifying-troubleshooting.adoc b/modules/dedicated-aws-vpc-verifying-troubleshooting.adoc index cf3373e7fb..3da569f906 100644 --- a/modules/dedicated-aws-vpc-verifying-troubleshooting.adoc +++ b/modules/dedicated-aws-vpc-verifying-troubleshooting.adoc @@ -32,7 +32,6 @@ quick and clear output if a connection can be established: .. Create a temporary pod using the `busybox` image, which cleans up after itself: + -[source,terminal] ---- $ oc run netcat-test \ --image=busybox -i -t \ @@ -45,7 +44,6 @@ $ oc run netcat-test \ -- * Example successful connection results: + -[source,terminal] ---- / nc -zvv 192.168.1.1 8080 10.181.3.180 (10.181.3.180:8080) open @@ -54,7 +52,6 @@ sent 0, rcvd 0 * Example failed connection results: + -[source,terminal] ---- / nc -zvv 192.168.1.2 8080 nc: 10.181.3.180 (10.181.3.180:8081): Connection refused @@ -64,7 +61,6 @@ sent 0, rcvd 0 .. Exit the container, which automatically deletes the Pod: + -[source,terminal] ---- / exit ---- diff --git a/modules/dedicated-aws-vpn-verifying.adoc b/modules/dedicated-aws-vpn-verifying.adoc index 06ba4e32d9..0e798d0352 100644 --- a/modules/dedicated-aws-vpn-verifying.adoc +++ b/modules/dedicated-aws-vpn-verifying.adoc @@ -30,7 +30,6 @@ quick and clear output if a connection can be established: .. Create a temporary pod using the `busybox` image, which cleans up after itself: + -[source,terminal] ---- $ oc run netcat-test \ --image=busybox -i -t \ @@ -43,7 +42,6 @@ $ oc run netcat-test \ -- * Example successful connection results: + -[source,terminal] ---- / nc -zvv 192.168.1.1 8080 10.181.3.180 (10.181.3.180:8080) open @@ -52,7 +50,6 @@ sent 0, rcvd 0 * Example failed connection results: + -[source,terminal] ---- / nc -zvv 192.168.1.2 8080 nc: 10.181.3.180 (10.181.3.180:8081): Connection refused @@ -62,7 +59,6 @@ sent 0, rcvd 0 .. 
Exit the container, which automatically deletes the Pod: + -[source,terminal] ---- / exit ---- diff --git a/modules/destroy-hc-ibmz-cli.adoc b/modules/destroy-hc-ibmz-cli.adoc index 1316e55415..02a524c32e 100644 --- a/modules/destroy-hc-ibmz-cli.adoc +++ b/modules/destroy-hc-ibmz-cli.adoc @@ -6,11 +6,7 @@ [id="destroy-hc-ibmz-cli_{context}"] = Destroying a hosted cluster on x86 bare metal with {ibm-z-title} compute nodes -<<<<<<< HEAD To destroy a hosted cluster and its managed cluster on `x86` bare metal with {ibm-z-title} compute nodes, you can use the command-line interface (CLI). -======= -You can use the command-line interface (CLI) to destroy a hosted cluster on `x86` bare metal with {ibm-z-title} compute nodes and its managed cluster. ->>>>>>> c17ffd7cec (Adding the Security HCP cherrypick) .Procedure diff --git a/modules/installing-ocp-agent-ibm-z-zvm.adoc b/modules/installing-ocp-agent-ibm-z-zvm.adoc index 638116adc8..7ae89676ca 100644 --- a/modules/installing-ocp-agent-ibm-z-zvm.adoc +++ b/modules/installing-ocp-agent-ibm-z-zvm.adoc @@ -8,13 +8,10 @@ Use the following procedure to manually add {ibm-z-name} agents with z/VM. Only use this procedure for {ibm-z-name} clusters with z/VM. -<<<<<<< HEAD .Prerequisites * A running file server with access to the guest Virtual Machines. -======= ->>>>>>> c17ffd7cec (Adding the Security HCP cherrypick) .Procedure diff --git a/modules/microshift-updates-troubleshooting.adoc b/modules/microshift-updates-troubleshooting.adoc index b4c7eb9319..4f2f32677a 100644 --- a/modules/microshift-updates-troubleshooting.adoc +++ b/modules/microshift-updates-troubleshooting.adoc @@ -28,16 +28,10 @@ Check the following update paths: *{product-title} update paths* -<<<<<<< HEAD * Generally Available Version 4.18.0 to 4.18.z on {op-system-base} 9.4 * Generally Available Version 4.17.1 to 4.17.z on {op-system-base} 9.4 * Generally Available Version 4.15.0 from {op-system-base} 9.2 to 4.16.0 on {op-system-base} 9.4 * Generally Available Version 4.14.0 from {op-system-base} 9.2 to 4.15.0 on {op-system-base} 9.4 -======= -* Generally Available Version 4.16.0 to 4.16.z on {op-system-ostree} 9.4 -* Generally Available Version 4.15.0 from {op-system-base} 9.2 to 4.16.0 on {op-system-base} 9.4 -* Generally Available Version 4.14.0 from {op-system-base} 9.2 to 4.16.0 on {op-system-base} 9.4 ->>>>>>> c17ffd7cec (Adding the Security HCP cherrypick) [id="microshift-ostree-update-failed_{context}"] == OSTree update failed diff --git a/modules/nw-ingress-controller-nodeportservice-projects.adoc b/modules/nw-ingress-controller-nodeportservice-projects.adoc index c1caac9d8d..81ff2a548d 100644 --- a/modules/nw-ingress-controller-nodeportservice-projects.adoc +++ b/modules/nw-ingress-controller-nodeportservice-projects.adoc @@ -19,11 +19,7 @@ Before you set a `NodePort`-type `Service` for each project, read the following * You installed the {oc-first}. * Logged in as a user with `cluster-admin` privileges. * You created a wildcard DNS record. 
-<<<<<<< HEAD // https://docs.openshift.com/container-platform/4.17/networking/ingress-controller-dnsmgt.html (does not detail how to create the DNS) -======= -// https://docs.openshift.com/container-platform/4.16/networking/ingress-controller-dnsmgt.html (does not detail how to create the DNS) ->>>>>>> c17ffd7cec (Adding the Security HCP cherrypick) .Procedure diff --git a/modules/odc-accessing-perspectives.adoc b/modules/odc-accessing-perspectives.adoc index c43900fcb3..4475396dee 100644 --- a/modules/odc-accessing-perspectives.adoc +++ b/modules/odc-accessing-perspectives.adoc @@ -13,11 +13,7 @@ You can access the *Administrator* and *Developer* perspective from the web cons To access a perspective, ensure that you have logged in to the web console. Your default perspective is automatically determined by the permission of the users. The *Administrator* perspective is selected for users with access to all projects, while the *Developer* perspective is selected for users with limited access to their own projects .Additional resources -<<<<<<< HEAD See link:https://docs.openshift.com/container-platform/4.17/web_console/adding-user-preferences.html[Adding User Preferences] for more information on changing perspectives. -======= -See link:https://docs.openshift.com/container-platform/4.16/web_console/adding-user-preferences.html[Adding User Preferences] for more information on changing perspectives. ->>>>>>> c17ffd7cec (Adding the Security HCP cherrypick) .Procedure diff --git a/modules/olmv1-installing-an-operator.adoc b/modules/olmv1-installing-an-operator.adoc index 59c0c4bb84..a3d22c94ff 100644 --- a/modules/olmv1-installing-an-operator.adoc +++ b/modules/olmv1-installing-an-operator.adoc @@ -10,14 +10,11 @@ You can install an extension from a catalog by creating a custom resource (CR) and applying it to the cluster. {olmv1-first} supports installing cluster extensions, including {olmv0} Operators via the `registry+v1` bundle format, that are scoped to the cluster. For more information, see _Supported extensions_. -<<<<<<< HEAD [IMPORTANT] ==== include::snippets/olmv1-known-issue-private-registries.adoc[] ==== -======= ->>>>>>> c17ffd7cec (Adding the Security HCP cherrypick) .Prerequisites * You have added a catalog to your cluster. diff --git a/modules/ossm-release-2-5-2.adoc b/modules/ossm-release-2-5-2.adoc index 9a8f82ab44..a83d68b245 100644 --- a/modules/ossm-release-2-5-2.adoc +++ b/modules/ossm-release-2-5-2.adoc @@ -25,11 +25,7 @@ This release addresses Common Vulnerabilities and Exposures (CVEs), contains bug |Component |Version |Istio -<<<<<<< HEAD |1.18.7 -======= -|1.18.5 ->>>>>>> c17ffd7cec (Adding the Security HCP cherrypick) |Envoy Proxy |1.26.8 diff --git a/modules/ossm-release-2-5-3.adoc b/modules/ossm-release-2-5-3.adoc index 7c53c589d5..b830e71851 100644 --- a/modules/ossm-release-2-5-3.adoc +++ b/modules/ossm-release-2-5-3.adoc @@ -16,11 +16,7 @@ This release of {SMProductName} is included with the {SMProductName} Operator 2. |Component |Version |Istio -<<<<<<< HEAD |1.18.7 -======= -|1.18.5 ->>>>>>> c17ffd7cec (Adding the Security HCP cherrypick) |Envoy Proxy |1.26.8 diff --git a/modules/ossm-release-2-6-0.adoc b/modules/ossm-release-2-6-0.adoc index ca7515f57d..2689207c4d 100644 --- a/modules/ossm-release-2-6-0.adoc +++ b/modules/ossm-release-2-6-0.adoc @@ -18,10 +18,6 @@ This release adds new features, addresses Common Vulnerabilities and Exposures ( This release ends maintenance support for {SMProductName} version 2.3. 
If you are using {SMProductShortName} version 2.3, you should update to a supported version. -<<<<<<< HEAD -======= -include::snippets/ossm-current-version-support-snippet.adoc[] ->>>>>>> c17ffd7cec (Adding the Security HCP cherrypick) //FIPS messaging verified with Matt Werner, CS, OCP on 06/27/2024 via Slack. It is also the same FIPS messaging currently used by Serverless. //Per Scott Dodson on 07/15/204 via Slack, confirmed that RHEL 2.9 has been submitted for FIPS validation. Admonition updated accordingly. //Per Kirsten Newcomer on 07/16/2024 via Slack, FIPS messaging for Service Mesh has been changed. Jamie (PM) has agreed with change. @@ -119,20 +115,11 @@ When updating existing instances of the `ServiceMeshControlPlane` resource to {S {SMProductName} 2.6 is the last release that includes support for {JaegerName} and {es-op}. Both {JaegerShortName} and {es-op} will be removed in the next release. If you are currently using {JaegerShortName} and {es-op}, you need to switch to {TempoName} and {OTELName}. -<<<<<<< HEAD //Gateway API Update for 2.6 OSSM-5854 subsequently revised by OSSM-8241 //Kubernetes Gateway API and {product-title} Gateway API are the same. It is referenced as {product-title} Gateway API in 2.5 and as {product-title} Gateway API here https://docs.openshift.com/container-platform/4.15/nodes/clusters/nodes-cluster-enabling-features.html so to be consistent, it is also referenced as {product-title} Gateway API for 2.6. [id="gateway-api-ga-cluster-wide-deployments-ossm-2-6-0_{context}"] == Gateway API use is generally available for {SMProductName} cluster-wide deployments This release introduces the General Availability for using the Kubernetes Gateway API version 1.0.0 with {SMProductName} 2.6. This API use is limited to {SMProductName}. The Gateway API custom resource definitions (CRDs) are not supported. -======= -//Gateway API Update for 2.6 OSSM-5854 -//Kubernetes Gateway API and {product-title} Gateway API are the same. It is referenced as {product-title} Gateway API in 2.5 and as {product-title} Gateway API here https://docs.openshift.com/container-platform/4.15/nodes/clusters/nodes-cluster-enabling-features.html so to be consistent, it is also referenced as {product-title} Gateway API for 2.6. -[id="gateway-api-ga-cluster-wide-deployments-ossm-2-6-0_{context}"] -== {product-title} Gateway API generally available for cluster-wide deployments -//Jacek. Approved 07/11/2024 -This release introduces the General Availability of {product-title} Gateway API, also known as the Kubernetes Gateway API, which is enabled by default only for cluster-wide deployments. ->>>>>>> c17ffd7cec (Adding the Security HCP cherrypick) Gateway API is now enabled by default if cluster-wide mode is enabled (`spec.mode: ClusterWide`). It can be enabled even if the custom resource definitions (CRDs) are not installed in the cluster. diff --git a/modules/rosa-policy-security-and-compliance.adoc b/modules/rosa-policy-security-and-compliance.adoc index 7ca27a565e..625ea9af50 100644 --- a/modules/rosa-policy-security-and-compliance.adoc +++ b/modules/rosa-policy-security-and-compliance.adoc @@ -118,8 +118,4 @@ to ensure application and data security controls are properly enforced. - Use IAM tools to apply the appropriate permissions to AWS resources in the customer account. 
-|=== - -.Additional resources - -* For more information about customer or shared responsibilities, see the xref:../../rosa_architecture/rosa_policy_service_definition/rosa-policy-process-security.adoc#rosa-policy-process-security[ROSA Security] document. \ No newline at end of file +|=== \ No newline at end of file diff --git a/modules/rosa-sdpolicy-instance-types.adoc b/modules/rosa-sdpolicy-instance-types.adoc index 66efb9bf5a..31bee2660b 100644 --- a/modules/rosa-sdpolicy-instance-types.adoc +++ b/modules/rosa-sdpolicy-instance-types.adoc @@ -13,11 +13,7 @@ endif::[] = Instance types ifdef::rosa-with-hcp[] -<<<<<<< HEAD All {hcp-title} clusters require a minimum of 2 worker nodes. Shutting down the underlying infrastructure through the cloud provider console is unsupported and can lead to data loss. -======= -All {hcp-title} clusters require a minimum of 2 worker nodes. All {hcp-title} clusters support a maximum of 250 worker nodes. Shutting down the underlying infrastructure through the cloud provider console is unsupported and can lead to data loss. ->>>>>>> c17ffd7cec (Adding the Security HCP cherrypick) endif::rosa-with-hcp[] ifndef::rosa-with-hcp[] Single availability zone clusters require a minimum of 3 control plane nodes, 2 infrastructure nodes, and 2 worker nodes deployed to a single availability zone. diff --git a/modules/sd-hcp-planning-cluster-maximums.adoc b/modules/sd-hcp-planning-cluster-maximums.adoc index c16c1bc840..b8fd076b15 100644 --- a/modules/sd-hcp-planning-cluster-maximums.adoc +++ b/modules/sd-hcp-planning-cluster-maximums.adoc @@ -8,19 +8,11 @@ Consider the following tested object maximums when you plan a {hcp-title-first} cluster installation. The table specifies the maximum limits for each tested type in a {hcp-title} cluster. -<<<<<<< HEAD These guidelines are based on a cluster of 500 compute (also known as worker) nodes. For smaller clusters, the maximums are lower. [NOTE] ==== Customers running {hcp-title} 4.14.x and 4.15.x clusters require a minimum z-stream version of 4.14.28 or 4.15.15 and greater to scale to 500 worker nodes. For earlier versions, the maximum is 90 worker nodes. -======= -These guidelines are based on a cluster of 250 compute (also known as worker) nodes. For smaller clusters, the maximums are lower. - -[NOTE] -==== -Customers running {hcp-title} 4.14.x and 4.15.x clusters require a minimum z-stream version of 4.14.28 or 4.15.15 and greater to scale to 250 worker nodes. For earlier versions, the maximum is 90 worker nodes. 
->>>>>>> c17ffd7cec (Adding the Security HCP cherrypick) ==== .Tested cluster maximums diff --git a/modules/telco-ran-crs-cluster-tuning.adoc b/modules/telco-ran-crs-cluster-tuning.adoc index 9245bb8757..a3a65c3941 100644 --- a/modules/telco-ran-crs-cluster-tuning.adoc +++ b/modules/telco-ran-crs-cluster-tuning.adoc @@ -18,13 +18,5 @@ Disconnected registry,xref:../../telco_ref_design_specs/ran/telco-ran-ref-du-crs Disconnected registry,xref:../../telco_ref_design_specs/ran/telco-ran-ref-du-crs.adoc#ztp-disconnectedicsp-yaml[DisconnectedICSP.yaml],No,No Disconnected registry,xref:../../telco_ref_design_specs/ran/telco-ran-ref-du-crs.adoc#ztp-operatorhub-yaml[OperatorHub.yaml],"OperatorHub is required for {sno} and optional for multi-node clusters",No Monitoring configuration,xref:../../telco_ref_design_specs/ran/telco-ran-ref-du-crs.adoc#ztp-reducemonitoringfootprint-yaml[ReduceMonitoringFootprint.yaml],No,No -<<<<<<< HEAD Network diagnostics disable,xref:../../telco_ref_design_specs/ran/telco-ran-ref-du-crs.adoc#ztp-disablesnonetworkdiag-yaml[DisableSnoNetworkDiag.yaml],No,No -======= -OperatorHub,xref:../../telco_ref_design_specs/ran/telco-ran-ref-du-crs.adoc#ztp-09-openshift-marketplace-ns-yaml[09-openshift-marketplace-ns.yaml],No,No -OperatorHub,xref:../../telco_ref_design_specs/ran/telco-ran-ref-du-crs.adoc#ztp-defaultcatsrc-yaml[DefaultCatsrc.yaml],No,No -OperatorHub,xref:../../telco_ref_design_specs/ran/telco-ran-ref-du-crs.adoc#ztp-disableolmpprof-yaml[DisableOLMPprof.yaml],No,No -OperatorHub,xref:../../telco_ref_design_specs/ran/telco-ran-ref-du-crs.adoc#ztp-disconnectedicsp-yaml[DisconnectedICSP.yaml],No,No -OperatorHub,xref:../../telco_ref_design_specs/ran/telco-ran-ref-du-crs.adoc#ztp-operatorhub-yaml[OperatorHub.yaml],Yes,No ->>>>>>> c17ffd7cec (Adding the Security HCP cherrypick) |==== diff --git a/modules/telco-ran-crs-day-2-operators.adoc b/modules/telco-ran-crs-day-2-operators.adoc index 4425bc482a..71ef8ee3b4 100644 --- a/modules/telco-ran-crs-day-2-operators.adoc +++ b/modules/telco-ran-crs-day-2-operators.adoc @@ -17,23 +17,15 @@ Cluster logging,xref:../../telco_ref_design_specs/ran/telco-ran-ref-du-crs.adoc# Cluster logging,xref:../../telco_ref_design_specs/ran/telco-ran-ref-du-crs.adoc#ztp-clusterlogserviceaccountauditbinding-yaml[ClusterLogServiceAccountAuditBinding.yaml],No,Yes Cluster logging,xref:../../telco_ref_design_specs/ran/telco-ran-ref-du-crs.adoc#ztp-clusterlogserviceaccountinfrastructurebinding-yaml[ClusterLogServiceAccountInfrastructureBinding.yaml],No,Yes Cluster logging,xref:../../telco_ref_design_specs/ran/telco-ran-ref-du-crs.adoc#ztp-clusterlogsubscription-yaml[ClusterLogSubscription.yaml],No,No -<<<<<<< HEAD LifeCycle Agent Operator,xref:../../telco_ref_design_specs/ran/telco-ran-ref-du-crs.adoc#ztp-imagebasedupgrade-yaml[ImageBasedUpgrade.yaml],Yes,No LifeCycle Agent Operator,xref:../../telco_ref_design_specs/ran/telco-ran-ref-du-crs.adoc#ztp-lcasubscription-yaml[LcaSubscription.yaml],Yes,No LifeCycle Agent Operator,xref:../../telco_ref_design_specs/ran/telco-ran-ref-du-crs.adoc#ztp-lcasubscriptionns-yaml[LcaSubscriptionNS.yaml],Yes,No LifeCycle Agent Operator,xref:../../telco_ref_design_specs/ran/telco-ran-ref-du-crs.adoc#ztp-lcasubscriptionopergroup-yaml[LcaSubscriptionOperGroup.yaml],Yes,No -======= -Lifecycle Agent ,xref:../../telco_ref_design_specs/ran/telco-ran-ref-du-crs.adoc#ztp-imagebasedupgrade-yaml[ImageBasedUpgrade.yaml],Yes,Yes -Lifecycle Agent 
,xref:../../telco_ref_design_specs/ran/telco-ran-ref-du-crs.adoc#ztp-lcasubscription-yaml[LcaSubscription.yaml],Yes,Yes -Lifecycle Agent ,xref:../../telco_ref_design_specs/ran/telco-ran-ref-du-crs.adoc#ztp-lcasubscriptionns-yaml[LcaSubscriptionNS.yaml],Yes,Yes -Lifecycle Agent ,xref:../../telco_ref_design_specs/ran/telco-ran-ref-du-crs.adoc#ztp-lcasubscriptionopergroup-yaml[LcaSubscriptionOperGroup.yaml],Yes,Yes ->>>>>>> c17ffd7cec (Adding the Security HCP cherrypick) Local Storage Operator,xref:../../telco_ref_design_specs/ran/telco-ran-ref-du-crs.adoc#ztp-storageclass-yaml[StorageClass.yaml],Yes,No Local Storage Operator,xref:../../telco_ref_design_specs/ran/telco-ran-ref-du-crs.adoc#ztp-storagelv-yaml[StorageLV.yaml],Yes,No Local Storage Operator,xref:../../telco_ref_design_specs/ran/telco-ran-ref-du-crs.adoc#ztp-storagens-yaml[StorageNS.yaml],Yes,No Local Storage Operator,xref:../../telco_ref_design_specs/ran/telco-ran-ref-du-crs.adoc#ztp-storageopergroup-yaml[StorageOperGroup.yaml],Yes,No Local Storage Operator,xref:../../telco_ref_design_specs/ran/telco-ran-ref-du-crs.adoc#ztp-storagesubscription-yaml[StorageSubscription.yaml],Yes,No -<<<<<<< HEAD LVM Operator,xref:../../telco_ref_design_specs/ran/telco-ran-ref-du-crs.adoc#ztp-lvmoperatorstatus-yaml[LVMOperatorStatus.yaml],Yes,No LVM Operator,xref:../../telco_ref_design_specs/ran/telco-ran-ref-du-crs.adoc#ztp-storagelvmcluster-yaml[StorageLVMCluster.yaml],Yes,No LVM Operator,xref:../../telco_ref_design_specs/ran/telco-ran-ref-du-crs.adoc#ztp-storagelvmsubscription-yaml[StorageLVMSubscription.yaml],Yes,No @@ -48,23 +40,6 @@ PTP fast event notifications,xref:../../telco_ref_design_specs/ran/telco-ran-ref PTP Operator - high availability,xref:../../telco_ref_design_specs/ran/telco-ran-ref-du-crs.adoc#ztp-ptpconfigboundary-yaml[PtpConfigBoundary.yaml],No,No PTP Operator - high availability,xref:../../telco_ref_design_specs/ran/telco-ran-ref-du-crs.adoc#ztp-ptpconfigforha-yaml[PtpConfigForHA.yaml],No,No PTP Operator,xref:../../telco_ref_design_specs/ran/telco-ran-ref-du-crs.adoc#ztp-ptpconfigdualcardgmwpc-yaml[PtpConfigDualCardGmWpc.yaml],No,No -======= -LVM Storage,xref:../../telco_ref_design_specs/ran/telco-ran-ref-du-crs.adoc#ztp-lvmoperatorstatus-yaml[LVMOperatorStatus.yaml],No,Yes -LVM Storage,xref:../../telco_ref_design_specs/ran/telco-ran-ref-du-crs.adoc#ztp-storagelvmcluster-yaml[StorageLVMCluster.yaml],No,Yes -LVM Storage,xref:../../telco_ref_design_specs/ran/telco-ran-ref-du-crs.adoc#ztp-storagelvmsubscription-yaml[StorageLVMSubscription.yaml],No,Yes -LVM Storage,xref:../../telco_ref_design_specs/ran/telco-ran-ref-du-crs.adoc#ztp-storagelvmsubscriptionns-yaml[StorageLVMSubscriptionNS.yaml],No,Yes -LVM Storage,xref:../../telco_ref_design_specs/ran/telco-ran-ref-du-crs.adoc#ztp-storagelvmsubscriptionopergroup-yaml[StorageLVMSubscriptionOperGroup.yaml],No,Yes -Node Tuning Operator,xref:../../telco_ref_design_specs/ran/telco-ran-ref-du-crs.adoc#ztp-performanceprofile-yaml[PerformanceProfile.yaml],No,No -Node Tuning Operator,xref:../../telco_ref_design_specs/ran/telco-ran-ref-du-crs.adoc#ztp-tunedperformancepatch-yaml[TunedPerformancePatch.yaml],No,No -PTP fast event notifications,xref:../../telco_ref_design_specs/ran/telco-ran-ref-du-crs.adoc#ztp-ptpconfigboundaryforevent-yaml[PtpConfigBoundaryForEvent.yaml],Yes,Yes -PTP fast event notifications,xref:../../telco_ref_design_specs/ran/telco-ran-ref-du-crs.adoc#ztp-ptpconfigforhaforevent-yaml[PtpConfigForHAForEvent.yaml],Yes,Yes -PTP fast event 
notifications,xref:../../telco_ref_design_specs/ran/telco-ran-ref-du-crs.adoc#ztp-ptpconfigmasterforevent-yaml[PtpConfigMasterForEvent.yaml],Yes,Yes -PTP fast event notifications,xref:../../telco_ref_design_specs/ran/telco-ran-ref-du-crs.adoc#ztp-ptpconfigslaveforevent-yaml[PtpConfigSlaveForEvent.yaml],Yes,Yes -PTP fast event notifications,xref:../../telco_ref_design_specs/ran/telco-ran-ref-du-crs.adoc#ztp-ptpoperatorconfigforevent-yaml[PtpOperatorConfigForEvent.yaml],Yes,No -PTP Operator,xref:../../telco_ref_design_specs/ran/telco-ran-ref-du-crs.adoc#ztp-ptpconfigboundary-yaml[PtpConfigBoundary.yaml],No,No -PTP Operator,xref:../../telco_ref_design_specs/ran/telco-ran-ref-du-crs.adoc#ztp-ptpconfigdualcardgmwpc-yaml[PtpConfigDualCardGmWpc.yaml],No,No -PTP Operator,xref:../../telco_ref_design_specs/ran/telco-ran-ref-du-crs.adoc#ztp-ptpconfigforha-yaml[PtpConfigForHA.yaml],No,Yes ->>>>>>> c17ffd7cec (Adding the Security HCP cherrypick) PTP Operator,xref:../../telco_ref_design_specs/ran/telco-ran-ref-du-crs.adoc#ztp-ptpconfiggmwpc-yaml[PtpConfigGmWpc.yaml],No,No PTP Operator,xref:../../telco_ref_design_specs/ran/telco-ran-ref-du-crs.adoc#ztp-ptpconfigslave-yaml[PtpConfigSlave.yaml],No,No PTP Operator,xref:../../telco_ref_design_specs/ran/telco-ran-ref-du-crs.adoc#ztp-ptpoperatorconfig-yaml[PtpOperatorConfig.yaml],No,No diff --git a/modules/telco-ran-yaml-ref-cluster-tuning.adoc b/modules/telco-ran-yaml-ref-cluster-tuning.adoc index a58ec394d1..c3f3b9d962 100644 --- a/modules/telco-ran-yaml-ref-cluster-tuning.adoc +++ b/modules/telco-ran-yaml-ref-cluster-tuning.adoc @@ -27,13 +27,6 @@ include::snippets/ztp_ConsoleOperatorDisable.yaml[] include::snippets/ztp_09-openshift-marketplace-ns.yaml[] ---- -[id="ztp-09-openshift-marketplace-ns-yaml"] -.09-openshift-marketplace-ns.yaml -[source,yaml] ----- -include::snippets/ztp_09-openshift-marketplace-ns.yaml[] ----- - [id="ztp-defaultcatsrc-yaml"] .DefaultCatsrc.yaml [source,yaml] @@ -62,7 +55,6 @@ include::snippets/ztp_DisconnectedICSP.yaml[] include::snippets/ztp_OperatorHub.yaml[] ---- -<<<<<<< HEAD [id="ztp-reducemonitoringfootprint-yaml"] .ReduceMonitoringFootprint.yaml [source,yaml] @@ -76,5 +68,3 @@ include::snippets/ztp_ReduceMonitoringFootprint.yaml[] ---- include::snippets/ztp_DisableSnoNetworkDiag.yaml[] ---- -======= ->>>>>>> c17ffd7cec (Adding the Security HCP cherrypick) diff --git a/modules/telco-ran-yaml-ref-day-2-operators.adoc b/modules/telco-ran-yaml-ref-day-2-operators.adoc index b76d8638f0..cdb9cc5c41 100644 --- a/modules/telco-ran-yaml-ref-day-2-operators.adoc +++ b/modules/telco-ran-yaml-ref-day-2-operators.adoc @@ -202,16 +202,6 @@ include::snippets/ztp_PtpConfigSlaveForEvent.yaml[] include::snippets/ztp_PtpConfigBoundary.yaml[] ---- -<<<<<<< HEAD -======= -[id="ztp-ptpconfigdualcardgmwpc-yaml"] -.PtpConfigDualCardGmWpc.yaml -[source,yaml] ----- -include::snippets/ztp_PtpConfigDualCardGmWpc.yaml[] ----- - ->>>>>>> c17ffd7cec (Adding the Security HCP cherrypick) [id="ztp-ptpconfigforha-yaml"] .PtpConfigForHA.yaml [source,yaml] @@ -219,7 +209,6 @@ include::snippets/ztp_PtpConfigDualCardGmWpc.yaml[] include::snippets/ztp_PtpConfigForHA.yaml[] ---- -<<<<<<< HEAD [id="ztp-ptpconfigdualcardgmwpc-yaml"] .PtpConfigDualCardGmWpc.yaml [source,yaml] @@ -227,8 +216,6 @@ include::snippets/ztp_PtpConfigForHA.yaml[] include::snippets/ztp_PtpConfigDualCardGmWpc.yaml[] ---- -======= ->>>>>>> c17ffd7cec (Adding the Security HCP cherrypick) [id="ztp-ptpconfiggmwpc-yaml"] .PtpConfigGmWpc.yaml [source,yaml] diff --git a/nodes/index.adoc 
b/nodes/index.adoc index 5ca25cf8be..d0341ebb29 100644 --- a/nodes/index.adoc +++ b/nodes/index.adoc @@ -34,11 +34,18 @@ image::295_OpenShift_Nodes_Overview_1222.png[Overview of control plane and worke The read operations allow an administrator or a developer to get information about nodes in an {product-title} cluster. +ifdef::openshift-rosa-hcp[] +* List all the nodes in a cluster. +* Get information about a node, such as memory and CPU usage, health, status, and age. +* List pods running on a node. +ifndef::openshift-rosa-hcp[] +ifndef::openshift-rosa-hcp[] * xref:../nodes/nodes/nodes-nodes-viewing.adoc#nodes-nodes-viewing-listing_nodes-nodes-viewing[List all the nodes in a cluster]. * Get information about a node, such as memory and CPU usage, health, status, and age. * xref:../nodes/nodes/nodes-nodes-viewing.adoc#nodes-nodes-viewing-listing-pods_nodes-nodes-viewing[List pods running on a node]. +ifndef::openshift-rosa-hcp[] -ifndef::openshift-rosa,openshift-dedicated[] +ifndef::openshift-rosa,openshift-rosa-hcp,openshift-dedicated[] [discrete] === Management operations @@ -52,28 +59,36 @@ through several tasks: * xref:../nodes/nodes/nodes-nodes-managing-max-pods.adoc#nodes-nodes-managing-max-pods-proc_nodes-nodes-managing-max-pods[Configure the number of pods that can run on a node] based on the number of processor cores on the node, a hard limit, or both. * Reboot a node gracefully using xref:../nodes/nodes/nodes-nodes-rebooting.adoc#nodes-nodes-rebooting-affinity_nodes-nodes-rebooting[pod anti-affinity]. * xref:../nodes/nodes/nodes-nodes-working.adoc#deleting-nodes[Delete a node from a cluster] by scaling down the cluster using a compute machine set. To delete a node from a bare-metal cluster, you must first drain all pods on the node and then manually delete the node. -endif::openshift-rosa,openshift-dedicated[] +endif::openshift-rosa,openshift-rosa-hcp,openshift-dedicated[] [discrete] === Enhancement operations {product-title} allows you to do more than just access and manage nodes; as an administrator, you can perform the following tasks on nodes to make the cluster more efficient, application-friendly, and to provide a better environment for your developers. +ifndef::openshift-rosa-hcp[] +* Manage node-level tuning for high-performance applications that require some level of kernel tuning by using the Node Tuning Operator. +* Run background tasks on nodes automatically with daemon sets. You can create and use daemon sets to create shared storage, run a logging pod on every node, or deploy a monitoring agent on all nodes. +endif::openshift-rosa-hcp[] +ifndef::openshift-rosa-hcp[] * Manage node-level tuning for high-performance applications that require some level of kernel tuning by xref:../nodes/nodes/nodes-node-tuning-operator.adoc#nodes-node-tuning-operator[using the Node Tuning Operator]. ifndef::openshift-rosa,openshift-dedicated[] -======= -ifndef::openshift-enterprise,openshift-rosa-hcp,openshift-rosa[] -xref:../nodes/nodes/nodes-node-tuning-operator.adoc#nodes-node-tuning-operator[using the Node Tuning Operator]. +* Enable TLS security profiles on the node to protect communication between the kubelet and the Kubernetes API server. +endif::openshift-rosa,openshift-dedicated[] +* xref:../nodes/jobs/nodes-pods-daemonsets.adoc#nodes-pods-daemonsets[Run background tasks on nodes automatically with daemon sets]. You can create and use daemon sets to create shared storage, run a logging pod on every node, or deploy a monitoring agent on all nodes. 
+ifndef::openshift-rosa,openshift-dedicated[] * xref:../nodes/nodes/nodes-nodes-garbage-collection.adoc#nodes-nodes-garbage-collection[Free node resources using garbage collection]. You can ensure that your nodes are running efficiently by removing terminated containers and the images not referenced by any running pods. * xref:../nodes/nodes/nodes-nodes-managing.adoc#nodes-nodes-kernel-arguments_nodes-nodes-managing[Add kernel arguments to a set of nodes]. * Configure an {product-title} cluster to have worker nodes at the network edge (remote worker nodes). For information on the challenges of having remote worker nodes in an {product-title} cluster and some recommended approaches for managing pods on a remote worker node, see xref:../nodes/edge/nodes-edge-remote-workers.adoc#nodes-edge-remote-workers[Using remote worker nodes at the network edge]. endif::openshift-rosa,openshift-dedicated[] +endif::openshift-rosa-hcp[] [id="pods-overview"] == About pods A pod is one or more containers deployed together on a node. As a cluster administrator, you can define a pod, assign it to run on a healthy node that is ready for scheduling, and manage. A pod runs as long as the containers are running. You cannot change a pod once it is defined and is running. Some operations you can perform when working with pods are: +ifndef::openshift-rosa-hcp[] [discrete] === Read operations @@ -81,12 +96,23 @@ As an administrator, you can get information about pods in a project through the * xref:../nodes/pods/nodes-pods-viewing.adoc#nodes-pods-viewing-project_nodes-pods-viewing[List pods associated with a project], including information such as the number of replicas and restarts, current status, and age. * xref:../nodes/pods/nodes-pods-viewing.adoc#nodes-pods-viewing-usage_nodes-pods-viewing[View pod usage statistics] such as CPU, memory, and storage consumption. +endif::openshift-rosa-hcp[] [discrete] === Management operations The following list of tasks provides an overview of how an administrator can manage pods in an {product-title} cluster. +ifdef::openshift-rosa-hcp[] +* Control scheduling of pods using the advanced scheduling features available in {product-title}: +** Node-to-pod binding rules such as pod affinity, node affinity, and anti-affinity. +** Node labels and selectors. +** Pod topology spread constraints. +* Configure how pods behave after a restart using pod controllers and restart policies. +* Limit both egress and ingress traffic on a pod. +* Add and remove volumes to and from any object that has a pod template. A volume is a mounted file system available to all the containers in a pod. Container storage is ephemeral; you can use volumes to persist container data. +endif::openshift-rosa-hcp[] +ifndef::openshift-rosa-hcp[] * Control scheduling of pods using the advanced scheduling features available in {product-title}: ** Node-to-pod binding rules such as xref:../nodes/scheduling/nodes-scheduler-pod-affinity.adoc#nodes-scheduler-pod-affinity-example-affinity_nodes-scheduler-pod-affinity[pod affinity], xref:../nodes/scheduling/nodes-scheduler-node-affinity.adoc#nodes-scheduler-node-affinity[node affinity], and xref:../nodes/scheduling/nodes-scheduler-pod-affinity.adoc#nodes-scheduler-pod-anti-affinity-configuring_nodes-scheduler-pod-affinity[anti-affinity]. ** xref:../nodes/scheduling/nodes-scheduler-node-selectors.adoc#nodes-scheduler-node-selectors[Node labels and selectors]. 
@@ -102,13 +128,14 @@ endif::openshift-rosa,openshift-dedicated[] * xref:../nodes/pods/nodes-pods-configuring.adoc#nodes-pods-configuring-restart_nodes-pods-configuring[Configure how pods behave after a restart using pod controllers and restart policies]. * xref:../nodes/pods/nodes-pods-configuring.adoc#nodes-pods-configuring-bandwidth_nodes-pods-configuring[Limit both egress and ingress traffic on a pod]. * xref:../nodes/containers/nodes-containers-volumes.adoc#nodes-containers-volumes[Add and remove volumes to and from any object that has a pod template]. A volume is a mounted file system available to all the containers in a pod. Container storage is ephemeral; you can use volumes to persist container data. +endif::openshift-rosa-hcp[] [discrete] === Enhancement operations You can work with pods more easily and efficiently with the help of various tools and features available in {product-title}. The following operations involve using those tools and features to better manage pods. -ifndef::openshift-rosa,openshift-dedicated[] +ifndef::openshift-rosa,openshift-rosa-hcp,openshift-dedicated[] [cols="2,1,2"] |=== |Operation |User |More information @@ -133,7 +160,7 @@ As a developer, use a vertical pod autoscaler to ensure your pods stay up during |=== -endif::openshift-rosa,openshift-dedicated[] +endif::openshift-rosa,openshift-rosa-hcp,openshift-dedicated[] ifdef::openshift-rosa,openshift-dedicated[] * Secrets: Some applications need sensitive information, such as passwords and usernames. An administrator can use the `Secret` object to provide sensitive data to pods xref:../nodes/pods/nodes-pods-secrets.adoc#nodes-pods-secrets[using the `Secret` object]. endif::openshift-rosa,openshift-dedicated[] @@ -143,6 +170,18 @@ endif::openshift-rosa,openshift-dedicated[] A container is the basic unit of an {product-title} application, which comprises the application code packaged along with its dependencies, libraries, and binaries. Containers provide consistency across environments and multiple deployment targets: physical servers, virtual machines (VMs), and private or public cloud. +ifdef::openshift-rosa-hcp[] +Linux container technologies are lightweight mechanisms for isolating running processes and limiting access to only designated resources. +As an administrator, You can perform various tasks on a Linux container, such as: + +* Copy files to and from a container. +* Allow containers to consume API objects. +* Execute remote commands in a container. +* Use port forwarding to access applications in a container. + +{product-title} provides specialized containers called Init containers. Init containers run before application containers and can contain utilities or setup scripts not present in an application image. You can use an Init container to perform tasks before the rest of a pod is deployed. +endif::openshift-rosa-hcp[] +ifndef::openshift-rosa-hcp[] Linux container technologies are lightweight mechanisms for isolating running processes and limiting access to only designated resources. As an administrator, You can perform various tasks on a Linux container, such as: @@ -152,12 +191,13 @@ As an administrator, You can perform various tasks on a Linux container, such as * xref:../nodes/containers/nodes-containers-port-forwarding.adoc#nodes-containers-port-forwarding[Use port forwarding to access applications in a container]. {product-title} provides specialized containers called xref:../nodes/containers/nodes-containers-init.adoc#nodes-containers-init[Init containers]. 
Init containers run before application containers and can contain utilities or setup scripts not present in an application image. You can use an Init container to perform tasks before the rest of a pod is deployed. +endif::openshift-rosa-hcp[] Apart from performing specific tasks on nodes, pods, and containers, you can work with the overall {product-title} cluster to keep the cluster efficient and the application pods highly available. //cannot create the required namespace for these operators -ifndef::openshift-rosa,openshift-dedicated[] +ifndef::openshift-rosa,openshift-rosa-hcp,openshift-dedicated[] [id="nodes-about-autoscaling-pod_{context}"] == About autoscaling pods on a node @@ -177,7 +217,7 @@ Vertical Pod Autoscaler:: The Vertical Pod Autoscaler (VPA) can automatically review the historic and current CPU and memory resources for containers in pods and can update the resource limits and requests based on the usage values it learns. + For more information, see xref:../nodes/pods/nodes-pods-vertical-autoscaler.adoc#nodes-pods-vpa[Automatically adjust pod resource levels with the vertical pod autoscaler]. -endif::openshift-rosa,openshift-dedicated[] +endif::openshift-rosa,openshift-rosa-hcp,openshift-dedicated[] [id="commonterms-node"] == Glossary of common terms for {product-title} nodes diff --git a/operators/admin/olm-restricted-networks.adoc b/operators/admin/olm-restricted-networks.adoc index 17f33eb01f..0ca4c3e16b 100644 --- a/operators/admin/olm-restricted-networks.adoc +++ b/operators/admin/olm-restricted-networks.adoc @@ -20,4 +20,4 @@ This guide describes the following process that is required to enable OLM in dis After enabling OLM in a disconnected environment, you can continue to use your unrestricted workstation to keep your local OperatorHub sources updated as newer versions of Operators are released. -For more information, see xref:../../disconnected/using-olm.adoc#olm-restricted-networks[Using Operator Lifecycle Manager in disconnected environments] in the Disconnected environments section. +For more information, see xref:../../disconnected/using-olm.adoc#olm-restricted-networks[Using Operator Lifecycle Manager in disconnected environments] in the Disconnected environments section. 
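The horizontal and vertical pod autoscalers described in the autoscaling section above are configured through their own API objects. The following `HorizontalPodAutoscaler` is a minimal sketch that scales a hypothetical `frontend` deployment on CPU usage; the object name, the replica bounds, and the 70% target are illustrative assumptions, not values from this documentation.

[source,yaml]
----
apiVersion: autoscaling/v2
kind: HorizontalPodAutoscaler
metadata:
  name: frontend-cpu
spec:
  scaleTargetRef:                  # the workload whose replica count the HPA adjusts
    apiVersion: apps/v1
    kind: Deployment
    name: frontend
  minReplicas: 2
  maxReplicas: 10
  metrics:
  - type: Resource
    resource:
      name: cpu
      target:
        type: Utilization
        averageUtilization: 70     # add replicas when average CPU use exceeds 70% of requests
----

A vertical pod autoscaler is declared in broadly the same way, through a `VerticalPodAutoscaler` object that targets the workload and sets an update mode for applying the learned requests and limits.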
\ No newline at end of file diff --git a/rest_api/objects/index.adoc b/rest_api/objects/index.adoc index d9f434b698..96b26e4ef9 100644 --- a/rest_api/objects/index.adoc +++ b/rest_api/objects/index.adoc @@ -1818,21 +1818,12 @@ Type:: | Property | Type | Description | `owned` -<<<<<<< HEAD | `array (APIServiceDescription)` | | `required` | `array (APIServiceDescription)` | -======= -| xref:../objects/index.adoc#com-github-operator-framework-api-pkg-operators-v1alpha1-APIServiceDescription[`array (APIServiceDescription)`] -| - -| `required` -| xref:../objects/index.adoc#com-github-operator-framework-api-pkg-operators-v1alpha1-APIServiceDescription[`array (APIServiceDescription)`] -| ->>>>>>> c17ffd7cec (Adding the Security HCP cherrypick) |=== @@ -1860,21 +1851,12 @@ Type:: | Property | Type | Description | `owned` -<<<<<<< HEAD | `array (CRDDescription)` | | `required` | `array (CRDDescription)` | -======= -| xref:../objects/index.adoc#com-github-operator-framework-api-pkg-operators-v1alpha1-CRDDescription[`array (CRDDescription)`] -| - -| `required` -| xref:../objects/index.adoc#com-github-operator-framework-api-pkg-operators-v1alpha1-CRDDescription[`array (CRDDescription)`] -| ->>>>>>> c17ffd7cec (Adding the Security HCP cherrypick) |=== @@ -1949,11 +1931,7 @@ Required:: | `metadata` | xref:../objects/index.adoc#io-k8s-apimachinery-pkg-apis-meta-v1-ListMeta[`ListMeta`] -<<<<<<< HEAD | -======= -| ->>>>>>> c17ffd7cec (Adding the Security HCP cherrypick) |=== @@ -2609,11 +2587,7 @@ Required:: | `metadata` | xref:../objects/index.adoc#io-k8s-apimachinery-pkg-apis-meta-v1-ListMeta[`ListMeta`] -<<<<<<< HEAD | -======= -| ->>>>>>> c17ffd7cec (Adding the Security HCP cherrypick) |=== @@ -2768,11 +2742,7 @@ Type:: | defaultMode is optional: mode bits used to set permissions on created files by default. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. Defaults to 0644. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set. | `items` -<<<<<<< HEAD | `array (KeyToPath)` -======= -| xref:../objects/index.adoc#io-k8s-api-core-v1-KeyToPath[`array (KeyToPath)`] ->>>>>>> c17ffd7cec (Adding the Security HCP cherrypick) | items if unspecified, each key-value pair in the Data field of the referenced ConfigMap will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the ConfigMap, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'. | `name` @@ -2817,11 +2787,7 @@ Required:: | fsType to mount. Ex. "ext4", "xfs", "ntfs". If not provided, the empty value is passed to the associated CSI driver which will determine the default filesystem to apply. 
| `nodePublishSecretRef` -<<<<<<< HEAD | xref:../objects/index.adoc#io-k8s-api-core-v1-LocalObjectReference_v2[`LocalObjectReference`] -======= -| xref:../objects/index.adoc#io-k8s-api-core-v1-LocalObjectReference[`LocalObjectReference`] ->>>>>>> c17ffd7cec (Adding the Security HCP cherrypick) | nodePublishSecretRef is a reference to the secret object containing sensitive information to pass to the CSI driver to complete the CSI NodePublishVolume and NodeUnpublishVolume calls. This field is optional, and may be empty if no secret is required. If the secret object contains more than one secret, all secret references are passed. | `readOnly` @@ -2907,11 +2873,7 @@ Required:: | Variable references $(VAR_NAME) are expanded using the previously defined environment variables in the container and any service environment variables. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless of whether the variable exists or not. Defaults to "". | `valueFrom` -<<<<<<< HEAD | `EnvVarSource` -======= -| xref:../objects/index.adoc#io-k8s-api-core-v1-EnvVarSource[`EnvVarSource`] ->>>>>>> c17ffd7cec (Adding the Security HCP cherrypick) | Source for the environment variable's value. Cannot be used if value is not empty. |=== @@ -3082,11 +3044,7 @@ Required:: | `lastTransitionTime` | xref:../objects/index.adoc#io-k8s-apimachinery-pkg-apis-meta-v1-Time[`Time`] -<<<<<<< HEAD | -======= -| ->>>>>>> c17ffd7cec (Adding the Security HCP cherrypick) | `message` | `string` @@ -3747,7 +3705,6 @@ Type:: | accessModes contains all ways the volume can be mounted. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes | `awsElasticBlockStore` -<<<<<<< HEAD | `AWSElasticBlockStoreVolumeSource` | awsElasticBlockStore represents an AWS Disk resource that is attached to a kubelet's host machine and then exposed to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore @@ -3757,17 +3714,6 @@ Type:: | `azureFile` | `AzureFilePersistentVolumeSource` -======= -| xref:../objects/index.adoc#io-k8s-api-core-v1-AWSElasticBlockStoreVolumeSource[`AWSElasticBlockStoreVolumeSource`] -| awsElasticBlockStore represents an AWS Disk resource that is attached to a kubelet's host machine and then exposed to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore - -| `azureDisk` -| xref:../objects/index.adoc#io-k8s-api-core-v1-AzureDiskVolumeSource[`AzureDiskVolumeSource`] -| azureDisk represents an Azure Data Disk mount on the host and bind mount to the pod. - -| `azureFile` -| xref:../objects/index.adoc#io-k8s-api-core-v1-AzureFilePersistentVolumeSource[`AzureFilePersistentVolumeSource`] ->>>>>>> c17ffd7cec (Adding the Security HCP cherrypick) | azureFile represents an Azure File Service mount on the host and bind mount to the pod. | `capacity` @@ -3775,19 +3721,11 @@ Type:: | capacity is the description of the persistent volume's resources and capacity. 
More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#capacity | `cephfs` -<<<<<<< HEAD | `CephFSPersistentVolumeSource` | cephFS represents a Ceph FS mount on the host that shares a pod's lifetime | `cinder` | `CinderPersistentVolumeSource` -======= -| xref:../objects/index.adoc#io-k8s-api-core-v1-CephFSPersistentVolumeSource[`CephFSPersistentVolumeSource`] -| cephFS represents a Ceph FS mount on the host that shares a pod's lifetime - -| `cinder` -| xref:../objects/index.adoc#io-k8s-api-core-v1-CinderPersistentVolumeSource[`CinderPersistentVolumeSource`] ->>>>>>> c17ffd7cec (Adding the Security HCP cherrypick) | cinder represents a cinder volume attached and mounted on kubelets host machine. More info: https://examples.k8s.io/mysql-cinder-pd/README.md | `claimRef` @@ -3795,7 +3733,6 @@ Type:: | claimRef is part of a bi-directional binding between PersistentVolume and PersistentVolumeClaim. Expected to be non-nil when bound. claim.VolumeName is the authoritative bind between PV and PVC. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#binding | `csi` -<<<<<<< HEAD | `CSIPersistentVolumeSource` | csi represents storage that is handled by an external CSI driver (Beta feature). @@ -3829,41 +3766,6 @@ Type:: | `local` | `LocalVolumeSource` -======= -| xref:../objects/index.adoc#io-k8s-api-core-v1-CSIPersistentVolumeSource[`CSIPersistentVolumeSource`] -| csi represents storage that is handled by an external CSI driver (Beta feature). - -| `fc` -| xref:../objects/index.adoc#io-k8s-api-core-v1-FCVolumeSource[`FCVolumeSource`] -| fc represents a Fibre Channel resource that is attached to a kubelet's host machine and then exposed to the pod. - -| `flexVolume` -| xref:../objects/index.adoc#io-k8s-api-core-v1-FlexPersistentVolumeSource[`FlexPersistentVolumeSource`] -| flexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin. - -| `flocker` -| xref:../objects/index.adoc#io-k8s-api-core-v1-FlockerVolumeSource[`FlockerVolumeSource`] -| flocker represents a Flocker volume attached to a kubelet's host machine and exposed to the pod for its usage. This depends on the Flocker control service being running - -| `gcePersistentDisk` -| xref:../objects/index.adoc#io-k8s-api-core-v1-GCEPersistentDiskVolumeSource[`GCEPersistentDiskVolumeSource`] -| gcePersistentDisk represents a GCE Disk resource that is attached to a kubelet's host machine and then exposed to the pod. Provisioned by an admin. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk - -| `glusterfs` -| xref:../objects/index.adoc#io-k8s-api-core-v1-GlusterfsPersistentVolumeSource[`GlusterfsPersistentVolumeSource`] -| glusterfs represents a Glusterfs volume that is attached to a host and exposed to the pod. Provisioned by an admin. More info: https://examples.k8s.io/volumes/glusterfs/README.md - -| `hostPath` -| xref:../objects/index.adoc#io-k8s-api-core-v1-HostPathVolumeSource[`HostPathVolumeSource`] -| hostPath represents a directory on the host. Provisioned by a developer or tester. This is useful for single-node development and testing only! On-host storage is not supported in any way and WILL NOT WORK in a multi-node cluster. 
More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath - -| `iscsi` -| xref:../objects/index.adoc#io-k8s-api-core-v1-ISCSIPersistentVolumeSource[`ISCSIPersistentVolumeSource`] -| iscsi represents an ISCSI Disk resource that is attached to a kubelet's host machine and then exposed to the pod. Provisioned by an admin. - -| `local` -| xref:../objects/index.adoc#io-k8s-api-core-v1-LocalVolumeSource[`LocalVolumeSource`] ->>>>>>> c17ffd7cec (Adding the Security HCP cherrypick) | local represents directly-attached storage with node affinity | `mountOptions` @@ -3871,19 +3773,11 @@ Type:: | mountOptions is the list of mount options, e.g. ["ro", "soft"]. Not validated - mount will simply fail if one is invalid. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes/#mount-options | `nfs` -<<<<<<< HEAD | `NFSVolumeSource` | nfs represents an NFS mount on the host. Provisioned by an admin. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs | `nodeAffinity` | `VolumeNodeAffinity` -======= -| xref:../objects/index.adoc#io-k8s-api-core-v1-NFSVolumeSource[`NFSVolumeSource`] -| nfs represents an NFS mount on the host. Provisioned by an admin. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs - -| `nodeAffinity` -| xref:../objects/index.adoc#io-k8s-api-core-v1-VolumeNodeAffinity[`VolumeNodeAffinity`] ->>>>>>> c17ffd7cec (Adding the Security HCP cherrypick) | nodeAffinity defines constraints that limit what nodes this volume can be accessed from. This field influences the scheduling of pods that use this volume. | `persistentVolumeReclaimPolicy` @@ -3896,7 +3790,6 @@ Possible enum values: - `"Retain"` means the volume will be left in its current phase (Released) for manual reclamation by the administrator. The default policy is Retain. | `photonPersistentDisk` -<<<<<<< HEAD | `PhotonPersistentDiskVolumeSource` | photonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine @@ -3914,25 +3807,6 @@ Possible enum values: | `scaleIO` | `ScaleIOPersistentVolumeSource` -======= -| xref:../objects/index.adoc#io-k8s-api-core-v1-PhotonPersistentDiskVolumeSource[`PhotonPersistentDiskVolumeSource`] -| photonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine - -| `portworxVolume` -| xref:../objects/index.adoc#io-k8s-api-core-v1-PortworxVolumeSource[`PortworxVolumeSource`] -| portworxVolume represents a portworx volume attached and mounted on kubelets host machine - -| `quobyte` -| xref:../objects/index.adoc#io-k8s-api-core-v1-QuobyteVolumeSource[`QuobyteVolumeSource`] -| quobyte represents a Quobyte mount on the host that shares a pod's lifetime - -| `rbd` -| xref:../objects/index.adoc#io-k8s-api-core-v1-RBDPersistentVolumeSource[`RBDPersistentVolumeSource`] -| rbd represents a Rados Block Device mount on the host that shares a pod's lifetime. More info: https://examples.k8s.io/volumes/rbd/README.md - -| `scaleIO` -| xref:../objects/index.adoc#io-k8s-api-core-v1-ScaleIOPersistentVolumeSource[`ScaleIOPersistentVolumeSource`] ->>>>>>> c17ffd7cec (Adding the Security HCP cherrypick) | scaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes. | `storageClassName` @@ -3940,11 +3814,7 @@ Possible enum values: | storageClassName is the name of StorageClass to which this persistent volume belongs. Empty value means that this volume does not belong to any StorageClass. 
| `storageos` -<<<<<<< HEAD | `StorageOSPersistentVolumeSource` -======= -| xref:../objects/index.adoc#io-k8s-api-core-v1-StorageOSPersistentVolumeSource[`StorageOSPersistentVolumeSource`] ->>>>>>> c17ffd7cec (Adding the Security HCP cherrypick) | storageOS represents a StorageOS volume that is attached to the kubelet's host machine and mounted into the pod More info: https://examples.k8s.io/volumes/storageos/README.md | `volumeAttributesClassName` @@ -3960,11 +3830,7 @@ Possible enum values: - `"Filesystem"` means the volume will be or is formatted with a filesystem. | `vsphereVolume` -<<<<<<< HEAD | `VsphereVirtualDiskVolumeSource` -======= -| xref:../objects/index.adoc#io-k8s-api-core-v1-VsphereVirtualDiskVolumeSource[`VsphereVirtualDiskVolumeSource`] ->>>>>>> c17ffd7cec (Adding the Security HCP cherrypick) | vsphereVolume represents a vSphere volume attached and mounted on kubelets host machine |=== @@ -4077,11 +3943,7 @@ Type:: | Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata | `spec` -<<<<<<< HEAD | `PodSpec` -======= -| xref:../objects/index.adoc#io-k8s-api-core-v1-PodSpec[`PodSpec`] ->>>>>>> c17ffd7cec (Adding the Security HCP cherrypick) | Specification of the desired behavior of the pod. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status |=== @@ -4194,11 +4056,7 @@ Type:: | hard is the set of desired hard limits for each named resource. More info: https://kubernetes.io/docs/concepts/policy/resource-quotas/ | `scopeSelector` -<<<<<<< HEAD | `ScopeSelector_v2` -======= -| xref:../objects/index.adoc#io-k8s-api-core-v1-ScopeSelector_v2[`ScopeSelector_v2`] ->>>>>>> c17ffd7cec (Adding the Security HCP cherrypick) | scopeSelector is also a collection of filters like scopes that must match each object tracked by a quota but expressed using ScopeSelectorOperator in combination with possible values. For a resource to match, both scopes AND scopeSelector (if specified in spec), must be matched. | `scopes` @@ -4260,11 +4118,7 @@ Type:: | Property | Type | Description | `claims` -<<<<<<< HEAD | `array (ResourceClaim)` -======= -| xref:../objects/index.adoc#io-k8s-api-core-v1-ResourceClaim[`array (ResourceClaim)`] ->>>>>>> c17ffd7cec (Adding the Security HCP cherrypick) | Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. @@ -4401,11 +4255,7 @@ Type:: | defaultMode is Optional: mode bits used to set permissions on created files by default. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. Defaults to 0644. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set. | `items` -<<<<<<< HEAD | `array (KeyToPath)` -======= -| xref:../objects/index.adoc#io-k8s-api-core-v1-KeyToPath[`array (KeyToPath)`] ->>>>>>> c17ffd7cec (Adding the Security HCP cherrypick) | items If unspecified, each key-value pair in the Data field of the referenced Secret will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. 
If a key is specified which is not present in the Secret, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'. | `optional` @@ -4574,11 +4424,7 @@ Type:: | Property | Type | Description | `matchLabelExpressions` -<<<<<<< HEAD | `array (TopologySelectorLabelRequirement)` -======= -| xref:../objects/index.adoc#io-k8s-api-core-v1-TopologySelectorLabelRequirement[`array (TopologySelectorLabelRequirement)`] ->>>>>>> c17ffd7cec (Adding the Security HCP cherrypick) | A list of topology selector requirements by labels. |=== @@ -5012,11 +4858,7 @@ Type:: | Property | Type | Description | `clusterRoleSelectors` -<<<<<<< HEAD | `array (LabelSelector_v3)` -======= -| xref:../objects/index.adoc#io-k8s-apimachinery-pkg-apis-meta-v1-LabelSelector_v3[`array (LabelSelector_v3)`] ->>>>>>> c17ffd7cec (Adding the Security HCP cherrypick) | ClusterRoleSelectors holds a list of selectors which will be used to find ClusterRoles and create the rules. If any of the selectors match, then the ClusterRole's permissions will be added |=== @@ -5502,7 +5344,6 @@ Type:: | | `additionalItems` -<<<<<<< HEAD | `` | @@ -5520,63 +5361,27 @@ Type:: | `default` | `JSON` -======= -| xref:../objects/index.adoc#io-k8s-apiextensions-apiserver-pkg-apis-apiextensions-v1-JSONSchemaPropsOrBool[``] -| - -| `additionalProperties` -| xref:../objects/index.adoc#io-k8s-apiextensions-apiserver-pkg-apis-apiextensions-v1-JSONSchemaPropsOrBool[``] -| - -| `allOf` -| xref:../objects/index.adoc#io-k8s-apiextensions-apiserver-pkg-apis-apiextensions-v1-JSONSchemaProps[`array (undefined)`] -| - -| `anyOf` -| xref:../objects/index.adoc#io-k8s-apiextensions-apiserver-pkg-apis-apiextensions-v1-JSONSchemaProps[`array (undefined)`] -| - -| `default` -| xref:../objects/index.adoc#io-k8s-apiextensions-apiserver-pkg-apis-apiextensions-v1-JSON[`JSON`] ->>>>>>> c17ffd7cec (Adding the Security HCP cherrypick) | default is a default value for undefined object fields. Defaulting is a beta feature under the CustomResourceDefaulting feature gate. Defaulting requires spec.preserveUnknownFields to be false. 
| `definitions` | xref:../objects/index.adoc#io-k8s-apiextensions-apiserver-pkg-apis-apiextensions-v1-JSONSchemaProps[`object (undefined)`] -<<<<<<< HEAD | | `dependencies` | `object (undefined)` | -======= -| - -| `dependencies` -| xref:../objects/index.adoc#io-k8s-apiextensions-apiserver-pkg-apis-apiextensions-v1-JSONSchemaPropsOrStringArray[`object (undefined)`] -| ->>>>>>> c17ffd7cec (Adding the Security HCP cherrypick) | `description` | `string` | | `enum` -<<<<<<< HEAD | `array (JSON)` | | `example` | `JSON`] | -======= -| xref:../objects/index.adoc#io-k8s-apiextensions-apiserver-pkg-apis-apiextensions-v1-JSON[`array (JSON)`] -| - -| `example` -| xref:../objects/index.adoc#io-k8s-apiextensions-apiserver-pkg-apis-apiextensions-v1-JSON[`JSON`] -| ->>>>>>> c17ffd7cec (Adding the Security HCP cherrypick) | `exclusiveMaximum` | `boolean` @@ -5587,13 +5392,8 @@ Type:: | | `externalDocs` -<<<<<<< HEAD | `ExternalDocumentation` | -======= -| xref:../objects/index.adoc#io-k8s-apiextensions-apiserver-pkg-apis-apiextensions-v1-ExternalDocumentation[`ExternalDocumentation`] -| ->>>>>>> c17ffd7cec (Adding the Security HCP cherrypick) | `format` | `string` @@ -5606,13 +5406,8 @@ Type:: | | `items` -<<<<<<< HEAD | `` | -======= -| xref:../objects/index.adoc#io-k8s-apiextensions-apiserver-pkg-apis-apiextensions-v1-JSONSchemaPropsOrArray[``] -| ->>>>>>> c17ffd7cec (Adding the Security HCP cherrypick) | `maxItems` | `integer` @@ -5652,11 +5447,7 @@ Type:: | `not` | xref:../objects/index.adoc#io-k8s-apiextensions-apiserver-pkg-apis-apiextensions-v1-JSONSchemaProps[``] -<<<<<<< HEAD | -======= -| ->>>>>>> c17ffd7cec (Adding the Security HCP cherrypick) | `nullable` | `boolean` @@ -5664,11 +5455,7 @@ Type:: | `oneOf` | xref:../objects/index.adoc#io-k8s-apiextensions-apiserver-pkg-apis-apiextensions-v1-JSONSchemaProps[`array (undefined)`] -<<<<<<< HEAD | -======= -| ->>>>>>> c17ffd7cec (Adding the Security HCP cherrypick) | `pattern` | `string` @@ -5676,19 +5463,11 @@ Type:: | `patternProperties` | xref:../objects/index.adoc#io-k8s-apiextensions-apiserver-pkg-apis-apiextensions-v1-JSONSchemaProps[`object (undefined)`] -<<<<<<< HEAD | | `properties` | xref:../objects/index.adoc#io-k8s-apiextensions-apiserver-pkg-apis-apiextensions-v1-JSONSchemaProps[`object (undefined)`] | -======= -| - -| `properties` -| xref:../objects/index.adoc#io-k8s-apiextensions-apiserver-pkg-apis-apiextensions-v1-JSONSchemaProps[`object (undefined)`] -| ->>>>>>> c17ffd7cec (Adding the Security HCP cherrypick) | `required` | `array (string)` @@ -5764,11 +5543,7 @@ Defaults to atomic for arrays. | x-kubernetes-preserve-unknown-fields stops the API server decoding step from pruning fields which are not specified in the validation schema. This affects fields recursively, but switches back to normal pruning behaviour if nested properties or additionalProperties are specified in the schema. This can either be true or undefined. False is forbidden. | `x-kubernetes-validations` -<<<<<<< HEAD | `array (ValidationRule)` -======= -| xref:../objects/index.adoc#io-k8s-apiextensions-apiserver-pkg-apis-apiextensions-v1-ValidationRule[`array (ValidationRule)`] ->>>>>>> c17ffd7cec (Adding the Security HCP cherrypick) | x-kubernetes-validations describes a list of validation rules written in the CEL expression language. This field is an alpha-level. Using this field requires the feature gate `CustomResourceValidationExpressions` to be enabled. 
|=== @@ -5919,11 +5694,7 @@ Type:: | Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the "orphan" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both. | `preconditions` -<<<<<<< HEAD | `Preconditions` -======= -| xref:../objects/index.adoc#io-k8s-apimachinery-pkg-apis-meta-v1-Preconditions[`Preconditions`] ->>>>>>> c17ffd7cec (Adding the Security HCP cherrypick) | Must be fulfilled before a deletion is carried out. If not possible, a 409 Conflict status will be returned. | `propagationPolicy` @@ -6008,11 +5779,7 @@ Type:: | Property | Type | Description | `matchExpressions` -<<<<<<< HEAD | `array (LabelSelectorRequirement)` -======= -| xref:../objects/index.adoc#io-k8s-apimachinery-pkg-apis-meta-v1-LabelSelectorRequirement[`array (LabelSelectorRequirement)`] ->>>>>>> c17ffd7cec (Adding the Security HCP cherrypick) | matchExpressions is a list of label selector requirements. The requirements are ANDed. | `matchLabels` @@ -6043,11 +5810,7 @@ Type:: | Property | Type | Description | `matchExpressions` -<<<<<<< HEAD | `array (LabelSelectorRequirement_v2)` -======= -| xref:../objects/index.adoc#io-k8s-apimachinery-pkg-apis-meta-v1-LabelSelectorRequirement_v2[`array (LabelSelectorRequirement_v2)`] ->>>>>>> c17ffd7cec (Adding the Security HCP cherrypick) | matchExpressions is a list of label selector requirements. The requirements are ANDed. | `matchLabels` @@ -6172,11 +5935,7 @@ Applied only if Name is not specified. More info: https://git.k8s.io/community/c | Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels | `managedFields` -<<<<<<< HEAD | `array (ManagedFieldsEntry)` -======= -| xref:../objects/index.adoc#io-k8s-apimachinery-pkg-apis-meta-v1-ManagedFieldsEntry[`array (ManagedFieldsEntry)`] ->>>>>>> c17ffd7cec (Adding the Security HCP cherrypick) | ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like "ci-cd". The set of fields is always in the version that the workflow used when modifying the object. | `name` @@ -6190,11 +5949,7 @@ Applied only if Name is not specified. More info: https://git.k8s.io/community/c Must be a DNS_LABEL. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces | `ownerReferences` -<<<<<<< HEAD | `array (OwnerReference)` -======= -| xref:../objects/index.adoc#io-k8s-apimachinery-pkg-apis-meta-v1-OwnerReference[`array (OwnerReference)`] ->>>>>>> c17ffd7cec (Adding the Security HCP cherrypick) | List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller. | `resourceVersion` @@ -6277,11 +6032,7 @@ Applied only if Name is not specified. 
More info: https://git.k8s.io/community/c | Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels | `managedFields` -<<<<<<< HEAD | `array (ManagedFieldsEntry)` -======= -| xref:../objects/index.adoc#io-k8s-apimachinery-pkg-apis-meta-v1-ManagedFieldsEntry[`array (ManagedFieldsEntry)`] ->>>>>>> c17ffd7cec (Adding the Security HCP cherrypick) | ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like "ci-cd". The set of fields is always in the version that the workflow used when modifying the object. | `name` @@ -6295,11 +6046,7 @@ Applied only if Name is not specified. More info: https://git.k8s.io/community/c Must be a DNS_LABEL. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces | `ownerReferences` -<<<<<<< HEAD | `array (OwnerReference)` -======= -| xref:../objects/index.adoc#io-k8s-apimachinery-pkg-apis-meta-v1-OwnerReference[`array (OwnerReference)`] ->>>>>>> c17ffd7cec (Adding the Security HCP cherrypick) | List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller. | `resourceVersion` @@ -6350,11 +6097,7 @@ Type:: | Suggested HTTP return code for this status, 0 if not set. | `details` -<<<<<<< HEAD | `StatusDetails` -======= -| xref:../objects/index.adoc#io-k8s-apimachinery-pkg-apis-meta-v1-StatusDetails[`StatusDetails`] ->>>>>>> c17ffd7cec (Adding the Security HCP cherrypick) | Extended data associated with the reason. Each reason may define its own extended details. This field is optional and the data returned is not guaranteed to conform to any schema except that defined by the reason type. | `kind` @@ -6409,11 +6152,7 @@ Type:: | Suggested HTTP return code for this status, 0 if not set. | `details` -<<<<<<< HEAD | `StatusDetails_v2` -======= -| xref:../objects/index.adoc#io-k8s-apimachinery-pkg-apis-meta-v1-StatusDetails_v2[`StatusDetails_v2`] ->>>>>>> c17ffd7cec (Adding the Security HCP cherrypick) | Extended data associated with the reason. Each reason may define its own extended details. This field is optional and the data returned is not guaranteed to conform to any schema except that defined by the reason type. | `kind` @@ -6468,11 +6207,7 @@ Type:: | Suggested HTTP return code for this status, 0 if not set. | `details` -<<<<<<< HEAD | `StatusDetails_v2` -======= -| xref:../objects/index.adoc#io-k8s-apimachinery-pkg-apis-meta-v1-StatusDetails_v2[`StatusDetails_v2`] ->>>>>>> c17ffd7cec (Adding the Security HCP cherrypick) | Extended data associated with the reason. Each reason may define its own extended details. This field is optional and the data returned is not guaranteed to conform to any schema except that defined by the reason type. | `kind` @@ -6527,11 +6262,7 @@ Type:: | Suggested HTTP return code for this status, 0 if not set. 
| `details` -<<<<<<< HEAD | `StatusDetails_v2` -======= -| xref:../objects/index.adoc#io-k8s-apimachinery-pkg-apis-meta-v1-StatusDetails_v2[`StatusDetails_v2`] ->>>>>>> c17ffd7cec (Adding the Security HCP cherrypick) | Extended data associated with the reason. Each reason may define its own extended details. This field is optional and the data returned is not guaranteed to conform to any schema except that defined by the reason type. | `kind` @@ -6586,11 +6317,7 @@ Type:: | Suggested HTTP return code for this status, 0 if not set. | `details` -<<<<<<< HEAD | `StatusDetails_v2` -======= -| xref:../objects/index.adoc#io-k8s-apimachinery-pkg-apis-meta-v1-StatusDetails_v2[`StatusDetails_v2`] ->>>>>>> c17ffd7cec (Adding the Security HCP cherrypick) | Extended data associated with the reason. Each reason may define its own extended details. This field is optional and the data returned is not guaranteed to conform to any schema except that defined by the reason type. | `kind` @@ -6645,11 +6372,7 @@ Type:: | Suggested HTTP return code for this status, 0 if not set. | `details` -<<<<<<< HEAD | `StatusDetails_v2` -======= -| xref:../objects/index.adoc#io-k8s-apimachinery-pkg-apis-meta-v1-StatusDetails_v2[`StatusDetails_v2`] ->>>>>>> c17ffd7cec (Adding the Security HCP cherrypick) | Extended data associated with the reason. Each reason may define its own extended details. This field is optional and the data returned is not guaranteed to conform to any schema except that defined by the reason type. | `kind` @@ -6704,11 +6427,7 @@ Type:: | Suggested HTTP return code for this status, 0 if not set. | `details` -<<<<<<< HEAD | `StatusDetails_v2` -======= -| xref:../objects/index.adoc#io-k8s-apimachinery-pkg-apis-meta-v1-StatusDetails_v2[`StatusDetails_v2`] ->>>>>>> c17ffd7cec (Adding the Security HCP cherrypick) | Extended data associated with the reason. Each reason may define its own extended details. This field is optional and the data returned is not guaranteed to conform to any schema except that defined by the reason type. | `kind` @@ -6763,11 +6482,7 @@ Type:: | Suggested HTTP return code for this status, 0 if not set. | `details` -<<<<<<< HEAD | `StatusDetails_v2` -======= -| xref:../objects/index.adoc#io-k8s-apimachinery-pkg-apis-meta-v1-StatusDetails_v2[`StatusDetails_v2`] ->>>>>>> c17ffd7cec (Adding the Security HCP cherrypick) | Extended data associated with the reason. Each reason may define its own extended details. This field is optional and the data returned is not guaranteed to conform to any schema except that defined by the reason type. | `kind` @@ -6822,11 +6537,7 @@ Type:: | Suggested HTTP return code for this status, 0 if not set. | `details` -<<<<<<< HEAD | `StatusDetails_v2` -======= -| xref:../objects/index.adoc#io-k8s-apimachinery-pkg-apis-meta-v1-StatusDetails_v2[`StatusDetails_v2`] ->>>>>>> c17ffd7cec (Adding the Security HCP cherrypick) | Extended data associated with the reason. Each reason may define its own extended details. This field is optional and the data returned is not guaranteed to conform to any schema except that defined by the reason type. | `kind` @@ -6881,11 +6592,7 @@ Type:: | Suggested HTTP return code for this status, 0 if not set. | `details` -<<<<<<< HEAD | `StatusDetails_v2` -======= -| xref:../objects/index.adoc#io-k8s-apimachinery-pkg-apis-meta-v1-StatusDetails_v2[`StatusDetails_v2`] ->>>>>>> c17ffd7cec (Adding the Security HCP cherrypick) | Extended data associated with the reason. Each reason may define its own extended details. 
This field is optional and the data returned is not guaranteed to conform to any schema except that defined by the reason type. | `kind` @@ -6940,11 +6647,7 @@ Type:: | Suggested HTTP return code for this status, 0 if not set. | `details` -<<<<<<< HEAD | `StatusDetails_v2` -======= -| xref:../objects/index.adoc#io-k8s-apimachinery-pkg-apis-meta-v1-StatusDetails_v2[`StatusDetails_v2`] ->>>>>>> c17ffd7cec (Adding the Security HCP cherrypick) | Extended data associated with the reason. Each reason may define its own extended details. This field is optional and the data returned is not guaranteed to conform to any schema except that defined by the reason type. | `kind` diff --git a/rosa_architecture/about-hcp.adoc b/rosa_architecture/about-hcp.adoc index 8113c277a6..f427726cb1 100644 --- a/rosa_architecture/about-hcp.adoc +++ b/rosa_architecture/about-hcp.adoc @@ -89,7 +89,7 @@ ifdef::openshift-rosa-hcp[] link:https://docs.openshift.com/rosa/rosa_architecture/rosa_policy_service_definition/rosa-policy-process-security.html#rosa-policy-process-security[Understanding process and security] endif::openshift-rosa-hcp[] ifndef::openshift-rosa-hcp[] -xref:../../rosa_architecture/rosa_policy_service_definition/rosa-policy-process-security.adoc#rosa-policy-process-security[Understanding process and security] +xref:../rosa_architecture/rosa_policy_service_definition/rosa-policy-process-security.adoc#rosa-policy-process-security[Understanding process and security] endif::openshift-rosa-hcp[] | xref:../rosa_architecture/rosa_policy_service_definition/rosa-hcp-service-definition.adoc#rosa-hcp-service-definition[{hcp-title} service definition] diff --git a/rosa_architecture/cloud-experts-rosa-hcp-sts-explained.adoc b/rosa_architecture/cloud-experts-rosa-hcp-sts-explained.adoc index 0bbc265523..83d134e93e 100644 --- a/rosa_architecture/cloud-experts-rosa-hcp-sts-explained.adoc +++ b/rosa_architecture/cloud-experts-rosa-hcp-sts-explained.adoc @@ -56,7 +56,6 @@ The policies determine the allowed actions for each of the roles. See link:https *** link:https://docs.aws.amazon.com/aws-managed-policy/latest/reference/ROSAKubeControllerPolicy.html[ROSAKubeControllerPolicy] *** link:https://docs.aws.amazon.com/aws-managed-policy/latest/reference/ROSAManageSubscription.html[ROSAManageSubscription] *** link:https://docs.aws.amazon.com/aws-managed-policy/latest/reference/ROSANodePoolManagementPolicy.html[ROSANodePoolManagementPolicy] --- + [NOTE] ==== diff --git a/rosa_architecture/rosa-understanding.adoc b/rosa_architecture/rosa-understanding.adoc index c159dfb2fa..0d4c63888b 100644 --- a/rosa_architecture/rosa-understanding.adoc +++ b/rosa_architecture/rosa-understanding.adoc @@ -41,7 +41,7 @@ For additional information about ROSA installation, see link:https://www.redhat. //- The service is limited to the set permissions. //- When the service is run, it obtains credentials that expire in one hour, so there is no need to rotate or revoke credentials. The expiration also reduces the risks of credentials leaking and being reused. -//A listing of the account-wide and per-cluster roles is provided in xref:../rosa_architecture/rosa-sts-about-iam-resources.adoc#rosa-sts-about-iam-resources[About IAM resources for ROSA clusters that use STS]. +//A listing of the account-wide and per-cluster roles is provided in ../rosa_architecture/rosa-sts-about-iam-resources.adoc#rosa-sts-about-iam-resources[About IAM resources for ROSA clusters that use STS]. 
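The account-wide STS roles and the AWS managed policies listed above are typically created once per AWS account before the first cluster is installed. The following ROSA CLI invocation is a hedged sketch of that step: `--mode auto` lets the CLI create the roles directly in AWS, and the `ManagedOpenShift` prefix shown here is only the common default. Flags can vary between CLI versions, so treat this as an illustration and check `rosa create account-roles --help` on your own installation.

[source,terminal]
----
$ rosa create account-roles --mode auto --prefix ManagedOpenShift
----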
//[id="rosa-understanding-aws-without-sts_{context}"] //=== ROSA without STS @@ -60,7 +60,6 @@ To get started with deploying your cluster, ensure your AWS account has met the == Additional resources * xref:../ocm/ocm-overview.adoc#ocm-overview[OpenShift Cluster Manager] -* xref:../rosa_architecture/rosa-sts-about-iam-resources.adoc#rosa-sts-about-iam-resources[About IAM resources for ROSA clusters that use STS] * xref:../rosa_getting_started/rosa-getting-started.adoc#rosa-getting-started[Getting started with {product-title}] * link:https://aws.amazon.com/rosa/pricing/[AWS pricing page] diff --git a/rosa_architecture/rosa_policy_service_definition/rosa-hcp-instance-types.adoc b/rosa_architecture/rosa_policy_service_definition/rosa-hcp-instance-types.adoc index b067ac38ec..79685a49b5 100644 --- a/rosa_architecture/rosa_policy_service_definition/rosa-hcp-instance-types.adoc +++ b/rosa_architecture/rosa_policy_service_definition/rosa-hcp-instance-types.adoc @@ -18,10 +18,5 @@ include::modules/rosa-sdpolicy-am-aws-compute-types-graviton.adoc[leveloffset=+1 [role="_additional-resources"] .Additional resources -<<<<<<< HEAD - -* xref:../../rosa_planning/rosa-hcp-limits-scalability.adoc#rosa-hcp-limits-scalability[{hcp-title} limits and scalability] -======= ->>>>>>> c17ffd7cec (Adding the Security HCP cherrypick) * link:https://aws.amazon.com/ec2/instance-types[AWS Instance Types] diff --git a/rosa_architecture/rosa_policy_service_definition/rosa-instance-types.adoc b/rosa_architecture/rosa_policy_service_definition/rosa-instance-types.adoc index 166318c615..fb68cef0da 100644 --- a/rosa_architecture/rosa_policy_service_definition/rosa-instance-types.adoc +++ b/rosa_architecture/rosa_policy_service_definition/rosa-instance-types.adoc @@ -12,10 +12,6 @@ include::modules/rosa-sdpolicy-am-aws-compute-types.adoc[leveloffset=+1] [role="_additional-resources"] .Additional resources -<<<<<<< HEAD * xref:../../rosa_planning/rosa-limits-scalability.adoc#rosa-limits-scalability[Limits and scalability] -======= ->>>>>>> c17ffd7cec (Adding the Security HCP cherrypick) - * link:https://aws.amazon.com/ec2/instance-types[AWS Instance Types] \ No newline at end of file diff --git a/rosa_architecture/rosa_policy_service_definition/rosa-policy-responsibility-matrix.adoc b/rosa_architecture/rosa_policy_service_definition/rosa-policy-responsibility-matrix.adoc index fdb31f168e..fd99af76da 100644 --- a/rosa_architecture/rosa_policy_service_definition/rosa-policy-responsibility-matrix.adoc +++ b/rosa_architecture/rosa_policy_service_definition/rosa-policy-responsibility-matrix.adoc @@ -42,9 +42,4 @@ include::modules/rosa-policy-disaster-recovery.adoc[leveloffset=+1] * xref:../../rosa_cluster_admin/rosa_nodes/rosa-nodes-machinepools-about.adoc#rosa-nodes-machinepools-about[About machine pools] -include::modules/rosa-policy-customer-responsibility.adoc[leveloffset=+1] - -[role="_additional-resources"] -== Additional resources - -* For more information about Red{nbsp}Hat site reliability engineering (SRE) teams access, see xref:../../rosa_architecture/rosa_policy_service_definition/rosa-sre-access.adoc#rosa-policy-identity-access-management_rosa-sre-access[Identity and access management]. 
+include::modules/rosa-policy-customer-responsibility.adoc[leveloffset=+1] \ No newline at end of file diff --git a/rosa_architecture/rosa_policy_service_definition/rosa-service-definition.adoc b/rosa_architecture/rosa_policy_service_definition/rosa-service-definition.adoc index b83d545fd0..ddfb568ffe 100644 --- a/rosa_architecture/rosa_policy_service_definition/rosa-service-definition.adoc +++ b/rosa_architecture/rosa_policy_service_definition/rosa-service-definition.adoc @@ -19,13 +19,6 @@ include::modules/rosa-sdpolicy-am-cluster-self-service.adoc[leveloffset=+2] [role="_additional-resources"] .Additional resources -ifdef::openshift-rosa-hcp[] -* link:https://docs.openshift.com/rosa/rosa_architecture/rosa_policy_service_definition/rosa-service-definition.html#rosa-sdpolicy-red-hat-operator_rosa-service-definition[Red{nbsp}Hat Operator Support] -endif::openshift-rosa-hcp[] -ifndef::openshift-rosa-hcp[] -* xref:../../rosa_architecture/rosa_policy_service_definition/rosa-service-definition.adoc#rosa-sdpolicy-red-hat-operator_rosa-service-definition[Red{nbsp}Hat Operator Support] -endif::openshift-rosa-hcp[] - ifdef::openshift-rosa-hcp[] * link:https://docs.openshift.com/rosa/rosa_cluster_admin/rosa-configuring-pid-limits.html#rosa-configuring-pid-limits[Configuring PID limits] endif::openshift-rosa-hcp[] @@ -38,7 +31,6 @@ include::modules/rosa-sdpolicy-instance-types.adoc[leveloffset=+2] [role="_additional-resources"] .Additional resources -<<<<<<< HEAD For a detailed listing of supported instance types, see ifdef::openshift-rosa-hcp[] xref:../rosa_policy_service_definition/rosa-hcp-instance-types.adoc#rosa-instance-types[{product-title} instance types].endif::openshift-rosa-hcp[] @@ -46,21 +38,11 @@ ifndef::openshift-rosa-hcp[] xref:../rosa_policy_service_definition/rosa-instance-types.adoc#rosa-instance-types[{product-title} instance types]. endif::openshift-rosa-hcp[] ======= -<<<<<<< HEAD * xref:../rosa_policy_service_definition/rosa-instance-types.adoc#rosa-instance-types[{product-title} instance types] * xref:../../rosa_planning/rosa-limits-scalability.adoc#rosa-limits-scalability[Limits and scalability] -======= For a detailed listing of supported instance types, see -ifdef::openshift-rosa-hcp[] -link:https://docs.openshift.com/rosa/rosa_architecture/rosa_policy_service_definition/rosa-hcp-instance-types.html#rosa-hcp-instance-types[{product-title} instance types]. -endif::openshift-rosa-hcp[] -ifndef::openshift-rosa-hcp[] -xref:../rosa_policy_service_definition/rosa-instance-types.adoc#rosa-instance-types[{product-title} instance types]. -endif::openshift-rosa-hcp[] ->>>>>>> 42a0000f89 (Upgrading ROSA with HCP updates) ->>>>>>> 8c77e6d308 (Upgrading ROSA with HCP updates) include::modules/rosa-sdpolicy-am-regions-az.adoc[leveloffset=+2] diff --git a/rosa_learning/creating_cluster_workshop/cloud-experts-getting-started-managing-worker-nodes.adoc b/rosa_learning/creating_cluster_workshop/cloud-experts-getting-started-managing-worker-nodes.adoc index daee316650..2314c00890 100644 --- a/rosa_learning/creating_cluster_workshop/cloud-experts-getting-started-managing-worker-nodes.adoc +++ b/rosa_learning/creating_cluster_workshop/cloud-experts-getting-started-managing-worker-nodes.adoc @@ -9,12 +9,8 @@ toc::[] //rosaworkshop.io content metadata //Brought into ROSA product docs 2023-11-30 -ifndef::openshift-rosa-hcp[] -In {product-title} (ROSA), changing aspects of your worker nodes is performed through the use of machine pools. 
A machine pool allows users to manage many machines as a single entity. Every ROSA cluster has a default machine pool that is created when the cluster is created. For more information, see the xref:../../rosa_cluster_admin/rosa_nodes/rosa-nodes-machinepools-about.adoc#rosa-nodes-machinepools-about[machine pool] documentation. -endif::openshift-rosa-hcp[] -ifdef::openshift-rosa-hcp[] In {product-title} (ROSA), changing aspects of your worker nodes is performed through the use of machine pools. A machine pool allows users to manage many machines as a single entity. Every ROSA cluster has a default machine pool that is created when the cluster is created. - +ifdef::openshift-rosa-hcp[] For more information, see the link:https://docs.openshift.com/rosa/rosa_cluster_admin/rosa_nodes/rosa-nodes-machinepools-about.html[machine pool] documentation. endif::openshift-rosa-hcp[] @@ -215,23 +211,9 @@ rosa create machinepool --cluster= --name= --replicas=>>>>>> 72fb79ab1d (Updated the HCP migration to include the ROSA Tutorals and Learning sections) -======= -. To see all the xref:../../rosa_architecture/rosa_policy_service_definition/rosa-service-definition.adoc#rosa-sdpolicy-aws-instance-types_rosa-service-definition[instance types available], run the following command: ->>>>>>> e3ad7b9382 (OSDOCS-11269) -======= -ifdef::openshift-rosa-hcp[] -. To see all the link:https://docs.openshift.com/rosa/rosa_architecture/rosa_policy_service_definition/rosa-instance-types.html[instance types available], run the following command: -endif::openshift-rosa-hcp[] ->>>>>>> 7aaa9ed78d (Updated the HCP migration to include the ROSA Tutorals and Learning sections) + [source,terminal] ---- diff --git a/rosa_release_notes/rosa-release-notes.adoc b/rosa_release_notes/rosa-release-notes.adoc index 8c4fa91ca0..decd7716f1 100644 --- a/rosa_release_notes/rosa-release-notes.adoc +++ b/rosa_release_notes/rosa-release-notes.adoc @@ -25,13 +25,9 @@ toc::[] [id="rosa-q3-2024_{context}"] === Q3 2024 -<<<<<<< HEAD * **{hcp-title} multi-architecture cluster update.** {hcp-title-first} clusters created before 25 July, 2024 will migrate to a multi-architecture image on their next upgrade allowing you to use {AWS} Arm-based Graviton instance types for your workloads. For more information, see xref:../upgrading/rosa-hcp-upgrading.adoc#rosa-upgrade-options_rosa-hcp-upgrading[Upgrading ROSA with HCP clusters]. * **{hcp-title} cluster node limit update.** {hcp-title} clusters can now scale to 500 worker nodes. This is an increase from the previous limit of 250 nodes. The 250 node limit is an increase from the previous limit 90 nodes on 26 August, 2024. For more information, see xref:../rosa_planning/rosa-hcp-limits-scalability.adoc#tested-cluster-maximums-hcp-sd_rosa-hcp-limits-scalability[ROSA with HCP cluster maximums]. -======= -* **{hcp-title} cluster node limit update.** {hcp-title} clusters can now scale to 250 worker nodes. This is an increase from the previous limit of 180 nodes. For more information, see xref:../rosa_planning/rosa-hcp-limits-scalability.adoc#tested-cluster-maximums-hcp-sd_rosa-hcp-limits-scalability[ROSA with HCP limits and scalability]. ->>>>>>> c17ffd7cec (Adding the Security HCP cherrypick) * **IMDSv2 support in {hcp-title}.** You can now enforce the use of the IMDSv2 endpoint for default machine pool worker nodes on new {hcp-title} clusters and for new machine pools on existing clusters. 
For more information, see xref:../rosa_hcp/terraform/rosa-hcp-creating-a-cluster-quickly-terraform.adoc#rosa-hcp-creating-a-cluster-quickly-terraform[Creating a default ROSA cluster using Terraform]. diff --git a/snippets/ibu-ImageBasedGroupUpgrade.adoc b/snippets/ibu-ImageBasedGroupUpgrade.adoc index e9fd8b1bfd..b19b03af74 100644 --- a/snippets/ibu-ImageBasedGroupUpgrade.adoc +++ b/snippets/ibu-ImageBasedGroupUpgrade.adoc @@ -21,6 +21,7 @@ spec: pullSecretRef: name: "" extraManifests: # <3> + - name: example-extra-manifests namespace: openshift-lifecycle-agent oadpContent: # <4> - name: oadp-cm diff --git a/snippets/ztp_PtpConfigDualCardGmWpc.yaml b/snippets/ztp_PtpConfigDualCardGmWpc.yaml index ba985821df..8fb6b3aea6 100644 --- a/snippets/ztp_PtpConfigDualCardGmWpc.yaml +++ b/snippets/ztp_PtpConfigDualCardGmWpc.yaml @@ -89,11 +89,11 @@ spec: - "-p" - "MON-HW" reportOutput: true - - args: #ubxtool -P 29.20 -p CFG-MSG,1,38,300 + - args: #ubxtool -P 29.20 -p CFG-MSG,1,38,248 - "-P" - "29.20" - "-p" - - "CFG-MSG,1,38,300" + - "CFG-MSG,1,38,248" reportOutput: true ts2phcOpts: " " ts2phcConf: | diff --git a/snippets/ztp_PtpConfigGmWpc.yaml b/snippets/ztp_PtpConfigGmWpc.yaml index b084c9aba8..a1f133825c 100644 --- a/snippets/ztp_PtpConfigGmWpc.yaml +++ b/snippets/ztp_PtpConfigGmWpc.yaml @@ -82,11 +82,11 @@ spec: - "-p" - "MON-HW" reportOutput: true - - args: #ubxtool -P 29.20 -p CFG-MSG,1,38,300 + - args: #ubxtool -P 29.20 -p CFG-MSG,1,38,248 - "-P" - "29.20" - "-p" - - "CFG-MSG,1,38,300" + - "CFG-MSG,1,38,248" reportOutput: true ts2phcOpts: " " ts2phcConf: | diff --git a/upgrading/rosa-hcp-upgrading.adoc b/upgrading/rosa-hcp-upgrading.adoc index e07da89658..a9ba2225d2 100644 --- a/upgrading/rosa-hcp-upgrading.adoc +++ b/upgrading/rosa-hcp-upgrading.adoc @@ -8,8 +8,8 @@ toc::[] include::modules/rosa-hcp-upgrade-options.adoc[leveloffset=+1] -.Additional resources -* xref:../cli_reference/rosa_cli/rosa-manage-objects-cli.adoc#rosa-edit-machinepool_rosa-managing-objects-cli[ROSA CLI reference: `rosa edit machinepool`] +// .Additional resources +// * ../cli_reference/rosa_cli/rosa-manage-objects-cli.adoc#rosa-edit-machinepool_rosa-managing-objects-cli[ROSA CLI reference: `rosa edit machinepool`] //This cannot be a module if we want to use the xrefs [id="rosa-lifecycle-policy_{context}"] @@ -47,10 +47,21 @@ Upgrading the entire cluster involves upgrading both the hosted control plane an * You have installed and configured the latest version of the ROSA CLI. * No other upgrades are in progress or scheduled to take place at the same time as this upgrade. + +ifdef::context[:prevcontext: {context}] +:context: rosa-hcp-upgrading-whole-cluster + include::modules/rosa-hcp-upgrading-cli-control-plane.adoc[leveloffset=+2] + +ifdef::prevcontext[:context: {prevcontext}] +ifdef::context[:prevcontext: {context}] + +:context: rosa-hcp-upgrading-whole-cluster + include::modules/rosa-hcp-upgrading-cli-machinepool.adoc[leveloffset=+2] -include::modules/rosa-hcp-upgrading-cli-tutorial.adoc[leveloffset=+1] ifdef::prevcontext[:context: {prevcontext}] ifndef::prevcontext[:!context:] +//LB: Remove until here if we don't want the "whole cluster" upgrade section + include::modules/rosa-hcp-upgrading-cli-tutorial.adoc[leveloffset=+1]
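The managing-worker-nodes workshop content above adds capacity through machine pools with `rosa create machinepool`. A filled-in sketch of that command follows; the cluster name, pool name, replica count, and `m5.xlarge` instance type are placeholder values chosen for illustration, and the instance types available to your account can be checked against the AWS instance type references linked earlier.

[source,terminal]
----
$ rosa create machinepool --cluster=my-rosa-cluster --name=workshop-pool --replicas=2 --instance-type=m5.xlarge
----

Listing the pools afterwards, for example with `rosa list machinepools --cluster=my-rosa-cluster`, should show the new pool reporting the requested replica count once the nodes join the cluster.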