From 2b6ff1704a4e70f365e1684a79c06a02d91c0468 Mon Sep 17 00:00:00 2001 From: Michael Ryan Peter Date: Fri, 19 Sep 2025 15:13:29 -0400 Subject: [PATCH] OSDOCS#15857: Change *OperatorHub* to *Software Catalog* &tc. --- _topic_maps/_topic_map.yml | 2 +- _topic_maps/_topic_map_osd.yml | 2 +- _topic_maps/_topic_map_rosa.yml | 2 +- _topic_maps/_topic_map_rosa_hcp.yml | 2 +- ...arted-with-service-binding-ibm-power-ibm-z.adoc | 2 +- .../getting-started-with-service-binding.adoc | 6 +++--- applications/odc-exporting-applications.adoc | 2 +- architecture/control-plane.adoc | 2 +- architecture/index.adoc | 2 +- architecture/understanding-development.adoc | 4 ++-- .../aws-sts/oadp-aws-sts.adoc | 6 +++--- .../installing/installing-oadp-aws.adoc | 2 +- .../installing/installing-oadp-azure.adoc | 2 +- .../installing/installing-oadp-gcp.adoc | 2 +- .../installing/installing-oadp-kubevirt.adoc | 2 +- .../installing/installing-oadp-mcg.adoc | 2 +- .../installing/installing-oadp-ocs.adoc | 2 +- .../oadp-rosa-backing-up-applications.adoc | 2 +- .../creating-applications-with-cicd-pipelines.adoc | 2 +- ...erts-deploying-application-integrating-aws.adoc | 4 ++-- .../cloud-experts-external-dns.adoc | 2 +- .../cloud-experts-using-aws-ack.adoc | 6 +++--- .../installing-mirroring-installation-images.adoc | 2 +- .../updating/disconnected-update-osus.adoc | 2 +- disconnected/updating/disconnected-update.adoc | 2 +- disconnected/using-olm.adoc | 10 +++++----- metering/metering-installing-metering.adoc | 4 ++-- metering/metering-upgrading-metering.adoc | 8 ++++---- .../microshift-operators-olm.adoc | 4 ++-- modules/about-administrator_web-console.adoc | 2 +- modules/arch-olm-operators.adoc | 6 +++--- modules/architecture-platform-benefits.adoc | 2 +- modules/cco-short-term-creds-aws-olm.adoc | 2 +- modules/cco-short-term-creds-azure-olm.adoc | 2 +- modules/cco-short-term-creds-gcp-olm.adoc | 2 +- modules/cert-manager-install-console.adoc | 4 ++-- modules/cert-manager-uninstall-console.adoc | 2 +- modules/cli-manager-adding-plugin-yaml.adoc | 4 ++-- modules/cli-manager-installing.adoc | 6 +++--- modules/cli-manager-uninstalling.adoc | 2 +- .../cluster-logging-updating-logging-to-5-1.adoc | 8 ++++---- .../cluster-logging-upgrading-elasticsearch.adoc | 8 ++++---- ...stalling-lifecycle-agent-using-web-console.adoc | 8 ++++---- ...installing-numa-resources-operator-console.adoc | 6 +++--- ...lifecycle-manager-installation-web-console.adoc | 6 +++--- .../compliance-operator-console-installation.adoc | 6 +++--- modules/compliance-operator-hcp-install.adoc | 2 +- modules/compliance-operator-uninstall.adoc | 6 +++--- modules/coo-dashboard-ui-plugin-install.adoc | 4 ++-- .../coo-distributed-tracing-ui-plugin-install.adoc | 4 ++-- modules/coo-logging-ui-plugin-install.adoc | 2 +- modules/coo-monitoring-ui-plugin-install.adoc | 4 ++-- modules/coo-troubleshooting-ui-plugin-install.adoc | 4 ++-- modules/create-lokistack-cr-console.adoc | 2 +- modules/creating-nfd-cr-web-console.adoc | 2 +- modules/das-operator-installing-web-console.adoc | 14 +++++++------- modules/das-operator-uninstalling-web-console.adoc | 6 +++--- modules/dedicated-cluster-install-deploy.adoc | 14 +++++++------- .../distr-tracing-config-security-ossm-web.adoc | 2 +- modules/distr-tracing-deploy-default.adoc | 2 +- modules/distr-tracing-deploy-production-es.adoc | 2 +- modules/distr-tracing-deploy-streaming.adoc | 2 +- modules/distr-tracing-install-elasticsearch.adoc | 2 +- modules/distr-tracing-install-jaeger-operator.adoc | 6 +++--- 
modules/distr-tracing-install-overview.adoc | 4 ++-- modules/distr-tracing-removing-instance.adoc | 2 +- ...-tempo-install-tempomonolithic-web-console.adoc | 4 ++-- ...acing-tempo-install-tempostack-web-console.adoc | 4 ++-- .../distr-tracing-tempo-install-web-console.adoc | 2 +- .../distr-tracing-tempo-remove-web-console.adoc | 2 +- ...th-check-operator-installation-web-console.adoc | 6 +++--- ...ntenance-operator-installation-web-console.adoc | 8 ++++---- ...o-resuming-node-maintenance-cr-web-console.adoc | 4 ++-- ...ediation-operator-installation-web-console.adoc | 6 +++--- ...co-setting-node-maintenance-cr-web-console.adoc | 4 ++-- modules/enabling-log-console-plugin.adoc | 2 +- .../external-secrets-operator-install-console.adoc | 4 ++-- ...xternal-secrets-operator-uninstall-console.adoc | 2 +- ...-integrity-operator-installing-web-console.adoc | 6 +++--- modules/fio-uninstall-console.adoc | 4 ++-- modules/gitops-argo-cd-installation.adoc | 4 ++-- ...ps-creating-rolloutmanager-custom-resource.adoc | 2 +- ...ps-deleting-rolloutmanager-custom-resource.adoc | 2 +- modules/gitops-release-notes-1-3-2.adoc | 4 ++-- modules/go-health-monitoring.adoc | 4 ++-- modules/go-uninstalling-gitops-operator.adoc | 2 +- modules/hcp-aws-prereqs.adoc | 2 +- modules/hcp-bm-prereqs.adoc | 2 +- modules/hcp-cli-console.adoc | 2 +- modules/hcp-cli-gateway.adoc | 2 +- modules/hcp-cli-terminal.adoc | 2 +- modules/hcp-dc-mgmt-cluster.adoc | 4 ++-- modules/hcp-get-upgrade-versions.adoc | 2 +- modules/hcp-ibm-power-prereqs.adoc | 2 +- modules/hcp-ibm-z-prereqs.adoc | 4 ++-- modules/hcp-non-bm-prereqs.adoc | 4 ++-- .../hosted-control-planes-concepts-personas.adoc | 2 +- modules/hosted-control-planes-version-support.adoc | 2 +- modules/ibi-install-lcao-console.adoc | 8 ++++---- modules/installing-aws-load-balancer-operator.adoc | 4 ++-- .../installing-gitops-operator-in-web-console.adoc | 2 +- modules/installing-gitops-operator-using-cli.adoc | 6 +++--- modules/installing-oadp-aws-sts.adoc | 4 ++-- modules/installing-oadp-rosa-sts.adoc | 4 ++-- modules/installing-operator-oadp.adoc | 4 ++-- modules/installing-wmco-using-cli.adoc | 2 +- modules/installing-wmco-using-web-console.adoc | 2 +- ...installing-the-kubernetes-nmstate-operator.adoc | 2 +- modules/kmm-hub-running-kmm-on-the-spoke.adoc | 2 +- modules/kmm-installing-using-web-console.adoc | 6 +++--- modules/log6x-quickstart-opentelemetry.adoc | 2 +- modules/log6x-quickstart-viaq.adoc | 2 +- modules/logging-create-loki-cr-console.adoc | 2 +- modules/logging-es-deploy-console.adoc | 10 +++++----- ...nstance-by-using-the-argo-cd-admin-account.adoc | 2 +- modules/logging-install-es-operator.adoc | 4 ++-- modules/logging-loki-gui-install.adoc | 10 +++++----- modules/logging-upgrading-clo.adoc | 6 +++--- modules/logging-upgrading-loki.adoc | 6 +++--- ...ms-creating-lvms-cluster-using-web-console.adoc | 2 +- ...lvms-deleting-lvmcluster-using-web-console.adoc | 2 +- ...nager-operator-using-openshift-web-console.adoc | 4 ++-- ...ling-storage-of-clusters-using-web-console.adoc | 4 ++-- ...nager-operator-using-openshift-web-console.adoc | 4 ++-- modules/metallb-installing-using-web-console.adoc | 8 ++++---- modules/metering-debugging.adoc | 2 +- modules/metering-install-operator.adoc | 4 ++-- modules/metering-install-verify.adoc | 2 +- modules/migration-error-messages.adoc | 2 +- modules/migration-installing-mtc-on-ocp-4.adoc | 2 +- modules/migration-upgrading-mtc-on-ocp-4.adoc | 4 ++-- ...servability-operator-using-the-web-console.adoc | 6 +++--- 
...servability-operator-using-the-web-console.adoc | 6 +++--- ...ating-podplacment-config-using-web-console.adoc | 2 +- ...eting-podplacment-config-using-web-console.adoc | 2 +- modules/multi-arch-installing-using-cli.adoc | 2 +- .../multi-arch-installing-using-web-console.adoc | 6 +++--- .../multi-arch-uninstalling-using-web-console.adoc | 6 +++--- modules/nbde-tang-server-operator-deploying.adoc | 4 ++-- ...e-tang-server-operator-identifying-url-cli.adoc | 2 +- ...erver-operator-identifying-url-web-console.adoc | 4 ++-- .../nbde-tang-server-operator-installing-cli.adoc | 6 +++--- ...ang-server-operator-installing-web-console.adoc | 8 ++++---- modules/network-observability-RTT.adoc | 4 ++-- .../network-observability-SRIOV-configuration.adoc | 4 ++-- ...k-observability-configuring-custom-metrics.adoc | 2 +- ...ervability-creating-metrics-network-events.adoc | 4 ++-- ...etwork-observability-deploy-network-policy.adoc | 4 ++-- ...work-observability-disabling-health-alerts.adoc | 4 ++-- modules/network-observability-dns-tracking.adoc | 4 ++-- .../network-observability-ebpf-agent-alert.adoc | 2 +- ...etwork-observability-ebpf-manager-operator.adoc | 6 +++--- modules/network-observability-enriched-flows.adoc | 2 +- .../network-observability-filtering-ebpf-rule.adoc | 2 +- ...k-observability-flowcollector-kafka-config.adoc | 4 ++-- .../network-observability-flowcollector-view.adoc | 2 +- .../network-observability-flowmetrics-charts.adoc | 2 +- modules/network-observability-loki-install.adoc | 6 +++--- .../network-observability-lokistack-create.adoc | 2 +- ...etwork-observability-netobserv-cli-install.adoc | 2 +- .../network-observability-operator-install.adoc | 8 ++++---- .../network-observability-operator-uninstall.adoc | 4 ++-- modules/network-observability-packet-drops.adoc | 4 ++-- .../network-observability-packet-translation.adoc | 2 +- ...iguring-ipsec-with-flow-collector-resource.adoc | 2 +- .../network-observability-tcp-flag-syn-flood.adoc | 4 ++-- ...twork-observability-viewing-network-events.adoc | 2 +- ...observability-virtualization-configuration.adoc | 2 +- ...k-observability-working-with-conversations.adoc | 2 +- .../network-observability-working-with-zones.adoc | 4 ++-- .../node-observability-install-web-console.adoc | 4 ++-- ...s-cluster-resource-override-deploy-console.adoc | 2 +- modules/nodes-cma-autoscaling-custom-install.adoc | 2 +- .../nodes-cma-autoscaling-custom-uninstalling.adoc | 4 ++-- ...nodes-cma-autoscaling-keda-controller-edit.adoc | 2 +- modules/nodes-descheduler-installing.adoc | 6 +++--- modules/nodes-descheduler-uninstalling.adoc | 4 ++-- .../nodes-pods-vertical-autoscaler-install.adoc | 2 +- .../nodes-pods-vertical-autoscaler-uninstall.adoc | 2 +- ...es-secondary-scheduler-configuring-console.adoc | 2 +- .../nodes-secondary-scheduler-install-console.adoc | 4 ++-- ...odes-secondary-scheduler-uninstall-console.adoc | 2 +- ...loying-the-node-feature-discovery-operator.adoc | 4 ++-- modules/nw-autoscaling-ingress-controller.adoc | 2 +- modules/nw-aws-load-balancer-operator.adoc | 2 +- modules/nw-bpfman-operator-installing-console.adoc | 4 ++-- modules/nw-dpu-installing-operator-ui.adoc | 4 ++-- modules/nw-external-dns-operator.adoc | 2 +- modules/nw-infw-operator-installing-console.adoc | 4 ++-- modules/nw-installing-external-dns-operator.adoc | 6 +++--- modules/nw-metalLB-basic-upgrade-operator.adoc | 2 +- modules/nw-metallb-installing-operator-cli.adoc | 4 ++-- .../nw-ptp-installing-operator-web-console.adoc | 6 +++--- modules/nw-sriov-installing-operator.adoc | 
4 ++-- modules/oadp-installing-dpa-1-3.adoc | 2 +- modules/oadp-installing-operator.adoc | 4 ++-- ...ng-a-binding-connection-between-components.adoc | 6 +++--- .../olm-accessing-images-private-registries.adoc | 2 +- modules/olm-approving-pending-upgrade.adoc | 4 ++-- modules/olm-catalogsource.adoc | 2 +- modules/olm-changing-update-channel.adoc | 4 ++-- modules/olm-creating-catalog-from-index.adoc | 4 ++-- .../olm-creating-etcd-cluster-from-operator.adoc | 2 +- modules/olm-cs-health.adoc | 2 +- ...operators-from-a-cluster-using-web-console.adoc | 2 +- modules/olm-deprecations-schema.adoc | 4 ++-- modules/olm-filtering-fbc.adoc | 2 +- .../olm-installing-from-operatorhub-using-cli.adoc | 8 ++++---- ...talling-from-operatorhub-using-web-console.adoc | 10 +++++----- .../olm-installing-operators-from-operatorhub.adoc | 10 +++++----- modules/olm-mirroring-catalog-airgapped.adoc | 2 +- modules/olm-mirroring-catalog-post.adoc | 2 +- modules/olm-operator-framework.adoc | 4 ++-- modules/olm-operatorgroups-limitations.adoc | 4 ++-- modules/olm-operatorhub-architecture.adoc | 6 +++--- modules/olm-operatorhub-overview.adoc | 8 ++++---- modules/olm-overriding-proxy-settings.adoc | 2 +- modules/olm-refresh-subs.adoc | 2 +- modules/olm-reinstall.adoc | 4 ++-- ...estricted-networks-configuring-operatorhub.adoc | 6 +++--- modules/olm-updating-index-image.adoc | 2 +- ...stalling-pipelines-operator-in-web-console.adoc | 4 ++-- ...nstalling-pipelines-operator-using-the-cli.adoc | 4 ++-- ...talling-sbo-operator-using-the-web-console.adoc | 4 ++-- .../op-uninstalling-the-pipelines-operator.adoc | 2 +- modules/optional-capabilities-operators.adoc | 2 +- modules/osd-intro.adoc | 2 +- ...m-add-project-member-roll-resource-console.adoc | 4 ++-- ...-add-project-using-label-selectors-console.adoc | 4 ++-- ...-adding-project-using-smm-resource-console.adoc | 2 +- ...-control-plane-infrastructure-node-console.adoc | 4 ++-- modules/ossm-config-disable-networkpolicy.adoc | 2 +- modules/ossm-config-enabling-controlplane.adoc | 2 +- modules/ossm-config-external-jaeger.adoc | 2 +- ...-control-plane-infrastructure-node-console.adoc | 4 ++-- modules/ossm-config-mtls-min-max.adoc | 2 +- modules/ossm-config-sampling.adoc | 2 +- modules/ossm-config-sec-mtls-mesh.adoc | 2 +- modules/ossm-config-web-console.adoc | 2 +- modules/ossm-configuring-jaeger-v1x.adoc | 2 +- modules/ossm-control-plane-deploy-1x.adoc | 2 +- modules/ossm-control-plane-remove.adoc | 2 +- modules/ossm-control-plane-web.adoc | 2 +- ...idecar-injection-cluster-wide-mesh-console.adoc | 2 +- ...-deploy-cluster-wide-control-plane-console.adoc | 2 +- ...-namespaces-from-cluster-wide-mesh-console.adoc | 4 ++-- modules/ossm-federation-config-smcp.adoc | 2 +- modules/ossm-federation-create-export.adoc | 2 +- modules/ossm-federation-create-import.adoc | 2 +- modules/ossm-federation-create-meshPeer.adoc | 2 +- modules/ossm-install-kiali.adoc | 2 +- modules/ossm-install-ossm-operator.adoc | 4 ++-- modules/ossm-jaeger-config-elasticsearch-v1x.adoc | 2 +- modules/ossm-member-roll-create.adoc | 2 +- modules/ossm-member-roll-modify.adoc | 2 +- modules/ossm-migrating-to-20.adoc | 2 +- modules/ossm-recommended-resources.adoc | 2 +- modules/ossm-remove-operators.adoc | 2 +- modules/ossm-rn-new-features.adoc | 2 +- modules/ossm-troubleshooting-operators.adoc | 2 +- modules/ossm-tutorial-bookinfo-install.adoc | 2 +- modules/ossm-tutorial-bookinfo-removing.adoc | 2 +- modules/ossm-upgrading-smcp.adoc | 2 +- modules/ossm-validating-smcp.adoc | 8 ++++---- 
modules/otel-install-web-console.adoc | 6 +++--- modules/otel-remove-web-console.adoc | 2 +- .../persistent-storage-csi-gcp-file-install.adoc | 4 ++-- ...ersistent-storage-csi-olm-operator-install.adoc | 2 +- ...sistent-storage-csi-olm-operator-uninstall.adoc | 2 +- ...t-storage-csi-secrets-store-driver-install.adoc | 2 +- ...storage-csi-secrets-store-driver-uninstall.adoc | 2 +- modules/persistent-storage-local-discovery.adoc | 4 ++-- modules/persistent-storage-local-install.adoc | 2 +- modules/persistent-storage-local-metrics.adoc | 2 +- ...ersistent-storage-local-uninstall-operator.adoc | 2 +- modules/pipelines-web-console.adoc | 2 +- modules/power-monitoring-deleting-kepler.adoc | 2 +- ...ing-deleting-power-monitor-custom-resource.adoc | 4 ++-- ...ng-deploying-power-monitor-custom-resource.adoc | 2 +- modules/power-monitoring-installing-pmo.adoc | 6 +++--- modules/power-monitoring-uninstalling-pmo.adoc | 4 ++-- modules/psap-driver-toolkit.adoc | 2 +- ...installing-node-feature-discovery-operator.adoc | 6 +++--- modules/red-hat-marketplace-features.adoc | 4 ++-- modules/removing-cso-operator.adoc | 4 ++-- modules/removing-devworkspace-operator.adoc | 2 +- modules/removing-web-terminal-operator.adoc | 2 +- modules/rodoo-install-operator.adoc | 4 ++-- modules/rodoo-uninstall-operator.adoc | 4 ++-- modules/rosa-policy-incident.adoc | 2 +- modules/rosa-sdpolicy-platform.adoc | 4 +++- modules/rosa-sdpolicy-security.adoc | 2 +- .../sd-nodes-cma-autoscaling-custom-install.adoc | 2 +- modules/sdpolicy-platform.adoc | 2 +- modules/sdpolicy-security.adoc | 2 +- modules/security-pod-scan-cso.adoc | 2 +- .../serverless-creating-a-kafka-event-sink.adoc | 2 +- modules/serverless-install-cli.adoc | 6 +++--- .../serverless-install-eventing-web-console.adoc | 2 +- modules/serverless-install-kafka-odc.adoc | 2 +- .../serverless-install-serving-web-console.adoc | 2 +- modules/serverless-install-web-console.adoc | 4 ++-- modules/serverless-web-console.adoc | 2 +- modules/spo-installing.adoc | 8 ++++---- modules/spo-uninstall-console.adoc | 6 +++--- ...ility-controller-manager-pod-out-of-memory.adoc | 4 ++-- ...work-observability-loki-resource-exhausted.adoc | 2 +- ...twork-observability-loki-tenant-rate-limit.adoc | 2 +- modules/understanding-openshift.adoc | 4 ++-- modules/uninstall-cluster-logging-operator.adoc | 2 +- modules/uninstall-es-operator.adoc | 2 +- modules/uninstall-loki-operator.adoc | 2 +- modules/uninstalling-wmco.adoc | 2 +- modules/update-conditional-web-console.adoc | 4 ++-- .../update-service-create-service-web-console.adoc | 2 +- .../update-service-delete-service-web-console.adoc | 2 +- modules/update-service-install-web-console.adoc | 4 ++-- modules/update-service-uninstall-web-console.adoc | 2 +- modules/update-upgrading-web.adoc | 2 +- ...dating-control-plane-only-layered-products.adoc | 2 +- modules/virt-changing-update-settings.adoc | 2 +- modules/virt-creating-fusionaccess-cr.adoc | 2 +- .../virt-deleting-deployment-custom-resource.adoc | 4 ++-- .../virt-installing-fusion-access-operator.adoc | 6 +++--- modules/virt-installing-virt-operator.adoc | 2 +- modules/wmco-upgrades-eus-using-web-console.adoc | 2 +- modules/zero-trust-manager-install-console.adoc | 6 +++--- modules/zero-trust-manager-uninstall-console.adoc | 2 +- modules/ztp-lvms-installing-lvms-web-console.adoc | 8 ++++---- .../install-aws-load-balancer-operator.adoc | 4 ++-- .../understanding-aws-load-balancer-operator.adoc | 4 ++-- ...talling-the-cluster-observability-operator.adoc | 4 ++-- 
.../distr-tracing-tempo-installing.adoc | 2 +- observability/logging/logging-6.0/log6x-about.adoc | 2 +- observability/otel/otel-installing.adoc | 2 +- .../admin/olm-adding-operators-to-cluster.adoc | 4 ++-- operators/index.adoc | 4 ++-- operators/operator-reference.adoc | 2 +- .../olm-understanding-operatorhub.adoc | 2 +- .../understanding/olm/olm-understanding-olm.adoc | 2 +- .../olm-installing-operators-in-namespace.adoc | 2 +- osd_cluster_admin/dedicated-admin-role.adoc | 4 ++-- .../multiarch-tuning-operator.adoc | 2 +- post_installation_configuration/index.adoc | 2 +- .../preparing-for-users.adoc | 4 ++-- rosa_hcp/rosa-hcp-egress-zero-install.adoc | 4 ++-- .../compliance-operator-release-notes.adoc | 2 +- .../install/preparing-serverless-install.adoc | 2 +- service_mesh/v1x/installing-ossm.adoc | 2 +- snippets/olmv1-cli-only.adoc | 2 +- .../troubleshooting-operator-issues.adoc | 2 +- .../updating_a_cluster/updating-cluster-cli.adoc | 2 +- .../virt-backup-restore-overview.adoc | 2 +- virt/install/installing-virt.adoc | 2 +- .../virt-high-availability-for-vms.adoc | 2 +- virt/nodes/virt-node-maintenance.adoc | 2 +- .../virt-post-install-network-config.adoc | 2 +- web_console/capabilities_products-web-console.adoc | 4 ++-- .../web_terminal/installing-web-terminal.adoc | 6 +++--- welcome/oke_about.adoc | 2 +- whats_new/new-features.adoc | 12 ++++++------ .../enabling-windows-container-workloads.adoc | 2 +- 362 files changed, 617 insertions(+), 615 deletions(-) diff --git a/_topic_maps/_topic_map.yml b/_topic_maps/_topic_map.yml index 5c25579675..45f17c4c1a 100644 --- a/_topic_maps/_topic_map.yml +++ b/_topic_maps/_topic_map.yml @@ -1967,7 +1967,7 @@ Topics: File: olm-understanding-metrics - Name: Webhooks File: olm-webhooks - - Name: OperatorHub + - Name: Software catalog Distros: openshift-enterprise,openshift-origin File: olm-understanding-operatorhub - Name: Red Hat-provided Operator catalogs diff --git a/_topic_maps/_topic_map_osd.yml b/_topic_maps/_topic_map_osd.yml index 41c7462cee..ebe68a18d8 100644 --- a/_topic_maps/_topic_map_osd.yml +++ b/_topic_maps/_topic_map_osd.yml @@ -689,7 +689,7 @@ Topics: File: olm-understanding-metrics - Name: Webhooks File: olm-webhooks - - Name: OperatorHub + - Name: Software catalog File: olm-understanding-operatorhub - Name: Red Hat-provided Operator catalogs File: olm-rh-catalogs diff --git a/_topic_maps/_topic_map_rosa.yml b/_topic_maps/_topic_map_rosa.yml index 21f7bf0cb8..81ad194ac4 100644 --- a/_topic_maps/_topic_map_rosa.yml +++ b/_topic_maps/_topic_map_rosa.yml @@ -895,7 +895,7 @@ Topics: File: olm-understanding-metrics - Name: Webhooks File: olm-webhooks - - Name: OperatorHub + - Name: Software catalog File: olm-understanding-operatorhub - Name: Red Hat-provided Operator catalogs File: olm-rh-catalogs diff --git a/_topic_maps/_topic_map_rosa_hcp.yml b/_topic_maps/_topic_map_rosa_hcp.yml index 38cc8954ea..3d3da2711f 100644 --- a/_topic_maps/_topic_map_rosa_hcp.yml +++ b/_topic_maps/_topic_map_rosa_hcp.yml @@ -757,7 +757,7 @@ Topics: File: olm-understanding-metrics - Name: Webhooks File: olm-webhooks - - Name: OperatorHub + - Name: Software catalog File: olm-understanding-operatorhub - Name: Red Hat-provided Operator catalogs File: olm-rh-catalogs diff --git a/applications/connecting_applications_to_services/getting-started-with-service-binding-ibm-power-ibm-z.adoc b/applications/connecting_applications_to_services/getting-started-with-service-binding-ibm-power-ibm-z.adoc index bd200f551a..11c1f80362 100644 --- 
a/applications/connecting_applications_to_services/getting-started-with-service-binding-ibm-power-ibm-z.adoc +++ b/applications/connecting_applications_to_services/getting-started-with-service-binding-ibm-power-ibm-z.adoc @@ -16,7 +16,7 @@ The {servicebinding-title} manages the data plane for workloads and backing serv * You have access to an {product-title} cluster using an account with `cluster-admin` permissions. * You have installed the `oc` CLI. -* You have installed the {servicebinding-title} from OperatorHub. +* You have installed the {servicebinding-title} from the software catalog. //Deploying PostgreSQL operator include::modules/sbo-deploying-a-postgresql-database-operator-power-z.adoc[leveloffset=+1] diff --git a/applications/connecting_applications_to_services/getting-started-with-service-binding.adoc b/applications/connecting_applications_to_services/getting-started-with-service-binding.adoc index 535314cb29..685bd782ac 100644 --- a/applications/connecting_applications_to_services/getting-started-with-service-binding.adoc +++ b/applications/connecting_applications_to_services/getting-started-with-service-binding.adoc @@ -21,9 +21,9 @@ ifdef::openshift-rosa,openshift-rosa-hcp,openshift-dedicated[] * You have access to an {product-title} cluster using an account with `dedicated-admin` permissions. endif::openshift-rosa,openshift-rosa-hcp,openshift-dedicated[] * You have installed the `oc` CLI. -* You have installed {servicebinding-title} from OperatorHub. +* You have installed {servicebinding-title} from the software catalog. ifndef::openshift-rosa,openshift-rosa-hcp,openshift-dedicated[] -* You have installed the 5.1.2 version of the Crunchy Postgres for Kubernetes Operator from OperatorHub using the *v5* Update channel. The installed Operator is available in an appropriate namespace, such as the `my-petclinic` namespace. +* You have installed the 5.1.2 version of the Crunchy Postgres for Kubernetes Operator from the software catalog using the *v5* Update channel. The installed Operator is available in an appropriate namespace, such as the `my-petclinic` namespace. + [NOTE] ==== @@ -31,7 +31,7 @@ You can create the namespace using the `oc create namespace my-petclinic` comman ==== endif::openshift-rosa,openshift-rosa-hcp,openshift-dedicated[] ifndef::openshift-rosa,openshift-rosa-hcp,openshift-dedicated[] -* You have installed the 5.1.2 version of the Crunchy Postgres for Kubernetes Operator from OperatorHub using the *v5* Update channel. The installed Operator is available in an appropriate project, such as the `my-petclinic` project. +* You have installed the 5.1.2 version of the Crunchy Postgres for Kubernetes Operator from the software catalog using the *v5* Update channel. The installed Operator is available in an appropriate project, such as the `my-petclinic` project. + [NOTE] ==== diff --git a/applications/odc-exporting-applications.adoc b/applications/odc-exporting-applications.adoc index f89cdf7e56..20a8c5ce84 100644 --- a/applications/odc-exporting-applications.adoc +++ b/applications/odc-exporting-applications.adoc @@ -11,7 +11,7 @@ As a developer, you can export your application in the ZIP file format. Based on [id="prerequisites_odc-exporting-applications"] == Prerequisites -* You have installed the gitops-primer Operator from the OperatorHub. +* You have installed the gitops-primer Operator from the software catalog. 
+ [NOTE] ==== diff --git a/architecture/control-plane.adoc b/architecture/control-plane.adoc index fd9c8183d3..25e3a94178 100644 --- a/architecture/control-plane.adoc +++ b/architecture/control-plane.adoc @@ -55,7 +55,7 @@ include::modules/arch-olm-operators.adoc[leveloffset=+2] [role="_additional-resources"] .Additional resources * xref:../operators/understanding/olm/olm-understanding-olm.adoc#olm-understanding-olm[Operator Lifecycle Manager (OLM) concepts and resources] -* xref:../operators/understanding/olm-understanding-operatorhub.adoc#olm-understanding-operatorhub[Understanding OperatorHub]. +* xref:../operators/understanding/olm-understanding-operatorhub.adoc#olm-understanding-operatorhub[Understanding the software catalog]. include::modules/etcd-overview.adoc[leveloffset=+1] diff --git a/architecture/index.adoc b/architecture/index.adoc index 91801f3d31..479124681c 100644 --- a/architecture/index.adoc +++ b/architecture/index.adoc @@ -73,7 +73,7 @@ endif::openshift-dedicated,openshift-rosa[] As a developer, you can use different tools, methods, and formats to xref:../architecture/understanding-development.adoc#understanding-development[develop your containerized application] based on your unique requirements, for example: * Use various build-tool, base-image, and registry options to build a simple container application. -* Use supporting components such as OperatorHub and templates to develop your application. +* Use supporting components such as the software catalog and templates to develop your application. * Package and deploy your application as an Operator. You can also create a Kubernetes manifest and store it in a Git repository. diff --git a/architecture/understanding-development.adoc b/architecture/understanding-development.adoc index 87b60793d9..4b05e84148 100644 --- a/architecture/understanding-development.adoc +++ b/architecture/understanding-development.adoc @@ -327,8 +327,8 @@ a logging component. To fulfill that need, you might be able to obtain the required component from the following Catalogs that are available in the {product-title} web console: -* OperatorHub, which is available in each {product-title} {product-version} -cluster. The OperatorHub makes Operators available from Red Hat, +* The software catalog, which is available in each {product-title} {product-version} +cluster. The software catalog makes Operators available from Red Hat, certified Red Hat partners, and community members to the cluster operator. The cluster operator can make those Operators available in all or selected namespaces in the cluster, so developers can launch them and configure them diff --git a/backup_and_restore/application_backup_and_restore/aws-sts/oadp-aws-sts.adoc b/backup_and_restore/application_backup_and_restore/aws-sts/oadp-aws-sts.adoc index 6eaf5b949d..b673645723 100644 --- a/backup_and_restore/application_backup_and_restore/aws-sts/oadp-aws-sts.adoc +++ b/backup_and_restore/application_backup_and_restore/aws-sts/oadp-aws-sts.adoc @@ -13,7 +13,7 @@ include::snippets/oadp-mtc-operator.adoc[] You configure {aws-short} for Velero, create a default `Secret`, and then install the Data Protection Application. For more details, see xref:../../../backup_and_restore/application_backup_and_restore/installing/oadp-installing-operator.adoc#oadp-installing-operator-doc[Installing the OADP Operator]. -To install the OADP Operator in a restricted network environment, you must first disable the default OperatorHub sources and mirror the Operator catalog. 
See xref:../../../disconnected/using-olm.adoc#olm-restricted-networks[Using Operator Lifecycle Manager in disconnected environments] for details. +To install the OADP Operator in a restricted network environment, you must first disable the default software catalog sources and mirror the Operator catalog. See xref:../../../disconnected/using-olm.adoc#olm-restricted-networks[Using Operator Lifecycle Manager in disconnected environments] for details. You can install {oadp-short} on an AWS {sts-first} (AWS STS) cluster manually. Amazon {aws-short} provides {aws-short} STS as a web service that enables you to request temporary, limited-privilege credentials for users. You use STS to provide trusted users with temporary access to resources via API calls, your {aws-short} console, or the {aws-short} command-line interface (CLI). @@ -33,7 +33,7 @@ include::modules/installing-oadp-aws-sts.adoc[leveloffset=+1] [role="_additional-resources"] .Additional resources -* xref:../../../operators/user/olm-installing-operators-in-namespace.adoc#olm-installing-from-operatorhub-using-web-console_olm-installing-operators-in-namespace[Installing from OperatorHub using the web console] +* xref:../../../operators/user/olm-installing-operators-in-namespace.adoc#olm-installing-from-operatorhub-using-web-console_olm-installing-operators-in-namespace[Installing from the software catalog using the web console] * xref:../../../backup_and_restore/application_backup_and_restore/backing_up_and_restoring/backing-up-applications.adoc#backing-up-applications[Backing up applications] [id="oadp-aws-sts-backing-up-and-cleaning"] @@ -41,4 +41,4 @@ include::modules/installing-oadp-aws-sts.adoc[leveloffset=+1] include::modules/performing-a-backup-oadp-aws-sts.adoc[leveloffset=+2] -include::modules/cleanup-a-backup-oadp-aws-sts.adoc[leveloffset=+2] \ No newline at end of file +include::modules/cleanup-a-backup-oadp-aws-sts.adoc[leveloffset=+2] diff --git a/backup_and_restore/application_backup_and_restore/installing/installing-oadp-aws.adoc b/backup_and_restore/application_backup_and_restore/installing/installing-oadp-aws.adoc index 89a8a058ff..d0ece5cb08 100644 --- a/backup_and_restore/application_backup_and_restore/installing/installing-oadp-aws.adoc +++ b/backup_and_restore/application_backup_and_restore/installing/installing-oadp-aws.adoc @@ -16,7 +16,7 @@ include::snippets/oadp-mtc-operator.adoc[] You configure AWS for Velero, create a default `Secret`, and then install the Data Protection Application. For more details, see xref:../../../backup_and_restore/application_backup_and_restore/installing/oadp-installing-operator.adoc#oadp-installing-operator-doc[Installing the OADP Operator]. -To install the OADP Operator in a restricted network environment, you must first disable the default OperatorHub sources and mirror the Operator catalog. See xref:../../../disconnected/using-olm.adoc#olm-restricted-networks[Using Operator Lifecycle Manager in disconnected environments] for details. +To install the OADP Operator in a restricted network environment, you must first disable the default software catalog sources and mirror the Operator catalog. See xref:../../../disconnected/using-olm.adoc#olm-restricted-networks[Using Operator Lifecycle Manager in disconnected environments] for details. 
include::modules/oadp-s3-and-gov-cloud.adoc[leveloffset=+1] diff --git a/backup_and_restore/application_backup_and_restore/installing/installing-oadp-azure.adoc b/backup_and_restore/application_backup_and_restore/installing/installing-oadp-azure.adoc index 0d435bf933..9e16df985e 100644 --- a/backup_and_restore/application_backup_and_restore/installing/installing-oadp-azure.adoc +++ b/backup_and_restore/application_backup_and_restore/installing/installing-oadp-azure.adoc @@ -16,7 +16,7 @@ include::snippets/oadp-mtc-operator.adoc[] You configure Azure for Velero, create a default `Secret`, and then install the Data Protection Application. For more details, see xref:../../../backup_and_restore/application_backup_and_restore/installing/oadp-installing-operator.adoc#oadp-installing-operator-doc[Installing the OADP Operator]. -To install the OADP Operator in a restricted network environment, you must first disable the default OperatorHub sources and mirror the Operator catalog. See xref:../../../disconnected/using-olm.adoc#olm-restricted-networks[Using Operator Lifecycle Manager in disconnected environments] for details. +To install the OADP Operator in a restricted network environment, you must first disable the default software catalog sources and mirror the Operator catalog. See xref:../../../disconnected/using-olm.adoc#olm-restricted-networks[Using Operator Lifecycle Manager in disconnected environments] for details. include::modules/migration-configuring-azure.adoc[leveloffset=+1] diff --git a/backup_and_restore/application_backup_and_restore/installing/installing-oadp-gcp.adoc b/backup_and_restore/application_backup_and_restore/installing/installing-oadp-gcp.adoc index e4d090139b..8fc764e5ae 100644 --- a/backup_and_restore/application_backup_and_restore/installing/installing-oadp-gcp.adoc +++ b/backup_and_restore/application_backup_and_restore/installing/installing-oadp-gcp.adoc @@ -16,7 +16,7 @@ include::snippets/oadp-mtc-operator.adoc[] You configure GCP for Velero, create a default `Secret`, and then install the Data Protection Application. For more details, see xref:../../../backup_and_restore/application_backup_and_restore/installing/oadp-installing-operator.adoc#oadp-installing-operator-doc[Installing the OADP Operator]. -To install the OADP Operator in a restricted network environment, you must first disable the default OperatorHub sources and mirror the Operator catalog. See xref:../../../disconnected/using-olm.adoc#olm-restricted-networks[Using Operator Lifecycle Manager in disconnected environments] for details. +To install the OADP Operator in a restricted network environment, you must first disable the default software catalog sources and mirror the Operator catalog. See xref:../../../disconnected/using-olm.adoc#olm-restricted-networks[Using Operator Lifecycle Manager in disconnected environments] for details. 
//include::modules/oadp-installing-operator.adoc[leveloffset=+1] include::modules/migration-configuring-gcp.adoc[leveloffset=+1] diff --git a/backup_and_restore/application_backup_and_restore/installing/installing-oadp-kubevirt.adoc b/backup_and_restore/application_backup_and_restore/installing/installing-oadp-kubevirt.adoc index 48c3072336..d52c9ef1b6 100644 --- a/backup_and_restore/application_backup_and_restore/installing/installing-oadp-kubevirt.adoc +++ b/backup_and_restore/application_backup_and_restore/installing/installing-oadp-kubevirt.adoc @@ -31,7 +31,7 @@ The following storage options are excluded: For more information, see xref:../../../backup_and_restore/application_backup_and_restore/backing_up_and_restoring/oadp-backing-up-applications-restic-doc.adoc#oadp-backing-up-applications-restic-doc[Backing up applications with File System Backup: Kopia or Restic]. ==== -To install the OADP Operator in a restricted network environment, you must first disable the default OperatorHub sources and mirror the Operator catalog. See xref:../../../disconnected/using-olm.adoc#olm-restricted-networks[Using Operator Lifecycle Manager in disconnected environments] for details. +To install the OADP Operator in a restricted network environment, you must first disable the default software catalog sources and mirror the Operator catalog. See xref:../../../disconnected/using-olm.adoc#olm-restricted-networks[Using Operator Lifecycle Manager in disconnected environments] for details. include::modules/install-and-configure-oadp-kubevirt.adoc[leveloffset=+1] diff --git a/backup_and_restore/application_backup_and_restore/installing/installing-oadp-mcg.adoc b/backup_and_restore/application_backup_and_restore/installing/installing-oadp-mcg.adoc index cb8bd6ba38..03dc88d9d7 100644 --- a/backup_and_restore/application_backup_and_restore/installing/installing-oadp-mcg.adoc +++ b/backup_and_restore/application_backup_and_restore/installing/installing-oadp-mcg.adoc @@ -18,7 +18,7 @@ include::snippets/oadp-mtc-operator.adoc[] You can create a `Secret` CR for the backup location and install the Data Protection Application. For more details, see xref:../../../backup_and_restore/application_backup_and_restore/installing/oadp-installing-operator.adoc#oadp-installing-operator-doc[Installing the OADP Operator]. -To install the OADP Operator in a restricted network environment, you must first disable the default OperatorHub sources and mirror the Operator catalog. For details, see xref:../../../disconnected/using-olm.adoc#olm-restricted-networks[Using Operator Lifecycle Manager in disconnected environments]. +To install the OADP Operator in a restricted network environment, you must first disable the default software catalog sources and mirror the Operator catalog. For details, see xref:../../../disconnected/using-olm.adoc#olm-restricted-networks[Using Operator Lifecycle Manager in disconnected environments]. 
//include::modules/oadp-installing-operator.adoc[leveloffset=+1] include::modules/migration-configuring-mcg.adoc[leveloffset=+1] diff --git a/backup_and_restore/application_backup_and_restore/installing/installing-oadp-ocs.adoc b/backup_and_restore/application_backup_and_restore/installing/installing-oadp-ocs.adoc index 3b1e7dafe0..97faa72747 100644 --- a/backup_and_restore/application_backup_and_restore/installing/installing-oadp-ocs.adoc +++ b/backup_and_restore/application_backup_and_restore/installing/installing-oadp-ocs.adoc @@ -18,7 +18,7 @@ You can configure xref:../../../backup_and_restore/application_backup_and_restor You can create a `Secret` CR for the backup location and install the Data Protection Application. For more details, see xref:../../../backup_and_restore/application_backup_and_restore/installing/oadp-installing-operator.adoc#oadp-installing-operator-doc[Installing the OADP Operator]. -To install the OADP Operator in a restricted network environment, you must first disable the default OperatorHub sources and mirror the Operator catalog. For details, see xref:../../../disconnected/using-olm.adoc#olm-restricted-networks[Using Operator Lifecycle Manager in disconnected environments]. +To install the OADP Operator in a restricted network environment, you must first disable the default software catalog sources and mirror the Operator catalog. For details, see xref:../../../disconnected/using-olm.adoc#olm-restricted-networks[Using Operator Lifecycle Manager in disconnected environments]. //include::modules/oadp-installing-operator.adoc[leveloffset=+1] include::modules/oadp-about-backup-snapshot-locations-secrets.adoc[leveloffset=+1] diff --git a/backup_and_restore/application_backup_and_restore/oadp-rosa/oadp-rosa-backing-up-applications.adoc b/backup_and_restore/application_backup_and_restore/oadp-rosa/oadp-rosa-backing-up-applications.adoc index ac2082e2e6..66cdeef811 100644 --- a/backup_and_restore/application_backup_and_restore/oadp-rosa/oadp-rosa-backing-up-applications.adoc +++ b/backup_and_restore/application_backup_and_restore/oadp-rosa/oadp-rosa-backing-up-applications.adoc @@ -47,7 +47,7 @@ include::modules/updating-role-arn-oadp-rosa-sts.adoc[leveloffset=+1] .Additional resources // This xref points to a topic that is not published in the ROSA docs. ifndef::openshift-rosa,openshift-rosa-hcp[] -* xref:../../../operators/user/olm-installing-operators-in-namespace.adoc#olm-installing-from-operatorhub-using-web-console_olm-installing-operators-in-namespace[Installing from OperatorHub using the web console]. +* xref:../../../operators/user/olm-installing-operators-in-namespace.adoc#olm-installing-from-operatorhub-using-web-console_olm-installing-operators-in-namespace[Installing from the software catalog using the web console]. endif::openshift-rosa,openshift-rosa-hcp[] * xref:../../../backup_and_restore/application_backup_and_restore/backing_up_and_restoring/backing-up-applications.adoc#backing-up-applications[Backing up applications] diff --git a/cicd/pipelines/creating-applications-with-cicd-pipelines.adoc b/cicd/pipelines/creating-applications-with-cicd-pipelines.adoc index 18458e05cd..e3624f23b4 100644 --- a/cicd/pipelines/creating-applications-with-cicd-pipelines.adoc +++ b/cicd/pipelines/creating-applications-with-cicd-pipelines.adoc @@ -27,7 +27,7 @@ This section uses the `pipelines-tutorial` example to demonstrate the preceding == Prerequisites * You have access to an {product-title} cluster. 
-* You have installed xref:../../cicd/pipelines/installing-pipelines.adoc#installing-pipelines[{pipelines-shortname}] using the {pipelines-title} Operator listed in the OpenShift OperatorHub. After it is installed, it is applicable to the entire cluster. +* You have installed xref:../../cicd/pipelines/installing-pipelines.adoc#installing-pipelines[{pipelines-shortname}] using the {pipelines-title} Operator listed in the software catalog. After it is installed, it is applicable to the entire cluster. * You have installed xref:../../cli_reference/tkn_cli/installing-tkn.adoc#installing-tkn[{pipelines-shortname} CLI]. * You have forked the front-end link:https://github.com/openshift/pipelines-vote-ui/tree/{pipelines-ver}[`pipelines-vote-ui`] and back-end link:https://github.com/openshift/pipelines-vote-api/tree/{pipelines-ver}[`pipelines-vote-api`] Git repositories using your GitHub ID, and have administrator access to these repositories. * Optional: You have cloned the link:https://github.com/openshift/pipelines-tutorial/tree/{pipelines-ver}[`pipelines-tutorial`] Git repository. diff --git a/cloud_experts_tutorials/cloud-experts-deploying-application/cloud-experts-deploying-application-integrating-aws.adoc b/cloud_experts_tutorials/cloud-experts-deploying-application/cloud-experts-deploying-application-integrating-aws.adoc index db7eaac58b..12d23a4418 100644 --- a/cloud_experts_tutorials/cloud-experts-deploying-application/cloud-experts-deploying-application-integrating-aws.adoc +++ b/cloud_experts_tutorials/cloud-experts-deploying-application/cloud-experts-deploying-application-integrating-aws.adoc @@ -49,7 +49,7 @@ Install the ACK controller to create and delete buckets in the S3 service by usi We will use an Operator to make it easy. The Operator installation will also create an `ack-system` namespace and a service account `ack-s3-controller` for you. . Log in to the cluster console. -. On the left menu, click *Operators*, then *OperatorHub*. +. On the left menu, click *Ecosystem*, then *Software Catalog*. . In the filter box, enter "S3" and select *AWS Controller for Kubernetes - Amazon S3*. + image:cloud-experts-deploying-integrating-ack-operator.png[] @@ -390,4 +390,4 @@ $ aws s3 ls s3://${OSTOY_NAMESPACE}-bucket ---- $ aws s3 ls s3://ostoy-bucket 2023-05-04 22:20:51 51 OSToy.txt ----- \ No newline at end of file +---- diff --git a/cloud_experts_tutorials/cloud-experts-external-dns.adoc b/cloud_experts_tutorials/cloud-experts-external-dns.adoc index dfa7368026..e71180754e 100644 --- a/cloud_experts_tutorials/cloud-experts-external-dns.adoc +++ b/cloud_experts_tutorials/cloud-experts-external-dns.adoc @@ -261,7 +261,7 @@ EOF $ oc new-project external-dns-operator ---- -. Install the `External DNS` Operator from OperatorHub: +. Install the `External DNS` Operator from the software catalog: + [source,terminal] ---- diff --git a/cloud_experts_tutorials/cloud-experts-using-aws-ack.adoc b/cloud_experts_tutorials/cloud-experts-using-aws-ack.adoc index 4b93fdc602..4e496681d1 100644 --- a/cloud_experts_tutorials/cloud-experts-using-aws-ack.adoc +++ b/cloud_experts_tutorials/cloud-experts-using-aws-ack.adoc @@ -20,9 +20,9 @@ toc::[] link:https://aws-controllers-k8s.github.io/community/[AWS Controllers for Kubernetes] (ACK) lets you define and use AWS service resources directly from {product-title}. 
With ACK, you can take advantage of AWS-managed services for your applications without needing to define resources outside of the cluster or run services that provide supporting capabilities such as databases or message queues within the cluster. -You can install various ACK Operators directly from OperatorHub. This makes it easy to get started and use the Operators with your applications. This controller is a component of the AWS Controller for Kubernetes project, which is currently in developer preview. +You can install various ACK Operators directly from the software catalog. This makes it easy to get started and use the Operators with your applications. This controller is a component of the AWS Controller for Kubernetes project, which is currently in developer preview. -Use this tutorial to deploy the ACK S3 Operator. You can also adapt it for any other ACK Operator in the OperatorHub of your cluster. +Use this tutorial to deploy the ACK S3 Operator. You can also adapt it for any other ACK Operator in the software catalog of your cluster. [id="cloud-experts-using-aws-ack-prerequisites"] == Prerequisites @@ -145,7 +145,7 @@ $ oc -n ack-system create configmap \ --from-env-file=${SCRATCH}/config.txt ack-${ACK_SERVICE}-user-config ---- + -. Install the ACK S3 Operator from OperatorHub: +. Install the ACK S3 Operator from the software catalog: + [source,terminal] ---- diff --git a/disconnected/mirroring/installing-mirroring-installation-images.adoc b/disconnected/mirroring/installing-mirroring-installation-images.adoc index 8b25548b19..66e5b44a86 100644 --- a/disconnected/mirroring/installing-mirroring-installation-images.adoc +++ b/disconnected/mirroring/installing-mirroring-installation-images.adoc @@ -127,7 +127,7 @@ include::modules/olm-mirroring-catalog-post.adoc[leveloffset=+2] [role="_additional-resources"] .Additional resources -* xref:../../post_installation_configuration/preparing-for-users.adoc#post-install-mirrored-catalogs[Populating OperatorHub from mirrored Operator catalogs] +* xref:../../post_installation_configuration/preparing-for-users.adoc#post-install-mirrored-catalogs[Populating the software catalog from mirrored Operator catalogs] * xref:../../operators/admin/olm-managing-custom-catalogs.adoc#olm-filtering-fbc_olm-managing-custom-catalogs[Updating or filtering a file-based catalog image] [id="next-steps_installing-mirroring-installation-images"] diff --git a/disconnected/updating/disconnected-update-osus.adoc b/disconnected/updating/disconnected-update-osus.adoc index e66608c81a..4517ba2dde 100644 --- a/disconnected/updating/disconnected-update-osus.adoc +++ b/disconnected/updating/disconnected-update-osus.adoc @@ -73,7 +73,7 @@ To install the OpenShift Update Service, you must first install the OpenShift Up [NOTE] ==== -For clusters that are installed in disconnected environments, also known as disconnected clusters, Operator Lifecycle Manager by default cannot access the Red Hat-provided OperatorHub sources hosted on remote registries because those remote sources require full internet connectivity. For more information, see xref:../../disconnected/using-olm.adoc#olm-restricted-networks[Using Operator Lifecycle Manager in disconnected environments]. +For clusters that are installed in disconnected environments, also known as disconnected clusters, Operator Lifecycle Manager by default cannot access the Red Hat-provided software catalog sources hosted on remote registries because those remote sources require full internet connectivity. 
For more information, see xref:../../disconnected/using-olm.adoc#olm-restricted-networks[Using Operator Lifecycle Manager in disconnected environments]. ==== // Installing the OpenShift Update Service Operator by using the web console diff --git a/disconnected/updating/disconnected-update.adoc b/disconnected/updating/disconnected-update.adoc index cccc24f5f3..695739c172 100644 --- a/disconnected/updating/disconnected-update.adoc +++ b/disconnected/updating/disconnected-update.adoc @@ -21,7 +21,7 @@ Use the following procedures to update a cluster in a disconnected environment w * You must have access to the cluster as a user with `admin` privileges. See xref:../../authentication/using-rbac.adoc#using-rbac[Using RBAC to define and apply permissions]. * You must have a recent xref:../../backup_and_restore/control_plane_backup_and_restore/backing-up-etcd.adoc#backup-etcd[etcd backup] in case your update fails and you must xref:../../backup_and_restore/control_plane_backup_and_restore/disaster_recovery/scenario-2-restoring-cluster-state.adoc#dr-restoring-cluster-state[restore your cluster to a previous state]. -* You have updated all Operators previously installed through Operator Lifecycle Manager (OLM) to a version that is compatible with your target release. Updating the Operators ensures they have a valid update path when the default OperatorHub catalogs switch from the current minor version to the next during a cluster update. See xref:../../operators/admin/olm-upgrading-operators.adoc#olm-upgrading-operators[Updating installed Operators] for more information on how to check compatibility and, if necessary, update the installed Operators. +* You have updated all Operators previously installed through Operator Lifecycle Manager (OLM) to a version that is compatible with your target release. Updating the Operators ensures they have a valid update path when the default catalog sources switch from the current minor version to the next during a cluster update. See xref:../../operators/admin/olm-upgrading-operators.adoc#olm-upgrading-operators[Updating installed Operators] for more information on how to check compatibility and, if necessary, update the installed Operators. * You must ensure that all machine config pools (MCPs) are running and not paused. Nodes associated with a paused MCP are skipped during the update process. You can pause the MCPs if you are performing a canary rollout update strategy. * If your cluster uses manually maintained credentials, update the cloud provider resources for the new release. For more information, including how to determine if this is a requirement for your cluster, see xref:../../updating/preparing_for_updates/preparing-manual-creds-update.adoc#preparing-manual-creds-update[Preparing to update a cluster with manually maintained credentials]. * If you run an Operator or you have configured any application with the pod disruption budget, you might experience an interruption during the update process. If `minAvailable` is set to 1 in `PodDisruptionBudget`, the nodes are drained to apply pending machine configs which might block the eviction process. If several nodes are rebooted, all the pods might run on only one node, and the `PodDisruptionBudget` field can prevent the node drain. 
diff --git a/disconnected/using-olm.adoc b/disconnected/using-olm.adoc index 674afea679..ce406839fd 100644 --- a/disconnected/using-olm.adoc +++ b/disconnected/using-olm.adoc @@ -6,19 +6,19 @@ include::_attributes/common-attributes.adoc[] toc::[] -For {product-title} clusters in disconnected environments, Operator Lifecycle Manager (OLM) by default cannot access the Red{nbsp}Hat-provided OperatorHub sources hosted on remote registries because those remote sources require full internet connectivity. +For {product-title} clusters in disconnected environments, Operator Lifecycle Manager (OLM) by default cannot access the Red{nbsp}Hat-provided software catalog sources hosted on remote registries because those remote sources require full internet connectivity. -However, as a cluster administrator you can still enable your cluster to use OLM in a disconnected environment if you have a workstation that has full internet access. The workstation, which requires full internet access to pull the remote OperatorHub content, is used to prepare local mirrors of the remote sources, and push the content to a mirror registry. +However, as a cluster administrator you can still enable your cluster to use OLM in a disconnected environment if you have a workstation that has full internet access. The workstation, which requires full internet access to pull the remote software catalog content, is used to prepare local mirrors of the remote sources, and push the content to a mirror registry. The mirror registry can be located on a bastion host, which requires connectivity to both your workstation and the disconnected cluster, or a completely disconnected, or _airgapped_, host, which requires removable media to physically move the mirrored content to the disconnected environment. This guide describes the following process that is required to enable OLM in disconnected environments: -* Disable the default remote OperatorHub sources for OLM. -* Use a workstation with full internet access to create and push local mirrors of the OperatorHub content to a mirror registry. +* Disable the default remote software catalog sources for OLM. +* Use a workstation with full internet access to create and push local mirrors of the software catalog content to a mirror registry. * Configure OLM to install and manage Operators from local sources on the mirror registry instead of the default remote sources. -After enabling OLM in a disconnected environment, you can continue to use your unrestricted workstation to keep your local OperatorHub sources updated as newer versions of Operators are released. +After enabling OLM in a disconnected environment, you can continue to use your unrestricted workstation to keep your local software catalog sources updated as newer versions of Operators are released. [IMPORTANT] ==== diff --git a/metering/metering-installing-metering.adoc b/metering/metering-installing-metering.adoc index c31ae259dd..a88c10c79b 100644 --- a/metering/metering-installing-metering.adoc +++ b/metering/metering-installing-metering.adoc @@ -11,7 +11,7 @@ include::modules/deprecated-feature.adoc[leveloffset=+1] Review the following sections before installing metering into your cluster. -To get started installing metering, first install the Metering Operator from OperatorHub. Next, configure your instance of metering by creating a `MeteringConfig` custom resource (CR). Installing the Metering Operator creates a default `MeteringConfig` resource that you can modify using the examples in the documentation. 
After creating your `MeteringConfig` resource, install the metering stack. Last, verify your installation. +To get started installing metering, first install the Metering Operator from the software catalog. Next, configure your instance of metering by creating a `MeteringConfig` custom resource (CR). Installing the Metering Operator creates a default `MeteringConfig` resource that you can modify using the examples in the documentation. After creating your `MeteringConfig` resource, install the metering stack. Last, verify your installation. include::modules/metering-install-prerequisites.adoc[leveloffset=+1] @@ -38,7 +38,7 @@ There can only be one `MeteringConfig` resource in the `openshift-metering` name .Procedure -. From the web console, ensure you are on the *Operator Details* page for the Metering Operator in the `openshift-metering` project. You can navigate to this page by clicking *Operators* -> *Installed Operators*, then selecting the Metering Operator. +. From the web console, ensure you are on the *Operator Details* page for the Metering Operator in the `openshift-metering` project. You can navigate to this page by clicking *Ecosystem* -> *Installed Operators*, then selecting the Metering Operator. . Under *Provided APIs*, click *Create Instance* on the Metering Configuration card. This opens a YAML editor with the default `MeteringConfig` resource file where you can define your configuration. + diff --git a/metering/metering-upgrading-metering.adoc b/metering/metering-upgrading-metering.adoc index 15c1703ba2..f9a04cfd3d 100644 --- a/metering/metering-upgrading-metering.adoc +++ b/metering/metering-upgrading-metering.adoc @@ -14,7 +14,7 @@ You can upgrade metering to {product-version} by updating the Metering Operator == Prerequisites * The cluster is updated to {product-version}. -* The xref:../metering/metering-installing-metering.adoc#metering-install-operator_installing-metering[Metering Operator] is installed from OperatorHub. +* The xref:../metering/metering-installing-metering.adoc#metering-install-operator_installing-metering[Metering Operator] is installed from the software catalog. + [NOTE] ==== @@ -31,7 +31,7 @@ Potential data loss can occur if you modify your metering storage configuration .Procedure -. Click *Operators* -> *Installed Operators* from the web console. +. Click *Ecosystem* -> *Installed Operators* from the web console. . Select the `openshift-metering` project. @@ -45,7 +45,7 @@ Potential data loss can occur if you modify your metering storage configuration ==== Wait several seconds to allow the subscription to update before proceeding to the next step. ==== -. Click *Operators* -> *Installed Operators*. +. Click *Ecosystem* -> *Installed Operators*. + The Metering Operator is shown as 4.9. For example: + @@ -61,7 +61,7 @@ You can verify the metering upgrade by performing any of the following checks: + -- .Procedure (UI) - . Navigate to *Operators* -> *Installed Operators* in the metering namespace. + . Navigate to *Ecosystem* -> *Installed Operators* in the metering namespace. . Click *Metering Operator*. . Click *Subscription* for *Subscription Details*. . Check the *Installed Version* for the upgraded metering version. The *Starting Version* shows the metering version prior to upgrading. 
diff --git a/microshift_running_apps/microshift_operators/microshift-operators-olm.adoc b/microshift_running_apps/microshift_operators/microshift-operators-olm.adoc index d45d5e3838..facf5035c9 100644 --- a/microshift_running_apps/microshift_operators/microshift-operators-olm.adoc +++ b/microshift_running_apps/microshift_operators/microshift-operators-olm.adoc @@ -16,7 +16,7 @@ Operator Lifecycle Manager (OLM) is used in {microshift-short} for installing an * Cluster Operators as applied in {ocp} are not used in {microshift-short}. * You must create your own catalogs for the add-on Operators you want to use with your applications. Catalogs are not provided by default. ** Each catalog must have an accessible `CatalogSource` added to a cluster, so that the OLM catalog Operator can use the catalog for content. -* You must use the CLI to conduct OLM activities with {microshift-short}. The console and OperatorHub GUIs are not available. +* You must use the CLI to conduct OLM activities with {microshift-short}. The console, software catalog, and catalog management GUIs are not available. ** Use the link:https://access.redhat.com/documentation/en-us/openshift_container_platform/{ocp-version}/html/cli_tools/opm-cli#cli-opm-install[Operator Package Manager `opm` CLI] with network-connected clusters, or for building catalogs for custom Operators that use an internal registry. ** To mirror your catalogs and Operators for disconnected or offline clusters, install link:https://docs.openshift.com/container-platform/{ocp-version}/installing/disconnected_install/installing-mirroring-disconnected.html#installation-oc-mirror-installing-plugin_installing-mirroring-disconnected[the oc-mirror OpenShift CLI plugin]. @@ -60,4 +60,4 @@ include::modules/microshift-olm-deploy-ops-spec-ns.adoc[leveloffset=+2] [role="_additional-resources"] .Additional resources * link:https://access.redhat.com/documentation/en-us/openshift_container_platform/{ocp-version}/html/operators/administrator-tasks#olm-upgrading-operators[Updating installed Operators] -* link:https://access.redhat.com/documentation/en-us/openshift_container_platform/{ocp-version}/html/operators/administrator-tasks#olm-deleting-operator-from-a-cluster-using-cli_olm-deleting-operators-from-a-cluster[Deleting Operators from a cluster using the CLI] \ No newline at end of file +* link:https://access.redhat.com/documentation/en-us/openshift_container_platform/{ocp-version}/html/operators/administrator-tasks#olm-deleting-operator-from-a-cluster-using-cli_olm-deleting-operators-from-a-cluster[Deleting Operators from a cluster using the CLI] diff --git a/modules/about-administrator_web-console.adoc b/modules/about-administrator_web-console.adoc index de5ff6ce86..56bc652646 100644 --- a/modules/about-administrator_web-console.adoc +++ b/modules/about-administrator_web-console.adoc @@ -18,7 +18,7 @@ endif::openshift-rosa-hcp[] The *Administrator* perspective provides workflows specific to administrator use cases, such as the ability to: * Manage workload, storage, networking, and cluster settings. -* Install and manage Operators using the OperatorHub. +* Install and manage Operators using the software catalog. * Add identity providers that allow users to log in and manage user access through roles and role bindings. * View and manage a variety of advanced settings such as cluster updates, partial cluster updates, cluster Operators, custom resource definitions (CRDs), role bindings, and resource quotas. 
* Access and manage monitoring features such as metrics, alerts, and monitoring dashboards. diff --git a/modules/arch-olm-operators.adoc b/modules/arch-olm-operators.adoc index 9aac5dddbb..61bf211d92 100644 --- a/modules/arch-olm-operators.adoc +++ b/modules/arch-olm-operators.adoc @@ -5,16 +5,16 @@ [id="olm-operators_{context}"] = Add-on Operators -Operator Lifecycle Manager (OLM) and OperatorHub are default components in {product-title} that help manage Kubernetes-native applications as Operators. Together they provide the system for discovering, installing, and managing the optional add-on Operators available on the cluster. +Operator Lifecycle Manager (OLM) and the software catalog are default components in {product-title} that help manage Kubernetes-native applications as Operators. Together they provide the system for discovering, installing, and managing the optional add-on Operators available on the cluster. -Using OperatorHub in the {product-title} web console, +Using the software catalog in the {product-title} web console, ifndef::openshift-dedicated,openshift-rosa[] cluster administrators endif::openshift-dedicated,openshift-rosa[] ifdef::openshift-dedicated,openshift-rosa[] administrators with the `dedicated-admin` role endif::openshift-dedicated,openshift-rosa[] -and authorized users can select Operators to install from catalogs of Operators. After installing an Operator from OperatorHub, it can be made available globally or in specific namespaces to run in user applications. +and authorized users can select Operators to install from catalogs of Operators. After installing an Operator from the software catalog, it can be made available globally or in specific namespaces to run in user applications. Default catalog sources are available that include Red Hat Operators, certified Operators, and community Operators. ifndef::openshift-dedicated,openshift-rosa[] diff --git a/modules/architecture-platform-benefits.adoc b/modules/architecture-platform-benefits.adoc index 3ea29d9ee3..ed90297236 100644 --- a/modules/architecture-platform-benefits.adoc +++ b/modules/architecture-platform-benefits.adoc @@ -90,7 +90,7 @@ for your applications to use. In {product-title}, Operators serve as the platfor Cluster Version Operator and Machine Config Operator allow simplified, cluster-wide management of those critical components. -Operator Lifecycle Manager (OLM) and the OperatorHub provide facilities for +Operator Lifecycle Manager (OLM) and the software catalog provide facilities for storing and distributing Operators to people developing and deploying applications. The {quay} Container Registry is a Quay.io container registry that serves diff --git a/modules/cco-short-term-creds-aws-olm.adoc b/modules/cco-short-term-creds-aws-olm.adoc index 642e1614e8..9b8de01768 100644 --- a/modules/cco-short-term-creds-aws-olm.adoc +++ b/modules/cco-short-term-creds-aws-olm.adoc @@ -8,4 +8,4 @@ Certain Operators managed by the Operator Lifecycle Manager (OLM) on {aws-short} clusters can use manual mode with {sts-short}. These Operators authenticate with limited-privilege, short-term credentials that are managed outside the cluster. -To determine if an Operator supports authentication with {aws-short} {sts-short}, see the Operator description in OperatorHub. +To determine if an Operator supports authentication with {aws-short} {sts-short}, see the Operator description in the software catalog. 
diff --git a/modules/cco-short-term-creds-azure-olm.adoc b/modules/cco-short-term-creds-azure-olm.adoc index 7d14eb7866..be624d3f87 100644 --- a/modules/cco-short-term-creds-azure-olm.adoc +++ b/modules/cco-short-term-creds-azure-olm.adoc @@ -8,4 +8,4 @@ Certain Operators managed by the Operator Lifecycle Manager (OLM) on {azure-short} clusters can use manual mode with {entra-first}. These Operators authenticate with short-term credentials that are managed outside the cluster. -To determine if an Operator supports authentication with {entra-short}, see the Operator description in OperatorHub. +To determine if an Operator supports authentication with {entra-short}, see the Operator description in the software catalog. diff --git a/modules/cco-short-term-creds-gcp-olm.adoc b/modules/cco-short-term-creds-gcp-olm.adoc index 519898f4b3..3cbdc09a0a 100644 --- a/modules/cco-short-term-creds-gcp-olm.adoc +++ b/modules/cco-short-term-creds-gcp-olm.adoc @@ -8,4 +8,4 @@ Certain Operators managed by the Operator Lifecycle Manager (OLM) on {gcp-short} clusters can use manual mode with {gcp-wid-short}. These Operators authenticate with limited-privilege, short-term credentials that are managed outside the cluster. -To determine if an Operator supports authentication with {gcp-wid-short}, see the Operator description in OperatorHub. +To determine if an Operator supports authentication with {gcp-wid-short}, see the Operator description in the software catalog. diff --git a/modules/cert-manager-install-console.adoc b/modules/cert-manager-install-console.adoc index 096b657aec..4f8c4101c9 100644 --- a/modules/cert-manager-install-console.adoc +++ b/modules/cert-manager-install-console.adoc @@ -17,7 +17,7 @@ You can use the web console to install the {cert-manager-operator}. . Log in to the {product-title} web console. -. Navigate to *Operators* -> *OperatorHub*. +. Navigate to *Ecosystem* -> *Software Catalog*. . Enter *{cert-manager-operator}* into the filter box. @@ -51,7 +51,7 @@ During the installation, the {product-title} web console allows you to select b .Verification -. Navigate to *Operators* -> *Installed Operators*. +. Navigate to *Ecosystem* -> *Installed Operators*. . Verify that *{cert-manager-operator}* is listed with a *Status* of *Succeeded* in the `cert-manager-operator` namespace. . Verify that cert-manager pods are up and running by entering the following command: + diff --git a/modules/cert-manager-uninstall-console.adoc b/modules/cert-manager-uninstall-console.adoc index d22f7cb446..a4bd79128c 100644 --- a/modules/cert-manager-uninstall-console.adoc +++ b/modules/cert-manager-uninstall-console.adoc @@ -19,6 +19,6 @@ You can uninstall the {cert-manager-operator} by using the web console. . Log in to the {product-title} web console. . Uninstall the {cert-manager-operator} Operator. -.. Navigate to *Operators* -> *Installed Operators*. +.. Navigate to *Ecosystem* -> *Installed Operators*. .. Click the Options menu {kebab} next to the *{cert-manager-operator}* entry and click *Uninstall Operator*. .. In the confirmation dialog, click *Uninstall*. diff --git a/modules/cli-manager-adding-plugin-yaml.adoc b/modules/cli-manager-adding-plugin-yaml.adoc index 603c74bf96..b27535cf50 100644 --- a/modules/cli-manager-adding-plugin-yaml.adoc +++ b/modules/cli-manager-adding-plugin-yaml.adoc @@ -17,7 +17,7 @@ You can add a CLI plugin to the {cli-manager} by using the YAML View. . Log in to the {product-title} web console. -. Navigate to *Operators* -> *Installed Operators*. +. 
Navigate to *Ecosystem* -> *Installed Operators*. . From the list, select *{cli-manager}*. @@ -70,4 +70,4 @@ $ oc get plugin/ -o yaml [source,terminal] ---- ready to be served. ----- \ No newline at end of file +---- diff --git a/modules/cli-manager-installing.adoc b/modules/cli-manager-installing.adoc index b8366633e8..ff3137979b 100644 --- a/modules/cli-manager-installing.adoc +++ b/modules/cli-manager-installing.adoc @@ -23,7 +23,7 @@ Install the {cli-manager} to facilitate adding CLI plugins in both connected and .. In the *Name* field, enter `openshift-cli-manager-operator` and click *Create*. . Install the {cli-manager}: -.. Navigate to *Operators* -> *OperatorHub*. +.. Navigate to *Ecosystem* -> *Software Catalog*. .. In the filter box, enter *{cli-manager}*. .. Select the *{cli-manager}* and click *Install*. .. On the *Install Operator* page, complete the following steps: @@ -42,5 +42,5 @@ Install the {cli-manager} to facilitate adding CLI plugins in both connected and .Verification -. Navigate to *Operators* -> *Installed Operators*. -. Verify that *{cli-manager}* is listed with a *Status* of *Succeeded*. \ No newline at end of file +. Navigate to *Ecosystem* -> *Installed Operators*. +. Verify that *{cli-manager}* is listed with a *Status* of *Succeeded*. diff --git a/modules/cli-manager-uninstalling.adoc b/modules/cli-manager-uninstalling.adoc index 9bda51ce43..3129ba9876 100644 --- a/modules/cli-manager-uninstalling.adoc +++ b/modules/cli-manager-uninstalling.adoc @@ -18,6 +18,6 @@ You can uninstall the {cli-manager} by using the web console. . Log in to the {product-title} web console. . Uninstall the {cli-manager} by completing the following steps: -.. Navigate to *Operators* -> *Installed Operators*. +.. Navigate to *Ecosystem* -> *Installed Operators*. .. Click the Options menu {kebab} next to the *{cli-manager}* entry and click *Uninstall Operator*. .. In the confirmation dialog, click *Uninstall*. diff --git a/modules/cluster-logging-updating-logging-to-5-1.adoc b/modules/cluster-logging-updating-logging-to-5-1.adoc index 8077f9311f..e6dde6db04 100644 --- a/modules/cluster-logging-updating-logging-to-5-1.adoc +++ b/modules/cluster-logging-updating-logging-to-5-1.adoc @@ -27,7 +27,7 @@ If you update the operators in the wrong order, Kibana does not update and the K . Update the OpenShift Elasticsearch Operator: -.. From the web console, click *Operators* -> *Installed Operators*. +.. From the web console, click *Ecosystem* -> *Installed Operators*. .. Select the `openshift-operators-redhat` project. @@ -37,7 +37,7 @@ If you update the operators in the wrong order, Kibana does not update and the K .. In the *Change Subscription Update Channel* window, select *stable-5.x* and click *Save*. -.. Wait for a few seconds, then click *Operators* -> *Installed Operators*. +.. Wait for a few seconds, then click *Ecosystem* -> *Installed Operators*. + Verify that the OpenShift Elasticsearch Operator version is 5.x.x. + @@ -45,7 +45,7 @@ Wait for the *Status* field to report *Succeeded*. . Update the Red Hat OpenShift Logging Operator: -.. From the web console, click *Operators* -> *Installed Operators*. +.. From the web console, click *Ecosystem* -> *Installed Operators*. .. Select the `openshift-logging` project. @@ -55,7 +55,7 @@ Wait for the *Status* field to report *Succeeded*. .. In the *Change Subscription Update Channel* window, select *stable-5.x* and click *Save*. -.. Wait for a few seconds, then click *Operators* -> *Installed Operators*. +.. 
Wait for a few seconds, then click *Ecosystem* -> *Installed Operators*. + Verify that the Red Hat OpenShift Logging Operator version is 5.x.x. + diff --git a/modules/cluster-logging-upgrading-elasticsearch.adoc b/modules/cluster-logging-upgrading-elasticsearch.adoc index 47450c4af8..7e26116b98 100644 --- a/modules/cluster-logging-upgrading-elasticsearch.adoc +++ b/modules/cluster-logging-upgrading-elasticsearch.adoc @@ -30,10 +30,10 @@ If you update the Operators in the wrong order, Kibana does not update and the K .Procedure ifndef::openshift-rosa,openshift-dedicated[] -. In the {product-title} web console, click *Operators* -> *Installed Operators*. +. In the {product-title} web console, click *Ecosystem* -> *Installed Operators*. endif::[] ifdef::openshift-rosa,openshift-dedicated[] -. In the {hybrid-console}, click *Operators* -> *Installed Operators*. +. In the {hybrid-console}, click *Ecosystem* -> *Installed Operators*. endif::[] . Select the *openshift-operators-redhat* project. @@ -44,9 +44,9 @@ endif::[] . In the *Change Subscription Update Channel* window, select *stable-5.y* and click *Save*. Note the `elasticsearch-operator.v5.y.z` version. -. Wait for a few seconds, then click *Operators* -> *Installed Operators*. Verify that the {es-op} version matches the latest `elasticsearch-operator.v5.y.z` version. +. Wait for a few seconds, then click *Ecosystem* -> *Installed Operators*. Verify that the {es-op} version matches the latest `elasticsearch-operator.v5.y.z` version. -. On the *Operators* -> *Installed Operators* page, wait for the *Status* field to report *Succeeded*. +. On the *Ecosystem* -> *Installed Operators* page, wait for the *Status* field to report *Succeeded*. .Verification diff --git a/modules/cnf-image-based-upgrade-installing-lifecycle-agent-using-web-console.adoc b/modules/cnf-image-based-upgrade-installing-lifecycle-agent-using-web-console.adoc index 86e615bc7b..7d4e75217d 100644 --- a/modules/cnf-image-based-upgrade-installing-lifecycle-agent-using-web-console.adoc +++ b/modules/cnf-image-based-upgrade-installing-lifecycle-agent-using-web-console.adoc @@ -13,7 +13,7 @@ You can use the {product-title} web console to install the {lcao}. .Procedure -. In the {product-title} web console, navigate to *Operators* -> *OperatorHub*. +. In the {product-title} web console, navigate to *Ecosystem* -> *Software Catalog*. . Search for the *{lcao}* from the list of available Operators, and then click *Install*. . On the *Install Operator* page, under *A specific namespace on the cluster* select *openshift-lifecycle-agent*. . Click *Install*. @@ -22,7 +22,7 @@ You can use the {product-title} web console to install the {lcao}. . To confirm that the installation is successful: -.. Click *Operators* -> *Installed Operators*. +.. Click *Ecosystem* -> *Installed Operators*. .. Ensure that the {lcao} is listed in the *openshift-lifecycle-agent* project with a *Status* of *InstallSucceeded*. + [NOTE] @@ -32,5 +32,5 @@ During installation an Operator might display a *Failed* status. If the installa If the Operator is not installed successfully: -. Click *Operators* -> *Installed Operators*, and inspect the *Operator Subscriptions* and *Install Plans* tabs for any failure or errors under *Status*. -. Click *Workloads* -> *Pods*, and check the logs for pods in the *openshift-lifecycle-agent* project. \ No newline at end of file +. Click *Ecosystem* -> *Installed Operators*, and inspect the *Operator Subscriptions* and *Install Plans* tabs for any failure or errors under *Status*. 
+. Click *Workloads* -> *Pods*, and check the logs for pods in the *openshift-lifecycle-agent* project. diff --git a/modules/cnf-installing-numa-resources-operator-console.adoc b/modules/cnf-installing-numa-resources-operator-console.adoc index a272c4c0a8..37712f8722 100644 --- a/modules/cnf-installing-numa-resources-operator-console.adoc +++ b/modules/cnf-installing-numa-resources-operator-console.adoc @@ -18,7 +18,7 @@ As a cluster administrator, you can install the NUMA Resources Operator using th . Install the NUMA Resources Operator: -.. In the {product-title} web console, click *Operators* -> *OperatorHub*. +.. In the {product-title} web console, click *Ecosystem* -> *Software Catalog*. .. Choose *numaresources-operator* from the list of available Operators, and then click *Install*. @@ -26,7 +26,7 @@ As a cluster administrator, you can install the NUMA Resources Operator using th . Optional: Verify that the NUMA Resources Operator installed successfully: -.. Switch to the *Operators* -> *Installed Operators* page. +.. Switch to the *Ecosystem* -> *Installed Operators* page. .. Ensure that *NUMA Resources Operator* is listed in the `openshift-numaresources` namespace with a *Status* of *InstallSucceeded*. + @@ -37,5 +37,5 @@ During installation an Operator might display a *Failed* status. If the installa + If the Operator does not appear as installed, to troubleshoot further: + -* Go to the *Operators* -> *Installed Operators* page and inspect the *Operator Subscriptions* and *Install Plans* tabs for any failure or errors under *Status*. +* Go to the *Ecosystem* -> *Installed Operators* page and inspect the *Operator Subscriptions* and *Install Plans* tabs for any failure or errors under *Status*. * Go to the *Workloads* -> *Pods* page and check the logs for pods in the `default` project. diff --git a/modules/cnf-topology-aware-lifecycle-manager-installation-web-console.adoc b/modules/cnf-topology-aware-lifecycle-manager-installation-web-console.adoc index 4e0742670d..c253e36a38 100644 --- a/modules/cnf-topology-aware-lifecycle-manager-installation-web-console.adoc +++ b/modules/cnf-topology-aware-lifecycle-manager-installation-web-console.adoc @@ -19,7 +19,7 @@ You can use the {product-title} web console to install the {cgu-operator-full}. .Procedure -. In the {product-title} web console, navigate to *Operators* -> *OperatorHub*. +. In the {product-title} web console, navigate to *Ecosystem* -> *Software Catalog*. . Search for the *{cgu-operator-full}* from the list of available Operators, and then click *Install*. . Keep the default selection of *Installation mode* ["All namespaces on the cluster (default)"] and *Installed Namespace* ("openshift-operators") to ensure that the Operator is installed properly. . Click *Install*. @@ -28,10 +28,10 @@ You can use the {product-title} web console to install the {cgu-operator-full}. To confirm that the installation is successful: -. Navigate to the *Operators* -> *Installed Operators* page. +. Navigate to the *Ecosystem* -> *Installed Operators* page. . Check that the Operator is installed in the `All Namespaces` namespace and its status is `Succeeded`. If the Operator is not installed successfully: -. Navigate to the *Operators* -> *Installed Operators* page and inspect the `Status` column for any errors or failures. +. Navigate to the *Ecosystem* -> *Installed Operators* page and inspect the `Status` column for any errors or failures. . 
Navigate to the *Workloads* -> *Pods* page and check the logs in any containers in the `cluster-group-upgrades-controller-manager` pod that are reporting issues. diff --git a/modules/compliance-operator-console-installation.adoc b/modules/compliance-operator-console-installation.adoc index 0005fe1dc9..aad69b981c 100644 --- a/modules/compliance-operator-console-installation.adoc +++ b/modules/compliance-operator-console-installation.adoc @@ -13,7 +13,7 @@ .Procedure -. In the {product-title} web console, navigate to *Operators* -> *OperatorHub*. +. In the {product-title} web console, navigate to *Ecosystem* -> *Software Catalog*. . Search for the Compliance Operator, then click *Install*. . Keep the default selection of *Installation mode* and *namespace* to ensure that the Operator will be installed to the `openshift-compliance` namespace. . Click *Install*. @@ -22,10 +22,10 @@ To confirm that the installation is successful: -. Navigate to the *Operators* -> *Installed Operators* page. +. Navigate to the *Ecosystem* -> *Installed Operators* page. . Check that the Compliance Operator is installed in the `openshift-compliance` namespace and its status is `Succeeded`. If the Operator is not installed successfully: -. Navigate to the *Operators* -> *Installed Operators* page and inspect the `Status` column for any errors or failures. +. Navigate to the *Ecosystem* -> *Installed Operators* page and inspect the `Status` column for any errors or failures. . Navigate to the *Workloads* -> *Pods* page and check the logs in any pods in the `openshift-compliance` project that are reporting issues. diff --git a/modules/compliance-operator-hcp-install.adoc b/modules/compliance-operator-hcp-install.adoc index a04177485e..819878076d 100644 --- a/modules/compliance-operator-hcp-install.adoc +++ b/modules/compliance-operator-hcp-install.adoc @@ -6,7 +6,7 @@ [id="installing-compliance-operator-hcp_{context}"] = Installing the Compliance Operator on Hypershift {hcp} -The Compliance Operator can be installed in {hcp} using the OperatorHub by creating a `Subscription` file. +The Compliance Operator can be installed in {hcp} using the software catalog by creating a `Subscription` file. :FeatureName: {hcp-capital} include::snippets/technology-preview.adoc[] diff --git a/modules/compliance-operator-uninstall.adoc b/modules/compliance-operator-uninstall.adoc index 9bc5e8f653..88a72c7c5d 100644 --- a/modules/compliance-operator-uninstall.adoc +++ b/modules/compliance-operator-uninstall.adoc @@ -17,13 +17,13 @@ To remove the Compliance Operator, you must first delete the objects in the name To remove the Compliance Operator by using the {product-title} web console: -. Go to the *Operators* -> *Installed Operators* -> *Compliance Operator* page. +. Go to the *Ecosystem* -> *Installed Operators* -> *Compliance Operator* page. .. Click *All instances*. .. In *All namespaces*, click the Options menu {kebab} and delete all ScanSettingBinding, ComplainceSuite, ComplianceScan, and ProfileBundle objects. -. Switch to the *Administration* -> *Operators* -> *Installed Operators* page. +. Switch to the *Administration* -> *Ecosystem* -> *Installed Operators* page. . Click the Options menu {kebab} on the *Compliance Operator* entry and select *Uninstall Operator*. @@ -33,4 +33,4 @@ To remove the Compliance Operator by using the {product-title} web console: . Click the Options menu {kebab} next to the *openshift-compliance* project, and select *Delete Project*. -.. 
Confirm the deletion by typing `openshift-compliance` in the dialog box, and click *Delete*. \ No newline at end of file +.. Confirm the deletion by typing `openshift-compliance` in the dialog box, and click *Delete*. diff --git a/modules/coo-dashboard-ui-plugin-install.adoc b/modules/coo-dashboard-ui-plugin-install.adoc index 5280f691a8..bddd7ce692 100644 --- a/modules/coo-dashboard-ui-plugin-install.adoc +++ b/modules/coo-dashboard-ui-plugin-install.adoc @@ -15,7 +15,7 @@ .Procedure -. In the {product-title} web console, click *Operators* -> *Installed Operators* and select {coo-full}. +. In the {product-title} web console, click *Ecosystem* -> *Installed Operators* and select {coo-full}. . Choose the *UI Plugin* tab (at the far right of the tab list) and press *Create UIPlugin*. . Select *YAML view*, enter the following content, and then press *Create*: + @@ -27,4 +27,4 @@ metadata: name: dashboards spec: type: Dashboards ----- \ No newline at end of file +---- diff --git a/modules/coo-distributed-tracing-ui-plugin-install.adoc b/modules/coo-distributed-tracing-ui-plugin-install.adoc index 1a7cfb6137..22b6b11eb1 100644 --- a/modules/coo-distributed-tracing-ui-plugin-install.adoc +++ b/modules/coo-distributed-tracing-ui-plugin-install.adoc @@ -15,7 +15,7 @@ .Procedure -. In the {product-title} web console, click *Operators* -> *Installed Operators* and select {coo-full} +. In the {product-title} web console, click *Ecosystem* -> *Installed Operators* and select {coo-full} . Choose the *UI Plugin* tab (at the far right of the tab list) and press *Create UIPlugin* . Select *YAML view*, enter the following content, and then press *Create*: + @@ -27,4 +27,4 @@ metadata: name: distributed-tracing spec: type: DistributedTracing ----- \ No newline at end of file +---- diff --git a/modules/coo-logging-ui-plugin-install.adoc b/modules/coo-logging-ui-plugin-install.adoc index 96dfefa4a6..ee6a49d609 100644 --- a/modules/coo-logging-ui-plugin-install.adoc +++ b/modules/coo-logging-ui-plugin-install.adoc @@ -14,7 +14,7 @@ .Procedure -. In the {product-title} web console, click *Operators* -> *Installed Operators* and select {coo-full}. +. In the {product-title} web console, click *Ecosystem* -> *Installed Operators* and select {coo-full}. . Choose the *UI Plugin* tab (at the far right of the tab list) and click *Create UIPlugin*. . Select *YAML view*, enter the following content, and then click *Create*: + diff --git a/modules/coo-monitoring-ui-plugin-install.adoc b/modules/coo-monitoring-ui-plugin-install.adoc index 8ab8cd1d71..5a3a183883 100644 --- a/modules/coo-monitoring-ui-plugin-install.adoc +++ b/modules/coo-monitoring-ui-plugin-install.adoc @@ -16,7 +16,7 @@ The monitoring UI plugin adds monitoring related UI features to the OpenShift we .Procedure -. In the {product-title} web console, click *Operators* -> *Installed Operators* and select {coo-full} +. In the {product-title} web console, click *Ecosystem* -> *Installed Operators* and select {coo-full} . Choose the *UI Plugin* tab (at the far right of the tab list) and press *Create UIPlugin* . Select *YAML view*, enter the following content, and then press *Create*: + @@ -39,4 +39,4 @@ spec: enabled: true ---- <1> Enable {rh-rhacm} features. You must configure the Alertmanager and ThanosQuerier Service endpoints. -<2> Enable incident detection features. \ No newline at end of file +<2> Enable incident detection features. 
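For context, a complete `UIPlugin` object for the monitoring plugin, with both of the features described in the callouts enabled, might look similar to the following sketch. The API version, nested field names, and endpoint URLs are assumptions based on the {coo-full} UIPlugin API and should be verified against your installed version:

[source,yaml]
----
apiVersion: observability.openshift.io/v1alpha1
kind: UIPlugin
metadata:
  name: monitoring
spec:
  type: Monitoring
  monitoring:
    acm:
      enabled: true                                    # enable RHACM features
      alertmanager:
        url: 'https://alertmanager.example.svc:9095'   # replace with your Alertmanager service endpoint
      thanosQuerier:
        url: 'https://thanos-querier.example.svc:9091' # replace with your ThanosQuerier service endpoint
    incidents:
      enabled: true                                    # enable incident detection
----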
diff --git a/modules/coo-troubleshooting-ui-plugin-install.adoc b/modules/coo-troubleshooting-ui-plugin-install.adoc index 0d379738b5..4904bd92b1 100644 --- a/modules/coo-troubleshooting-ui-plugin-install.adoc +++ b/modules/coo-troubleshooting-ui-plugin-install.adoc @@ -12,7 +12,7 @@ * You have installed the {coo-full} .Procedure -. In the {product-title} web console, click *Operators* -> *Installed Operators* and select {coo-full} +. In the {product-title} web console, click *Ecosystem* -> *Installed Operators* and select {coo-full} . Choose the *UI Plugin* tab (at the far right of the tab list) and press *Create UIPlugin* . Select *YAML view*, enter the following content, and then press *Create*: + @@ -24,4 +24,4 @@ metadata: name: troubleshooting-panel spec: type: TroubleshootingPanel ----- \ No newline at end of file +---- diff --git a/modules/create-lokistack-cr-console.adoc b/modules/create-lokistack-cr-console.adoc index cbccb37ff6..cb0d2d6b4a 100644 --- a/modules/create-lokistack-cr-console.adoc +++ b/modules/create-lokistack-cr-console.adoc @@ -16,7 +16,7 @@ You can create a `LokiStack` custom resource (CR) by using the {product-title} w .Procedure -. Go to the *Operators* -> *Installed Operators* page. Click the *All instances* tab. +. Go to the *Ecosystem* -> *Installed Operators* page. Click the *All instances* tab. . From the *Create new* drop-down list, select *LokiStack*. diff --git a/modules/creating-nfd-cr-web-console.adoc b/modules/creating-nfd-cr-web-console.adoc index 6352d186ed..8a757f3591 100644 --- a/modules/creating-nfd-cr-web-console.adoc +++ b/modules/creating-nfd-cr-web-console.adoc @@ -16,7 +16,7 @@ As a cluster administrator, you can create a `NodeFeatureDiscovery` CR by using .Procedure -. Navigate to the *Operators* -> *Installed Operators* page. +. Navigate to the *Ecosystem* -> *Installed Operators* page. . In the *Node Feature Discovery* section, under *Provided APIs*, click *Create instance*. . Edit the values of the `NodeFeatureDiscovery` CR. . Click *Create*. diff --git a/modules/das-operator-installing-web-console.adoc b/modules/das-operator-installing-web-console.adoc index 8af6c61070..c417630e79 100644 --- a/modules/das-operator-installing-web-console.adoc +++ b/modules/das-operator-installing-web-console.adoc @@ -21,7 +21,7 @@ As a cluster administrator, you can install the Dynamic Accelerator Slicer (DAS) . Configure the NVIDIA GPU Operator for MIG support: -.. In the {product-title} web console, navigate to *Operators* -> *Installed Operators*. +.. In the {product-title} web console, navigate to *Ecosystem* -> *Installed Operators*. .. Select the *NVIDIA GPU Operator* from the list of installed operators. @@ -141,7 +141,7 @@ spec: .. Wait for the NVIDIA GPU Operator cluster policy to reach the `Ready` state. You can monitor this by: + -... Navigating to *Operators* -> *Installed Operators* -> *NVIDIA GPU Operator*. +... Navigating to *Ecosystem* -> *Installed Operators* -> *NVIDIA GPU Operator*. ... Clicking the *ClusterPolicy* tab and checking that the status shows `ready`. .. Verify that all pods in the NVIDIA GPU Operator namespace are running by selecting the `nvidia-gpu-operator` namespace and navigating to *Workloads* -> *Pods*. @@ -162,7 +162,7 @@ After applying the MIG label, the labeled nodes will reboot to enable MIG mode. .. Verify that MIG mode is successfully enabled on the GPU nodes by checking that the `nvidia.com/mig.config=all-enabled` label appears in the *Labels* section. 
To locate the label, navigate to *Compute → Nodes*, select the GPU node, and click the *Details* tab. -. In the {product-title} web console, click *Operators* -> *OperatorHub*. +. In the {product-title} web console, click *Ecosystem* -> *Software Catalog*. . Search for *Dynamic Accelerator Slicer* or *DAS* in the filter box to locate the DAS Operator. @@ -177,7 +177,7 @@ After applying the MIG label, the labeled nodes will reboot to enable MIG mode. . Click *Install*. -. In the {product-title} web console, click *Operators* -> *Installed Operators*. +. In the {product-title} web console, click *Ecosystem* -> *Installed Operators*. . Select *DAS Operator* from the list. @@ -208,7 +208,7 @@ spec: To verify that the DAS Operator installed successfully: -. Navigate to the *Operators* -> *Installed Operators* page. +. Navigate to the *Ecosystem* -> *Installed Operators* page. . Ensure that *Dynamic Accelerator Slicer* is listed in the `das-operator` namespace with a *Status* of *Succeeded*. To verify that the `DASOperator` CR installed successfully: @@ -252,5 +252,5 @@ The `das-daemonset` pods will only appear on nodes that have MIG-compatible GPU .Troubleshooting Use the following procedure if the Operator does not appear to be installed: -. Navigate to the *Operators* -> *Installed Operators* page and inspect the *Operator Subscriptions* and *Install Plans* tabs for any failure or errors under *Status*. -. Navigate to the *Workloads* -> *Pods* page and check the logs for pods in the `das-operator` namespace. \ No newline at end of file +. Navigate to the *Ecosystem* -> *Installed Operators* page and inspect the *Operator Subscriptions* and *Install Plans* tabs for any failure or errors under *Status*. +. Navigate to the *Workloads* -> *Pods* page and check the logs for pods in the `das-operator` namespace. diff --git a/modules/das-operator-uninstalling-web-console.adoc b/modules/das-operator-uninstalling-web-console.adoc index f9d49eb835..5269e46215 100644 --- a/modules/das-operator-uninstalling-web-console.adoc +++ b/modules/das-operator-uninstalling-web-console.adoc @@ -15,7 +15,7 @@ You can uninstall the Dynamic Accelerator Slicer (DAS) Operator using the {produ .Procedure -. In the {product-title} web console, navigate to *Operators* -> *Installed Operators*. +. In the {product-title} web console, navigate to *Ecosystem* -> *Installed Operators*. . Locate the *Dynamic Accelerator Slicer* in the list of installed Operators. @@ -34,7 +34,7 @@ You can uninstall the Dynamic Accelerator Slicer (DAS) Operator using the {produ .Verification -. Navigate to the *Operators* -> *Installed Operators* page. +. Navigate to the *Ecosystem* -> *Installed Operators* page. . Verify that the Dynamic Accelerator Slicer (DAS) Operator is no longer listed. . Optional. Verify that the `das-operator` namespace and its resources have been removed by running the following command: + @@ -48,4 +48,4 @@ The command should return an error indicating that the namespace is not found. [WARNING] ==== Uninstalling the DAS Operator removes all GPU slice allocations and might cause running workloads that depend on GPU slices to fail. Ensure that no critical workloads are using GPU slices before proceeding with the uninstallation. 
-==== \ No newline at end of file +==== diff --git a/modules/dedicated-cluster-install-deploy.adoc b/modules/dedicated-cluster-install-deploy.adoc index f10bdd3eb6..740ad83f55 100644 --- a/modules/dedicated-cluster-install-deploy.adoc +++ b/modules/dedicated-cluster-install-deploy.adoc @@ -29,18 +29,18 @@ production deployments. .Procedure -. Install the OpenShift Elasticsearch Operator from the OperatorHub: +. Install the OpenShift Elasticsearch Operator from the software catalog: -.. In the {product-title} web console, click *Operators* -> *OperatorHub*. +.. In the {product-title} web console, click *Ecosystem* -> *Software Catalog*. .. Choose *OpenShift Elasticsearch Operator* from the list of available Operators, and click *Install*. .. On the *Install Operator* page, under *A specific namespace on the cluster* select *openshift-logging*. Then, click *Install*. -. Install the Red Hat OpenShift Logging Operator from the OperatorHub: +. Install the Red Hat OpenShift Logging Operator from the software catalog: -.. In the {product-title} web console, click *Operators* -> *OperatorHub*. +.. In the {product-title} web console, click *Ecosystem* -> *Software Catalog*. .. Choose *Red Hat OpenShift Logging* from the list of available Operators, and click *Install*. @@ -49,7 +49,7 @@ Then, click *Install*. . Verify the operator installations: -.. Switch to the *Operators* → *Installed Operators* page. +.. Switch to the *Ecosystem* -> *Installed Operators* page. .. Ensure that *Red Hat OpenShift Logging* and *OpenShift Elasticsearch* Operators are listed in the *openshift-logging* project with a *Status* of *InstallSucceeded*. @@ -62,14 +62,14 @@ you can safely ignore the *Failed* message. + If either operator does not appear as installed, to troubleshoot further: + -* Switch to the *Operators* → *Installed Operators* page and inspect +* Switch to the *Ecosystem* -> *Installed Operators* page and inspect the *Status* column for any errors or failures. * Switch to the *Workloads* → *Pods* page and check the logs in each pod in the `openshift-logging` project that is reporting issues. . Create and deploy an OpenShift Logging instance: -.. Switch to the *Operators* → *Installed Operators* page. +.. Switch to the *Ecosystem* -> *Installed Operators* page. .. Click the installed *Red Hat OpenShift Logging* Operator. diff --git a/modules/distr-tracing-config-security-ossm-web.adoc b/modules/distr-tracing-config-security-ossm-web.adoc index 1cf5ee9b57..f92912daf3 100644 --- a/modules/distr-tracing-config-security-ossm-web.adoc +++ b/modules/distr-tracing-config-security-ossm-web.adoc @@ -19,7 +19,7 @@ You can modify the Jaeger resource to configure {JaegerShortName} security for u . Log in to the {product-title} web console as a user with the `cluster-admin` role. -. Navigate to *Operators* -> *Installed Operators*. +. Navigate to *Ecosystem* -> *Installed Operators*. . Click the *Project* menu and select the project where your `ServiceMeshControlPlane` resource is deployed from the list, for example `istio-system`. diff --git a/modules/distr-tracing-deploy-default.adoc b/modules/distr-tracing-deploy-default.adoc index ae5d4921ef..cde76b82d3 100644 --- a/modules/distr-tracing-deploy-default.adoc +++ b/modules/distr-tracing-deploy-default.adoc @@ -38,7 +38,7 @@ If you are installing as part of Service Mesh, the {DTShortName} resources must .. Click *Create*. -. Navigate to *Operators* -> *Installed Operators*. +. Navigate to *Ecosystem* -> *Installed Operators*. . 
If necessary, select `tracing-system` from the *Project* menu. You may have to wait a few moments for the Operators to be copied to the new project. diff --git a/modules/distr-tracing-deploy-production-es.adoc b/modules/distr-tracing-deploy-production-es.adoc index edceee6e8b..fb9213de1d 100644 --- a/modules/distr-tracing-deploy-production-es.adoc +++ b/modules/distr-tracing-deploy-production-es.adoc @@ -34,7 +34,7 @@ If you are installing as part of Service Mesh, the {DTShortName} resources must .. Click *Create*. -. Navigate to *Operators* -> *Installed Operators*. +. Navigate to *Ecosystem* -> *Installed Operators*. . If necessary, select `tracing-system` from the *Project* menu. You may have to wait a few moments for the Operators to be copied to the new project. diff --git a/modules/distr-tracing-deploy-streaming.adoc b/modules/distr-tracing-deploy-streaming.adoc index 800c8b8a36..3c64d90b7d 100644 --- a/modules/distr-tracing-deploy-streaming.adoc +++ b/modules/distr-tracing-deploy-streaming.adoc @@ -46,7 +46,7 @@ If you are installing as part of Service Mesh, the {DTShortName} resources must .. Click *Create*. -. Navigate to *Operators* -> *Installed Operators*. +. Navigate to *Ecosystem* -> *Installed Operators*. . If necessary, select `tracing-system` from the *Project* menu. You may have to wait a few moments for the Operators to be copied to the new project. diff --git a/modules/distr-tracing-install-elasticsearch.adoc b/modules/distr-tracing-install-elasticsearch.adoc index 9fcaa6afa6..cb32bdb94d 100644 --- a/modules/distr-tracing-install-elasticsearch.adoc +++ b/modules/distr-tracing-install-elasticsearch.adoc @@ -26,7 +26,7 @@ If you have already installed the {es-op} as part of OpenShift Logging, you do n . Log in to the {product-title} web console as a user with the `cluster-admin` role. If you use {product-dedicated}, you must have an account with the `dedicated-admin` role. -. Navigate to *Operators* -> *OperatorHub*. +. Navigate to *Ecosystem* -> *Software Catalog*. . Type *Elasticsearch* into the filter box to locate the {es-op}. diff --git a/modules/distr-tracing-install-jaeger-operator.adoc b/modules/distr-tracing-install-jaeger-operator.adoc index 869d51425f..ad9eb3549b 100644 --- a/modules/distr-tracing-install-jaeger-operator.adoc +++ b/modules/distr-tracing-install-jaeger-operator.adoc @@ -6,7 +6,7 @@ [id="distr-tracing-jaeger-operator-install_{context}"] = Installing the {JaegerOperator} Operator -You can install the {JaegerOperator} Operator through the link:https://operatorhub.io/[*OperatorHub*]. +You can install the {JaegerOperator} Operator through the software catalog. By default, the Operator is installed in the `openshift-operators` project. @@ -19,7 +19,7 @@ By default, the Operator is installed in the `openshift-operators` project. . Log in to the {product-title} web console as a user with the `cluster-admin` role. If you use {product-dedicated}, you must have an account with the `dedicated-admin` role. -. Navigate to *Operators* -> *OperatorHub*. +. Navigate to *Ecosystem* -> *Software Catalog*. . Search for the {JaegerOperator} Operator by entering *distributed tracing platform* in the search field. @@ -43,6 +43,6 @@ If you select *Manual* updates, the OLM creates an update request when a new ver . Click *Install*. -. Navigate to *Operators* -> *Installed Operators*. +. Navigate to *Ecosystem* -> *Installed Operators*. . On the *Installed Operators* page, select the `openshift-operators` project. 
Wait for the *Succeeded* status of the {JaegerOperator} Operator before continuing. diff --git a/modules/distr-tracing-install-overview.adoc b/modules/distr-tracing-install-overview.adoc index 1143133f9c..abf4c863d7 100644 --- a/modules/distr-tracing-install-overview.adoc +++ b/modules/distr-tracing-install-overview.adoc @@ -10,9 +10,9 @@ The steps for installing {DTProductName} are as follows: * Review the documentation and determine your deployment strategy. -* If your deployment strategy requires persistent storage, install the {es-op} via the OperatorHub. +* If your deployment strategy requires persistent storage, install the {es-op} via the software catalog. -* Install the {JaegerName} Operator via the OperatorHub. +* Install the {JaegerName} Operator via the software catalog. * Modify the custom resource YAML file to support your deployment strategy. diff --git a/modules/distr-tracing-removing-instance.adoc b/modules/distr-tracing-removing-instance.adoc index a93f0f419b..9f2c55da10 100644 --- a/modules/distr-tracing-removing-instance.adoc +++ b/modules/distr-tracing-removing-instance.adoc @@ -21,7 +21,7 @@ When deleting an instance that uses in-memory storage, all data is irretrievably . Log in to the {product-title} web console. -. Navigate to *Operators* -> *Installed Operators*. +. Navigate to *Ecosystem* -> *Installed Operators*. . Select the name of the project where the Operators are installed from the *Project* menu, for example, `openshift-operators`. diff --git a/modules/distr-tracing-tempo-install-tempomonolithic-web-console.adoc b/modules/distr-tracing-tempo-install-tempomonolithic-web-console.adoc index 738a3f2a07..dba9299665 100644 --- a/modules/distr-tracing-tempo-install-tempomonolithic-web-console.adoc +++ b/modules/distr-tracing-tempo-install-tempomonolithic-web-console.adoc @@ -45,7 +45,7 @@ include::snippets/distr-tracing-tempo-secret-example.adoc[] You can create multiple `TempoMonolithic` instances in separate projects on the same cluster. ==== -.. Go to *Operators* -> *Installed Operators*. +.. Go to *Ecosystem* -> *Installed Operators*. .. Select *TempoMonolithic* -> *Create TempoMonolithic* -> *YAML view*. @@ -61,7 +61,7 @@ include::snippets/distr-tracing-tempo-tempomonolithic-custom-resource.adoc[] . Use the *Project:* dropdown list to select the project of the `TempoMonolithic` instance. -. Go to *Operators* -> *Installed Operators* to verify that the *Status* of the `TempoMonolithic` instance is *Condition: Ready*. +. Go to *Ecosystem* -> *Installed Operators* to verify that the *Status* of the `TempoMonolithic` instance is *Condition: Ready*. . Go to *Workloads* -> *Pods* to verify that the pod of the `TempoMonolithic` instance is running. diff --git a/modules/distr-tracing-tempo-install-tempostack-web-console.adoc b/modules/distr-tracing-tempo-install-tempostack-web-console.adoc index 490a22c1de..0f602a921e 100644 --- a/modules/distr-tracing-tempo-install-tempostack-web-console.adoc +++ b/modules/distr-tracing-tempo-install-tempostack-web-console.adoc @@ -40,7 +40,7 @@ include::snippets/distr-tracing-tempo-secret-example.adoc[] You can create multiple `TempoStack` instances in separate projects on the same cluster. ==== -.. Go to *Operators* -> *Installed Operators*. +.. Go to *Ecosystem* -> *Installed Operators*. .. Select *TempoStack* -> *Create TempoStack* -> *YAML view*. @@ -57,7 +57,7 @@ include::snippets/distr-tracing-tempo-tempostack-custom-resource.adoc[] . Use the *Project:* dropdown list to select the project of the `TempoStack` instance. -. 
Go to *Operators* -> *Installed Operators* to verify that the *Status* of the `TempoStack` instance is *Condition: Ready*. +. Go to *Ecosystem* -> *Installed Operators* to verify that the *Status* of the `TempoStack` instance is *Condition: Ready*. . Go to *Workloads* -> *Pods* to verify that all the component pods of the `TempoStack` instance are running. diff --git a/modules/distr-tracing-tempo-install-web-console.adoc b/modules/distr-tracing-tempo-install-web-console.adoc index 52180f570b..5711332b5b 100644 --- a/modules/distr-tracing-tempo-install-web-console.adoc +++ b/modules/distr-tracing-tempo-install-web-console.adoc @@ -23,7 +23,7 @@ Object storage is required and not included with the {TempoShortName}. You must .Procedure -. Go to *Operators* -> *OperatorHub* and search for `{TempoOperator}`. +. Go to *Ecosystem* -> *Software Catalog* and search for `{TempoOperator}`. . Select the *{TempoOperator}* that is *provided by Red Hat*. + diff --git a/modules/distr-tracing-tempo-remove-web-console.adoc b/modules/distr-tracing-tempo-remove-web-console.adoc index fc9058a8dd..3303c4de0d 100644 --- a/modules/distr-tracing-tempo-remove-web-console.adoc +++ b/modules/distr-tracing-tempo-remove-web-console.adoc @@ -16,7 +16,7 @@ You can remove a TempoStack instance in the *Administrator* view of the web cons .Procedure -. Go to *Operators* -> *Installed Operators* -> *{TempoOperator}* -> *TempoStack*. +. Go to *Ecosystem* -> *Installed Operators* -> *{TempoOperator}* -> *TempoStack*. . To remove the TempoStack instance, select {kebab} -> *Delete TempoStack* -> *Delete*. diff --git a/modules/eco-node-health-check-operator-installation-web-console.adoc b/modules/eco-node-health-check-operator-installation-web-console.adoc index 849a9f553c..d756621d9e 100644 --- a/modules/eco-node-health-check-operator-installation-web-console.adoc +++ b/modules/eco-node-health-check-operator-installation-web-console.adoc @@ -14,7 +14,7 @@ You can use the {product-title} web console to install the Node Health Check Ope .Procedure -. In the {product-title} web console, navigate to *Operators* -> *OperatorHub*. +. In the {product-title} web console, navigate to *Ecosystem* -> *Software Catalog*. . Search for the Node Health Check Operator, then click *Install*. . Keep the default selection of *Installation mode* and *namespace* to ensure that the Operator will be installed to the `openshift-operators` namespace. . Ensure that the *Console plug-in* is set to `Enable`. @@ -24,10 +24,10 @@ You can use the {product-title} web console to install the Node Health Check Ope To confirm that the installation is successful: -. Navigate to the *Operators* -> *Installed Operators* page. +. Navigate to the *Ecosystem* -> *Installed Operators* page. . Check that the Operator is installed in the `openshift-operators` namespace and that its status is `Succeeded`. If the Operator is not installed successfully: -. Navigate to the *Operators* -> *Installed Operators* page and inspect the `Status` column for any errors or failures. +. Navigate to the *Ecosystem* -> *Installed Operators* page and inspect the `Status` column for any errors or failures. . Navigate to the *Workloads* -> *Pods* page and check the logs in any pods in the `openshift-operators` project that are reporting issues. 
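When you install an Operator from the software catalog, the console creates an OLM `Subscription` object behind the scenes, which can be useful to inspect while troubleshooting a failed installation. A minimal sketch for the Node Health Check Operator follows; the package name, channel, and catalog source are assumptions and might differ in your catalog:

[source,yaml]
----
apiVersion: operators.coreos.com/v1alpha1
kind: Subscription
metadata:
  name: node-healthcheck-operator   # assumed subscription name
  namespace: openshift-operators
spec:
  channel: stable                   # assumed update channel
  name: node-healthcheck-operator   # assumed package name
  source: redhat-operators          # assumed catalog source
  sourceNamespace: openshift-marketplace
----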
diff --git a/modules/eco-node-maintenance-operator-installation-web-console.adoc b/modules/eco-node-maintenance-operator-installation-web-console.adoc index 666f7672b0..2ae47f0c5f 100644 --- a/modules/eco-node-maintenance-operator-installation-web-console.adoc +++ b/modules/eco-node-maintenance-operator-installation-web-console.adoc @@ -14,7 +14,7 @@ You can use the {product-title} web console to install the Node Maintenance Oper .Procedure -. In the {product-title} web console, navigate to *Operators* -> *OperatorHub*. +. In the {product-title} web console, navigate to *Ecosystem* -> *Software Catalog*. . Search for the Node Maintenance Operator, then click *Install*. . Keep the default selection of *Installation mode* and *namespace* to ensure that the Operator will be installed to the `openshift-operators` namespace. . Click *Install*. @@ -23,11 +23,11 @@ You can use the {product-title} web console to install the Node Maintenance Oper To confirm that the installation is successful: -. Navigate to the *Operators* -> *Installed Operators* page. +. Navigate to the *Ecosystem* -> *Installed Operators* page. . Check that the Operator is installed in the `openshift-operators` namespace and that its status is `Succeeded`. If the Operator is not installed successfully: -. Navigate to the *Operators* -> *Installed Operators* page and inspect the `Status` column for any errors or failures. -. Navigate to the *Operators* -> *Installed Operators* -> *Node Maintenance Operator* -> *Details* page, and inspect the `Conditions` section for errors before pod creation. +. Navigate to the *Ecosystem* -> *Installed Operators* page and inspect the `Status` column for any errors or failures. +. Navigate to the *Ecosystem* -> *Installed Operators* -> *Node Maintenance Operator* -> *Details* page, and inspect the `Conditions` section for errors before pod creation. . Navigate to the *Workloads* -> *Pods* page, search for the `Node Maintenance Operator` pod in the installed namespace, and check the logs in the `Logs` tab. diff --git a/modules/eco-resuming-node-maintenance-cr-web-console.adoc b/modules/eco-resuming-node-maintenance-cr-web-console.adoc index a50be7b130..95ff6b47b9 100644 --- a/modules/eco-resuming-node-maintenance-cr-web-console.adoc +++ b/modules/eco-resuming-node-maintenance-cr-web-console.adoc @@ -11,11 +11,11 @@ To resume a node from maintenance mode, you can delete a `NodeMaintenance` custo .Prerequisites * Log in as a user with `cluster-admin` privileges. -* Install the Node Maintenance Operator from the *OperatorHub*. +* Install the Node Maintenance Operator from the software catalog. .Procedure -. From the *Administrator* perspective in the web console, navigate to *Operators* → *Installed Operators*. +. From the *Administrator* perspective in the web console, navigate to *Ecosystem* -> *Installed Operators*. . Select the Node Maintenance Operator from the list of Operators. diff --git a/modules/eco-self-node-remediation-operator-installation-web-console.adoc b/modules/eco-self-node-remediation-operator-installation-web-console.adoc index b44562724b..c4f95c2bd8 100644 --- a/modules/eco-self-node-remediation-operator-installation-web-console.adoc +++ b/modules/eco-self-node-remediation-operator-installation-web-console.adoc @@ -19,7 +19,7 @@ The Node Health Check Operator also installs the Self Node Remediation Operator .Procedure -. In the {product-title} web console, navigate to *Operators* -> *OperatorHub*. +. In the {product-title} web console, navigate to *Ecosystem* -> *Software Catalog*. .
Search for the Self Node Remediation Operator from the list of available Operators, and then click *Install*. . Keep the default selection of *Installation mode* and *namespace* to ensure that the Operator is installed to the `openshift-operators` namespace. . Click *Install*. @@ -28,10 +28,10 @@ The Node Health Check Operator also installs the Self Node Remediation Operator To confirm that the installation is successful: -. Navigate to the *Operators* -> *Installed Operators* page. +. Navigate to the *Ecosystem* -> *Installed Operators* page. . Check that the Operator is installed in the `openshift-operators` namespace and its status is `Succeeded`. If the Operator is not installed successfully: -. Navigate to the *Operators* -> *Installed Operators* page and inspect the `Status` column for any errors or failures. +. Navigate to the *Ecosystem* -> *Installed Operators* page and inspect the `Status` column for any errors or failures. . Navigate to the *Workloads* -> *Pods* page and check the logs in any pods in the `self-node-remediation-controller-manager` project that are reporting issues. diff --git a/modules/eco-setting-node-maintenance-cr-web-console.adoc b/modules/eco-setting-node-maintenance-cr-web-console.adoc index 2e2a5489a3..26b7aa95d4 100644 --- a/modules/eco-setting-node-maintenance-cr-web-console.adoc +++ b/modules/eco-setting-node-maintenance-cr-web-console.adoc @@ -11,11 +11,11 @@ To set a node to maintenance mode, you can create a `NodeMaintenance` custom res .Prerequisites * Log in as a user with `cluster-admin` privileges. -* Install the Node Maintenance Operator from the *OperatorHub*. +* Install the Node Maintenance Operator from the software catalog. .Procedure -. From the *Administrator* perspective in the web console, navigate to *Operators* → *Installed Operators*. +. From the *Administrator* perspective in the web console, navigate to *Ecosystem* -> *Installed Operators*. . Select the Node Maintenance Operator from the list of Operators. diff --git a/modules/enabling-log-console-plugin.adoc b/modules/enabling-log-console-plugin.adoc index 2f684ea001..8317c8769f 100644 --- a/modules/enabling-log-console-plugin.adoc +++ b/modules/enabling-log-console-plugin.adoc @@ -16,7 +16,7 @@ You can enable the {log-plug} as part of the {clo} installation, but you can als .Procedure -. In the {product-title} web console *Administrator* perspective, navigate to *Operators* -> *Installed Operators*. +. In the {product-title} web console *Administrator* perspective, navigate to *Ecosystem* -> *Installed Operators*. . Click *Red Hat OpenShift Logging*. This takes you to the Operator *Details* page. . In the *Details* page, click *Disabled* for the *Console plugin* option. . In the *Console plugin enablement* dialog, select *Enable*. diff --git a/modules/external-secrets-operator-install-console.adoc b/modules/external-secrets-operator-install-console.adoc index 1286cb38c7..a3e5e1d601 100644 --- a/modules/external-secrets-operator-install-console.adoc +++ b/modules/external-secrets-operator-install-console.adoc @@ -17,7 +17,7 @@ You can use the web console to install the {external-secrets-operator}. . Log in to the {product-title} web console. -. Navigate to *Operators* -> *OperatorHub*. +. Navigate to *Ecosystem* -> *Software Catalog*. . Enter *{external-secrets-operator-short}* in the search box. @@ -51,6 +51,6 @@ You can use the web console to install the {external-secrets-operator}. .Verification -. Navigate to *Operators* -> *Installed Operators*. +. 
Navigate to *Ecosystem* -> *Installed Operators*. . Verify that *{external-secrets-operator-short}* is listed with a *Status* of *Succeeded* in the `external-secrets-operator` namespace. diff --git a/modules/external-secrets-operator-uninstall-console.adoc b/modules/external-secrets-operator-uninstall-console.adoc index 0cbecf2e58..fc785edf17 100644 --- a/modules/external-secrets-operator-uninstall-console.adoc +++ b/modules/external-secrets-operator-uninstall-console.adoc @@ -20,7 +20,7 @@ You can uninstall the {external-secrets-operator} by using the web console. . Uninstall the {external-secrets-operator} using the following steps: -.. Navigate to *Operators* -> *Installed Operators*. +.. Navigate to *Ecosystem* -> *Installed Operators*. .. Click the Options menu {kebab} next to the *{external-secrets-operator}* entry and click *Uninstall Operator*. diff --git a/modules/file-integrity-operator-installing-web-console.adoc b/modules/file-integrity-operator-installing-web-console.adoc index 60ddb01d33..119d7b95f5 100644 --- a/modules/file-integrity-operator-installing-web-console.adoc +++ b/modules/file-integrity-operator-installing-web-console.adoc @@ -12,7 +12,7 @@ .Procedure -. In the {product-title} web console, navigate to *Operators* -> *OperatorHub*. +. In the {product-title} web console, navigate to *Ecosystem* -> *Software Catalog*. . Search for the File Integrity Operator, then click *Install*. . Keep the default selection of *Installation mode* and *namespace* to ensure that the Operator will be installed to the `openshift-file-integrity` namespace. . Click *Install*. @@ -21,10 +21,10 @@ To confirm that the installation is successful: -. Navigate to the *Operators* -> *Installed Operators* page. +. Navigate to the *Ecosystem* -> *Installed Operators* page. . Check that the Operator is installed in the `openshift-file-integrity` namespace and its status is `Succeeded`. If the Operator is not installed successfully: -. Navigate to the *Operators* -> *Installed Operators* page and inspect the `Status` column for any errors or failures. +. Navigate to the *Ecosystem* -> *Installed Operators* page and inspect the `Status` column for any errors or failures. . Navigate to the *Workloads* -> *Pods* page and check the logs in any pods in the `openshift-file-integrity` project that are reporting issues. diff --git a/modules/fio-uninstall-console.adoc b/modules/fio-uninstall-console.adoc index c3d3103da4..fd3f68d233 100644 --- a/modules/fio-uninstall-console.adoc +++ b/modules/fio-uninstall-console.adoc @@ -15,13 +15,13 @@ To remove the File Integrity Operator, you must first delete the `FileIntegrity` .Procedure -. Navigate to the *Operators* -> *Installed Operators* -> *File Integrity Operator* page. +. Navigate to the *Ecosystem* -> *Installed Operators* -> *File Integrity Operator* page. . From the *File Integrity* tab, ensure the *Show operands in: All namespaces* default option is selected to list all `FileIntegrity` objects in all namespaces. . Click the Options menu {kebab} and then click *Delete FileIntegrity* to delete a `FileIntegrity` object. Ensure all `FileIntegrity` objects are deleted. -. Go to the *Administration* -> *Operators* -> *Installed Operators* page. +. Go to the *Administration* -> *Ecosystem* -> *Installed Operators* page. . Click the Options menu {kebab} on the *File Integrity Operator* entry and select *Uninstall Operator*. 
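For reference, the `FileIntegrity` objects that you delete in this procedure typically look similar to the following minimal sketch; the object name, node selector, and empty `config` stanza are illustrative assumptions, and the API version should be verified against your installed File Integrity Operator:

[source,yaml]
----
apiVersion: fileintegrity.openshift.io/v1alpha1
kind: FileIntegrity
metadata:
  name: worker-fileintegrity             # example name
  namespace: openshift-file-integrity
spec:
  nodeSelector:
    node-role.kubernetes.io/worker: ""   # run AIDE checks on worker nodes
  config: {}                             # use the default AIDE configuration
----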
diff --git a/modules/gitops-argo-cd-installation.adoc b/modules/gitops-argo-cd-installation.adoc index 63f6ecc840..42bc306032 100644 --- a/modules/gitops-argo-cd-installation.adoc +++ b/modules/gitops-argo-cd-installation.adoc @@ -11,7 +11,7 @@ To manage cluster configurations or deploy applications, you can install and dep .Procedure . Log in to the {product-title} web console. -. Click *Operators* -> *Installed Operators*. +. Click *Ecosystem* -> *Installed Operators*. . Create or select the project where you want to install the Argo CD instance from the *Project* drop-down menu. @@ -23,4 +23,4 @@ To manage cluster configurations or deploy applications, you can install and dep .. Create an external OS Route to access Argo CD server. Click *Server* -> *Route* and check *Enabled*. -. To open the Argo CD web UI, click the route by navigating to **Networking -> Routes -> -server** in the project where the Argo CD instance is installed. \ No newline at end of file +. To open the Argo CD web UI, click the route by navigating to **Networking -> Routes -> -server** in the project where the Argo CD instance is installed. diff --git a/modules/gitops-creating-rolloutmanager-custom-resource.adoc b/modules/gitops-creating-rolloutmanager-custom-resource.adoc index e8f615d0e1..0bffcdc0d2 100644 --- a/modules/gitops-creating-rolloutmanager-custom-resource.adoc +++ b/modules/gitops-creating-rolloutmanager-custom-resource.adoc @@ -16,7 +16,7 @@ To manage progressive delivery of deployments by using Argo Rollouts in {gitops- . Log in to the {product-title} web console as a cluster administrator. -. In the *Administrator* perspective, click *Operators* -> *Installed Operators*. +. In the *Administrator* perspective, click *Ecosystem* -> *Installed Operators*. . Create or select the project where you want to create and configure a `RolloutManager` custom resource (CR) from the *Project* drop-down menu. diff --git a/modules/gitops-deleting-rolloutmanager-custom-resource.adoc b/modules/gitops-deleting-rolloutmanager-custom-resource.adoc index 9fcf1b1d29..5ad0ae6108 100644 --- a/modules/gitops-deleting-rolloutmanager-custom-resource.adoc +++ b/modules/gitops-deleting-rolloutmanager-custom-resource.adoc @@ -17,7 +17,7 @@ Uninstalling the {gitops-title} Operator does not remove the resources that were . Log in to the {product-title} web console as a cluster administrator. -. In the *Administrator* perspective, click *Operators* -> *Installed Operators*. +. In the *Administrator* perspective, click *Ecosystem* -> *Installed Operators*. . Click the *Project* drop-down menu and select the project that contains the `RolloutManager` CR. diff --git a/modules/gitops-release-notes-1-3-2.adoc b/modules/gitops-release-notes-1-3-2.adoc index 835b5a4a92..905ee77323 100644 --- a/modules/gitops-release-notes-1-3-2.adoc +++ b/modules/gitops-release-notes-1-3-2.adoc @@ -21,7 +21,7 @@ In addition to the fixes and stability improvements, the following sections high The following issues have been resolved in the current release: -* Previously, in the OperatorHub UI under the *Infrastructure Features* section, when you filtered by `Disconnected` the {gitops-title} Operator did not show in the search results, as the Operator did not have the related annotation set in its CSV file. With this update, the `Disconnected Cluster` annotation has been added to the {gitops-title} Operator as an infrastructure feature. 
link:https://issues.redhat.com/browse/GITOPS-1539[GITOPS-1539] +* Previously, in the software catalog UI under the *Infrastructure Features* section, when you filtered by `Disconnected` the {gitops-title} Operator did not show in the search results, as the Operator did not have the related annotation set in its CSV file. With this update, the `Disconnected Cluster` annotation has been added to the {gitops-title} Operator as an infrastructure feature. link:https://issues.redhat.com/browse/GITOPS-1539[GITOPS-1539] * When using an `Namespace-scoped` Argo CD instance, for example, an Argo CD instance that is not scoped to *All Namepsaces* in a cluster, {gitops-title} dynamically maintains a list of managed namespaces. These namespaces include the `argocd.argoproj.io/managed-by` label. This list of namespaces is stored in a cache in *Argo CD -> Settings -> Clusters -> "in-cluster" -> NAMESPACES*. Before this update, if you deleted one of these namespaces, the Operator ignored that, and the namespace remained in the list. This behavior broke the *CONNECTION STATE* in that cluster configuration, and all sync attempts resulted in errors. For example: + @@ -34,4 +34,4 @@ This bug is fixed. link:https://issues.redhat.com/browse/GITOPS-1521[GITOPS-1521 * With this update, the {gitops-title} Operator has been annotated with the *Deep Insights* capability level. link:https://issues.redhat.com/browse/GITOPS-1519[GITOPS-1519] -* Previously, the Argo CD Operator managed the `resource.exclusion` field by itself but ignored the `resource.inclusion` field. This prevented the `resource.inclusion` field configured in the `Argo CD` CR to generate in the `argocd-cm` configuration map. This bug is fixed. link:https://issues.redhat.com/browse/GITOPS-1518[GITOPS-1518] \ No newline at end of file +* Previously, the Argo CD Operator managed the `resource.exclusion` field by itself but ignored the `resource.inclusion` field. This prevented the `resource.inclusion` field configured in the `Argo CD` CR to generate in the `argocd-cm` configuration map. This bug is fixed. link:https://issues.redhat.com/browse/GITOPS-1518[GITOPS-1518] diff --git a/modules/go-health-monitoring.adoc b/modules/go-health-monitoring.adoc index 950d54cafa..70bb5aabf1 100644 --- a/modules/go-health-monitoring.adoc +++ b/modules/go-health-monitoring.adoc @@ -6,7 +6,7 @@ The {gitops-title} Operator will install the GitOps backend service in the `open .Prerequisites -* The {gitops-title} Operator is installed from *OperatorHub*. +* The {gitops-title} Operator is installed from the software catalog. * Ensure that your applications are synchronized by Argo CD. .Procedure @@ -21,4 +21,4 @@ The {gitops-title} Operator will install the GitOps backend service in the `open ** A broken heart indicates that resource issues have degraded the application's performance. ** A yellow yield sign indicates that resource issues have delayed data about the application's health. -. To view the deployment history of an application, click the *Deployment History* tab. The page includes details such as the *Last deployment*, *Description* (commit message), *Environment*, *Author*, and *Revision*. \ No newline at end of file +. To view the deployment history of an application, click the *Deployment History* tab. The page includes details such as the *Last deployment*, *Description* (commit message), *Environment*, *Author*, and *Revision*. 
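The release note above refers to namespaces that carry the `argocd.argoproj.io/managed-by` label. As a rough sketch of what such a managed namespace looks like (the namespace name is hypothetical, and the label value is assumed to be the namespace of the managing Argo CD instance):

[source,yaml]
----
apiVersion: v1
kind: Namespace
metadata:
  name: example-app                                  # hypothetical application namespace
  labels:
    argocd.argoproj.io/managed-by: openshift-gitops  # assumed Argo CD instance namespace
----

Deleting a namespace like this one is the scenario described in the fixed bug: the cached namespace list under *Argo CD -> Settings -> Clusters* has to drop the entry, otherwise the connection state breaks and sync attempts fail.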
diff --git a/modules/go-uninstalling-gitops-operator.adoc b/modules/go-uninstalling-gitops-operator.adoc index 5a5bb6f4a3..2c16517db8 100644 --- a/modules/go-uninstalling-gitops-operator.adoc +++ b/modules/go-uninstalling-gitops-operator.adoc @@ -8,7 +8,7 @@ .Procedure -. From the *Operators* -> *OperatorHub* page, use the *Filter by keyword* box to search for `{gitops-title} Operator` tile. +. From the *Ecosystem* -> *Software Catalog* page, use the *Filter by keyword* box to search for the `{gitops-title} Operator` tile. . Click the *Red Hat OpenShift GitOps Operator* tile. The Operator tile indicates it is installed. diff --git a/modules/hcp-aws-prereqs.adoc b/modules/hcp-aws-prereqs.adoc index e861acd086..40c7a51438 100644 --- a/modules/hcp-aws-prereqs.adoc +++ b/modules/hcp-aws-prereqs.adoc @@ -8,7 +8,7 @@ You must have the following prerequisites to configure the management cluster: -* You have installed the {mce} 2.5 and later on an {product-title} cluster. The {mce-short} is automatically installed when you install {rh-rhacm-first}. The {mce-short} can also be installed without {rh-rhacm} as an Operator from the {product-title} OperatorHub. +* You have installed the {mce} 2.5 or later on an {product-title} cluster. The {mce-short} is automatically installed when you install {rh-rhacm-first}. The {mce-short} can also be installed without {rh-rhacm} as an Operator from the {product-title} software catalog. * You have at least one managed {product-title} cluster for the {mce-short}. The `local-cluster` is automatically imported in the {mce-short} version 2.5 and later. You can check the status of your hub cluster by running the following command: + diff --git a/modules/hcp-bm-prereqs.adoc b/modules/hcp-bm-prereqs.adoc index b26c24c13c..9f756c0ada 100644 --- a/modules/hcp-bm-prereqs.adoc +++ b/modules/hcp-bm-prereqs.adoc @@ -6,7 +6,7 @@ [id="hcp-bm-prereqs_{context}"] = Prerequisites to configure a management cluster -* You need the {mce} 2.2 and later installed on an {product-title} cluster. You can install {mce-short} as an Operator from the {product-title} OperatorHub. +* You need the {mce} 2.2 or later installed on an {product-title} cluster. You can install {mce-short} as an Operator from the {product-title} software catalog. * The {mce-short} must have at least one managed {product-title} cluster. The `local-cluster` is automatically imported in {mce-short} 2.2 and later. For more information about the `local-cluster`, see _Advanced configuration_ in the Red{nbsp}Hat Advanced Cluster Management documentation. You can check the status of your hub cluster by running the following command: + diff --git a/modules/hcp-cli-console.adoc b/modules/hcp-cli-console.adoc index d3ddbec7ff..d3f8e99991 100644 --- a/modules/hcp-cli-console.adoc +++ b/modules/hcp-cli-console.adoc @@ -9,7 +9,7 @@ You can install the {hcp} command-line interface (CLI), `hcp`, by using the {pro .Prerequisites -* On an {product-title} cluster, you have installed {mce} 2.5 or later. The {mce-short} is automatically installed when you install Red{nbsp}Hat Advanced Cluster Management. You can also install {mce-short} without Red{nbsp}Hat Advanced Management as an Operator from {product-title} OperatorHub. +* On an {product-title} cluster, you have installed {mce} 2.5 or later. The {mce-short} is automatically installed when you install Red{nbsp}Hat Advanced Cluster Management. You can also install {mce-short} without Red{nbsp}Hat Advanced Cluster Management as an Operator from the {product-title} software catalog.
.Procedure diff --git a/modules/hcp-cli-gateway.adoc b/modules/hcp-cli-gateway.adoc index 1e8eeac20a..2fd9bd4448 100644 --- a/modules/hcp-cli-gateway.adoc +++ b/modules/hcp-cli-gateway.adoc @@ -9,7 +9,7 @@ You can install the {hcp} command-line interface (CLI), `hcp`, by using the cont .Prerequisites -* On an {product-title} cluster, you have installed {mce} 2.5 or later. The {mce-short} is automatically installed when you install Red{nbsp}Hat Advanced Cluster Management. You can also install {mce-short} without Red{nbsp}Hat Advanced Management as an Operator from {product-title} OperatorHub. +* On an {product-title} cluster, you have installed {mce} 2.5 or later. The {mce-short} is automatically installed when you install Red{nbsp}Hat Advanced Cluster Management. You can also install {mce-short} without Red{nbsp}Hat Advanced Cluster Management as an Operator from the {product-title} software catalog. .Procedure diff --git a/modules/hcp-cli-terminal.adoc b/modules/hcp-cli-terminal.adoc index 84ccfd7e74..75fedaaa55 100644 --- a/modules/hcp-cli-terminal.adoc +++ b/modules/hcp-cli-terminal.adoc @@ -9,7 +9,7 @@ You can install the {hcp} command-line interface (CLI), `hcp`, from the terminal .Prerequisites -* On an {product-title} cluster, you have installed {mce} 2.5 or later. The {mce-short} is automatically installed when you install Red{nbsp}Hat Advanced Cluster Management. You can also install {mce-short} without Red{nbsp}Hat Advanced Management as an Operator from {product-title} OperatorHub. +* On an {product-title} cluster, you have installed {mce} 2.5 or later. The {mce-short} is automatically installed when you install Red{nbsp}Hat Advanced Cluster Management. You can also install {mce-short} without Red{nbsp}Hat Advanced Cluster Management as an Operator from the {product-title} software catalog. .Procedure diff --git a/modules/hcp-dc-mgmt-cluster.adoc b/modules/hcp-dc-mgmt-cluster.adoc index 3f81e3fb4b..4e2bd73b54 100644 --- a/modules/hcp-dc-mgmt-cluster.adoc +++ b/modules/hcp-dc-mgmt-cluster.adoc @@ -24,10 +24,10 @@ To set up an {product-title} management cluster, you need to ensure that the {mc .Procedure -. Install {mce-short} 2.4 or later on an {product-title} cluster. You can install {mce-short} as an Operator from the {product-title} OperatorHub. The HyperShift Operator is included with {mce-short}. For more information about installing {mce-short}, see "Installing and upgrading multicluster engine operator" in the Red{nbsp}Hat Advanced Cluster Management documentation. +. Install {mce-short} 2.4 or later on an {product-title} cluster. You can install {mce-short} as an Operator from the {product-title} software catalog. The HyperShift Operator is included with {mce-short}. For more information about installing {mce-short}, see "Installing and upgrading multicluster engine operator" in the Red{nbsp}Hat Advanced Cluster Management documentation. . Ensure that the HyperShift Operator is installed. The HyperShift Operator is automatically included with {mce-short}, but if you need to manually install it, follow the steps in "Manually enabling the hypershift-addon managed cluster add-on for local-cluster". .Next steps -Next, configure the web server. \ No newline at end of file +Next, configure the web server.
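Several of the prerequisites in these modules note that {mce-short} can be installed as an Operator from the software catalog. For readers who script the installation instead, a `Subscription` along the following lines is a plausible starting point; the channel, package name, and install namespace are assumptions to check against the catalog entry, and depending on the install mode a dedicated `Namespace` and `OperatorGroup` may also be required:

[source,yaml]
----
apiVersion: operators.coreos.com/v1alpha1
kind: Subscription
metadata:
  name: multicluster-engine
  namespace: multicluster-engine          # assumed install namespace
spec:
  channel: stable-2.5                     # assumed channel; match the {mce-short} version you need
  name: multicluster-engine               # assumed package name in the redhat-operators catalog
  source: redhat-operators
  sourceNamespace: openshift-marketplace
----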
diff --git a/modules/hcp-get-upgrade-versions.adoc b/modules/hcp-get-upgrade-versions.adoc index ee9f0b6a8e..b0207e9807 100644 --- a/modules/hcp-get-upgrade-versions.adoc +++ b/modules/hcp-get-upgrade-versions.adoc @@ -17,7 +17,7 @@ To keep your hosted cluster fully operational during an update process, the cont [id="hcp-mce-hub-cluster_{context}"] == The {mce-short} hub management cluster -The {mce} requires a specific {product-title} version for the management cluster to remain in a supported state. You can install the {mce-short} from OperatorHub in the {product-title} web console. +The {mce} requires a specific {product-title} version for the management cluster to remain in a supported state. You can install the {mce-short} from the software catalog in the {product-title} web console. See the following support matrices for the {mce-short} versions: diff --git a/modules/hcp-ibm-power-prereqs.adoc b/modules/hcp-ibm-power-prereqs.adoc index 4083594466..a6f3f8c464 100644 --- a/modules/hcp-ibm-power-prereqs.adoc +++ b/modules/hcp-ibm-power-prereqs.adoc @@ -6,7 +6,7 @@ [id="hcp-ibm-power-prereqs_{context}"] = Prerequisites to configure {hcp} on {ibm-power-title} -* The {mce} version 2.7 and later installed on an {product-title} cluster. The {mce-short} is automatically installed when you install {rh-rhacm-first}. You can also install the {mce-short} without {rh-rhacm} as an Operator from the {product-title} OperatorHub. +* The {mce} version 2.7 and later installed on an {product-title} cluster. The {mce-short} is automatically installed when you install {rh-rhacm-first}. You can also install the {mce-short} without {rh-rhacm} as an Operator from the {product-title} software catalog. * The {mce-short} must have at least one managed {product-title} cluster. The `local-cluster` managed hub cluster is automatically imported in the {mce-short} version 2.7 and later. For more information about `local-cluster`, see _Advanced configuration_ in the {rh-rhacm} documentation. You can check the status of your hub cluster by running the following command: + diff --git a/modules/hcp-ibm-z-prereqs.adoc b/modules/hcp-ibm-z-prereqs.adoc index 841c3ad602..fa21f0bad0 100644 --- a/modules/hcp-ibm-z-prereqs.adoc +++ b/modules/hcp-ibm-z-prereqs.adoc @@ -6,7 +6,7 @@ [id="hcp-ibm-z-prereqs_{context}"] = Prerequisites to configure {hcp} on {ibm-z-title} -* The {mce} version 2.5 or later must be installed on an {product-title} cluster. You can install {mce-short} as an Operator from the {product-title} OperatorHub. +* The {mce} version 2.5 or later must be installed on an {product-title} cluster. You can install {mce-short} as an Operator from the {product-title} software catalog. * The {mce-short} must have at least one managed {product-title} cluster. The `local-cluster` is automatically imported in {mce-short} 2.5 and later. For more information about the `local-cluster`, see _Advanced configuration_ in the Red{nbsp}Hat Advanced Cluster Management documentation. You can check the status of your hub cluster by running the following command: + @@ -19,4 +19,4 @@ $ oc get managedclusters local-cluster * You need to enable the central infrastructure management service. For more information, see _Enabling the central infrastructure management service_. -* You need to install the hosted control plane command-line interface. For more information, see _Installing the hosted control plane command-line interface_. \ No newline at end of file +* You need to install the hosted control plane command-line interface. 
For more information, see _Installing the hosted control plane command-line interface_. diff --git a/modules/hcp-non-bm-prereqs.adoc b/modules/hcp-non-bm-prereqs.adoc index b99570020a..1f8d22d9e6 100644 --- a/modules/hcp-non-bm-prereqs.adoc +++ b/modules/hcp-non-bm-prereqs.adoc @@ -8,7 +8,7 @@ Before you deploy {hcp} on non-bare-metal agent machines, ensure you meet the following prerequisites: -* You must have {mce} 2.5 or later installed on an {product-title} cluster. You can install the {mce-short} as an Operator from the {product-title} OperatorHub. +* You must have {mce} 2.5 or later installed on an {product-title} cluster. You can install the {mce-short} as an Operator from the {product-title} software catalog. * You must have at least one managed {product-title} cluster for the {mce-short}. The `local-cluster` management cluster is automatically imported. For more information about the `local-cluster`, see link:https://docs.redhat.com/en/documentation/red_hat_advanced_cluster_management_for_kubernetes/2.14/html/clusters/cluster_mce_overview#advanced-config-engine[Advanced configuration] in the {rh-rhacm-title} documentation. You can check the status of your management cluster by running the following command: + @@ -23,4 +23,4 @@ $ oc get managedclusters local-cluster * Your hosted cluster has a cluster-wide unique name. -* You are running the management cluster and workers on the same infrastructure. \ No newline at end of file +* You are running the management cluster and workers on the same infrastructure. diff --git a/modules/hosted-control-planes-concepts-personas.adoc b/modules/hosted-control-planes-concepts-personas.adoc index da9798d2c6..8a2cfe8081 100644 --- a/modules/hosted-control-planes-concepts-personas.adoc +++ b/modules/hosted-control-planes-concepts-personas.adoc @@ -36,7 +36,7 @@ node pool:: A resource that manages a set of compute nodes that are associated w cluster instance administrator:: Users who assume this role are the equivalent of administrators in standalone {product-title}. This user has the `cluster-admin` role in the provisioned cluster, but might not have power over when or how the cluster is updated or configured. This user might have read-only access to see some configuration projected into the cluster. -cluster instance user:: Users who assume this role are the equivalent of developers in standalone {product-title}. This user does not have a view into OperatorHub or machines. +cluster instance user:: Users who assume this role are the equivalent of developers in standalone {product-title}. This user does not have a view into the software catalog or machines. cluster service consumer:: Users who assume this role can request control planes and worker nodes, drive updates, or modify externalized configurations. Typically, this user does not manage or access cloud credentials or infrastructure encryption keys. The cluster service consumer persona can request hosted clusters and interact with node pools. Users who assume this role have RBAC to create, read, update, or delete hosted clusters and node pools within a logical boundary. 
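The cluster service consumer persona described above is defined by RBAC that allows creating, reading, updating, and deleting hosted clusters and node pools within a logical boundary. A minimal sketch of such a namespaced `Role` might look like the following; the role name, namespace, and exact verb list are illustrative assumptions:

[source,yaml]
----
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: cluster-service-consumer   # hypothetical role name
  namespace: clusters              # hypothetical namespace that forms the logical boundary
rules:
- apiGroups:
  - hypershift.openshift.io
  resources:
  - hostedclusters
  - nodepools
  verbs: ["create", "get", "list", "watch", "update", "delete"]
----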
diff --git a/modules/hosted-control-planes-version-support.adoc b/modules/hosted-control-planes-version-support.adoc index c1ac9a5bc9..4727f757f7 100644 --- a/modules/hosted-control-planes-version-support.adoc +++ b/modules/hosted-control-planes-version-support.adoc @@ -19,7 +19,7 @@ The {hcp} feature includes the following components, which might require indepen [id="hcp-versioning-mgmt_{context}"] == Management cluster -In management clusters for production use, you need {mce}, which is available through OperatorHub. The {mce-short} bundles a supported build of the HyperShift Operator. For your management clusters to remain supported, you must use the version of {product-title} that {mce-short} runs on. In general, a new release of {mce-short} runs on the following versions of {product-title}: +In management clusters for production use, you need {mce}, which is available through the software catalog. The {mce-short} bundles a supported build of the HyperShift Operator. For your management clusters to remain supported, you must use the version of {product-title} that {mce-short} runs on. In general, a new release of {mce-short} runs on the following versions of {product-title}: * The latest General Availability version of {product-title} * Two versions before the latest General Availability version of {product-title} diff --git a/modules/ibi-install-lcao-console.adoc b/modules/ibi-install-lcao-console.adoc index 0eac8afb3f..2b956a6562 100644 --- a/modules/ibi-install-lcao-console.adoc +++ b/modules/ibi-install-lcao-console.adoc @@ -14,7 +14,7 @@ You can use the {product-title} web console to install the {lcao} from the 4.15 .Procedure -. In the {product-title} web console, navigate to *Operators* → *OperatorHub*. +. In the {product-title} web console, navigate to *Ecosystem* -> *Software Catalog*. . Search for the *{lcao}* from the list of available Operators, and then click *Install*. . On the *Install Operator* page, under *A specific namespace on the cluster* select *openshift-lifecycle-agent*. Then, click Install. . Click *Install*. @@ -23,7 +23,7 @@ You can use the {product-title} web console to install the {lcao} from the 4.15 To confirm that the installation is successful: -. Navigate to the *Operators* → *Installed Operators* page. +. Navigate to the *Ecosystem* -> *Installed Operators* page. . Ensure that the {lcao} is listed in the *openshift-lifecycle-agent* project with a *Status* of *InstallSucceeded*. [NOTE] @@ -33,5 +33,5 @@ During installation an Operator might display a *Failed* status. If the installa If the Operator is not installed successfully: -. Go to the *Operators* → *Installed Operators* page and inspect the *Operator Subscriptions* and *Install Plans* tabs for any failure or errors under *Status*. -. Go to the *Workloads* → *Pods* page and check the logs for pods in the *openshift-lifecycle-agent* project. \ No newline at end of file +. Go to the *Ecosystem* -> *Installed Operators* page and inspect the *Operator Subscriptions* and *Install Plans* tabs for any failure or errors under *Status*. +. Go to the *Workloads* → *Pods* page and check the logs for pods in the *openshift-lifecycle-agent* project. diff --git a/modules/installing-aws-load-balancer-operator.adoc b/modules/installing-aws-load-balancer-operator.adoc index 1ffdc33033..99e7d42fae 100644 --- a/modules/installing-aws-load-balancer-operator.adoc +++ b/modules/installing-aws-load-balancer-operator.adoc @@ -18,7 +18,7 @@ You can install the AWS Load Balancer Operator by using the web console. 
.Procedure -. Navigate to *Operators* → *OperatorHub* in the {product-title} web console. +. Navigate to *Ecosystem* -> *Software Catalog* in the {product-title} web console. . Select the *AWS Load Balancer Operator*. You can use the *Filter by keyword* text box or use the filter list to search for the AWS Load Balancer Operator from the list of Operators. . Select the `aws-load-balancer-operator` namespace. . On the *Install Operator* page, select the following options: @@ -30,4 +30,4 @@ You can install the AWS Load Balancer Operator by using the web console. .Verification -* Verify that the AWS Load Balancer Operator shows the *Status* as *Succeeded* on the Installed Operators dashboard. \ No newline at end of file +* Verify that the AWS Load Balancer Operator shows the *Status* as *Succeeded* on the Installed Operators dashboard. diff --git a/modules/installing-gitops-operator-in-web-console.adoc b/modules/installing-gitops-operator-in-web-console.adoc index ccf482d82e..099fd68de4 100644 --- a/modules/installing-gitops-operator-in-web-console.adoc +++ b/modules/installing-gitops-operator-in-web-console.adoc @@ -8,7 +8,7 @@ .Procedure -. Open the *Administrator* perspective of the web console and navigate to *Operators* → *OperatorHub* in the menu on the left. +. Open the *Administrator* perspective of the web console and navigate to *Ecosystem* -> *Software Catalog* in the menu on the left. . Search for `OpenShift GitOps`, click the *{gitops-title}* tile, and then click *Install*. + diff --git a/modules/installing-gitops-operator-using-cli.adoc b/modules/installing-gitops-operator-using-cli.adoc index 5187cd6560..afbcd33c97 100644 --- a/modules/installing-gitops-operator-using-cli.adoc +++ b/modules/installing-gitops-operator-using-cli.adoc @@ -7,7 +7,7 @@ = Installing {gitops-title} Operator using CLI [role="_abstract"] -You can install {gitops-title} Operator from the OperatorHub using the CLI. +You can install {gitops-title} Operator from the software catalog using the CLI. .Procedure @@ -31,7 +31,7 @@ spec: <1> Specify the channel name from where you want to subscribe the Operator. <2> Specify the name of the Operator to subscribe to. <3> Specify the name of the CatalogSource that provides the Operator. -<4> The namespace of the CatalogSource. Use `openshift-marketplace` for the default OperatorHub CatalogSources. +<4> The namespace of the CatalogSource. Use `openshift-marketplace` for the default software catalog CatalogSources. + . Apply the `Subscription` to the cluster: + @@ -57,4 +57,4 @@ openshift-gitops-applicationset-controller-6447b8dfdd-5ckgh 1/1 Running 0 openshift-gitops-redis-74bd8d7d96-49bjf 1/1 Running 0 65m openshift-gitops-repo-server-c999f75d5-l4rsg 1/1 Running 0 65m openshift-gitops-server-5785f7668b-wj57t 1/1 Running 0 53m ----- \ No newline at end of file +---- diff --git a/modules/installing-oadp-aws-sts.adoc b/modules/installing-oadp-aws-sts.adoc index bd9247aefc..fa74e5f52c 100644 --- a/modules/installing-oadp-aws-sts.adoc +++ b/modules/installing-oadp-aws-sts.adoc @@ -67,13 +67,13 @@ $ oc -n openshift-adp create secret generic cloud-credentials \ ==== In {product-title} versions 4.14 and later, the OADP Operator supports a new standardized {sts-short} workflow through the Operator Lifecycle Manager (OLM) and Cloud Credentials Operator (CCO). 
In this workflow, you do not need to create the above -secret, you only need to supply the role ARN during the installation of OLM-managed operators using the {product-title} web console, for more information see _Installing from OperatorHub using the web console_. +secret. You only need to supply the role ARN during the installation of OLM-managed operators using the {product-title} web console. For more information, see _Installing from the software catalog using the web console_. The preceding secret is created automatically by CCO. ==== . Install the OADP Operator: -.. In the {product-title} web console, browse to *Operators* -> *OperatorHub*. +.. In the {product-title} web console, browse to *Ecosystem* -> *Software Catalog*. .. Search for the *OADP Operator*. .. In the *role_ARN* field, paste the role_arn that you created previously and click *Install*. diff --git a/modules/installing-oadp-rosa-sts.adoc b/modules/installing-oadp-rosa-sts.adoc index 4add72887c..7cbf3f74bb 100644 --- a/modules/installing-oadp-rosa-sts.adoc +++ b/modules/installing-oadp-rosa-sts.adoc @@ -78,13 +78,13 @@ $ oc -n openshift-adp create secret generic cloud-credentials \ + [NOTE] ==== -In {product-title} versions 4.15 and later, the OADP Operator supports a new standardized {sts-short} workflow through the Operator Lifecycle Manager (OLM) and Cloud Credentials Operator (CCO). In this workflow, you do not need to create the above secret, you only need to supply the role ARN during the installation of OLM-managed operators using the {product-title} web console, for more information see _Installing from OperatorHub using the web console_. +In {product-title} versions 4.15 and later, the OADP Operator supports a new standardized {sts-short} workflow through the Operator Lifecycle Manager (OLM) and Cloud Credentials Operator (CCO). In this workflow, you do not need to create the above secret. You only need to supply the role ARN during the installation of OLM-managed operators using the {product-title} web console. For more information, see _Installing from the software catalog using the web console_. The preceding secret is created automatically by CCO. ==== . Install the OADP Operator: -.. In the {product-title} web console, browse to *Operators* -> *OperatorHub*. +.. In the {product-title} web console, browse to *Ecosystem* -> *Software Catalog*. .. Search for the *OADP Operator*. .. In the *role_ARN* field, paste the role_arn that you created previously and click *Install*. diff --git a/modules/installing-operator-oadp.adoc b/modules/installing-operator-oadp.adoc index eeb9013703..68ed877509 100644 --- a/modules/installing-operator-oadp.adoc +++ b/modules/installing-operator-oadp.adoc @@ -10,8 +10,8 @@ Use the following procedure to install the {oadp-short} Operator. .Procedure -. In the {product-title} web console, click *Operators* -> *OperatorHub*. +. In the {product-title} web console, click *Ecosystem* -> *Software Catalog*. . Use the *Filter by keyword* field to find the *OADP Operator*. . Select the *OADP Operator* and click *Install*. . Click *Install* to install the Operator in the `openshift-adp` project. -. Click *Operators* -> *Installed Operators* to verify the installation. \ No newline at end of file +. Click *Ecosystem* -> *Installed Operators* to verify the installation.
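Both notes above describe supplying a role ARN instead of creating the `cloud-credentials` secret when installing an OLM-managed Operator. The role ARN typically ends up as configuration on the Operator's `Subscription`; the sketch below shows one plausible shape, assuming the environment variable is named `ROLEARN` and the package is `redhat-oadp-operator`. Verify both assumptions against your cluster before relying on them; the ARN value is a placeholder.

[source,yaml]
----
apiVersion: operators.coreos.com/v1alpha1
kind: Subscription
metadata:
  name: redhat-oadp-operator             # assumed package name
  namespace: openshift-adp
spec:
  channel: stable                        # assumed channel
  name: redhat-oadp-operator
  source: redhat-operators
  sourceNamespace: openshift-marketplace
  config:
    env:
    - name: ROLEARN                      # assumed variable consumed by the standardized STS flow
      value: arn:aws:iam::123456789012:role/example-oadp-role   # placeholder role ARN
----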
diff --git a/modules/installing-wmco-using-cli.adoc b/modules/installing-wmco-using-cli.adoc index fdcbdb097f..a2910898b1 100644 --- a/modules/installing-wmco-using-cli.adoc +++ b/modules/installing-wmco-using-cli.adoc @@ -96,7 +96,7 @@ spec: <1> Specify `stable` as the channel. <2> Set an approval strategy. You can set `Automatic` or `Manual`. <3> Specify the `redhat-operators` catalog source, which contains the `windows-machine-config-operator` package manifests. If your {product-title} is installed on a restricted network, also known as a disconnected cluster, specify the name of the `CatalogSource` object you created when you configured the Operator LifeCycle Manager (OLM). -<4> Namespace of the catalog source. Use `openshift-marketplace` for the default OperatorHub catalog sources. +<4> Namespace of the catalog source. Use `openshift-marketplace` for the default catalog sources in the software catalog. .. Create the subscription: + diff --git a/modules/installing-wmco-using-web-console.adoc b/modules/installing-wmco-using-web-console.adoc index 4c44cfb6c0..e3a499d319 100644 --- a/modules/installing-wmco-using-web-console.adoc +++ b/modules/installing-wmco-using-web-console.adoc @@ -15,7 +15,7 @@ Dual NIC is not supported on WMCO-managed Windows instances. .Procedure -. From the *Administrator* perspective in the {product-title} web console, navigate to the *Operators -> OperatorHub* page. +. From the *Administrator* perspective in the {product-title} web console, navigate to the *Ecosystem* -> *Software Catalog* page. . Use the *Filter by keyword* box to search for `Windows Machine Config Operator` in the catalog. Click the *Windows Machine Config Operator* tile. diff --git a/modules/k8s-nmstate-installing-the-kubernetes-nmstate-operator.adoc b/modules/k8s-nmstate-installing-the-kubernetes-nmstate-operator.adoc index ba630d6686..ac419d5c1c 100644 --- a/modules/k8s-nmstate-installing-the-kubernetes-nmstate-operator.adoc +++ b/modules/k8s-nmstate-installing-the-kubernetes-nmstate-operator.adoc @@ -15,7 +15,7 @@ You can install the Kubernetes NMState Operator by using the web console. After .Procedure -. Select *Operators* -> *OperatorHub*. +. Select *Ecosystem* -> *Software Catalog*. . In the search field below *All Items*, enter `nmstate` and click *Enter* to search for the Kubernetes NMState Operator. diff --git a/modules/kmm-hub-running-kmm-on-the-spoke.adoc b/modules/kmm-hub-running-kmm-on-the-spoke.adoc index 5f8836cdd4..f68cc2cffb 100644 --- a/modules/kmm-hub-running-kmm-on-the-spoke.adoc +++ b/modules/kmm-hub-running-kmm-on-the-spoke.adoc @@ -8,7 +8,7 @@ After installing Kernel Module Management (KMM) on the spoke, no further action is required. Create a `ManagedClusterModule` object from the hub to deploy kernel modules on spoke clusters. -You can install KMM on the spokes cluster through a RHACM `Policy` object. In addition to installing KMM from the OperatorHub and running it in a lightweight spoke mode, the `Policy` configures additional RBAC required for the RHACM agent to be able to manage `Module` resources. +You can install KMM on the spoke clusters through a RHACM `Policy` object. In addition to installing KMM from the software catalog and running it in a lightweight spoke mode, the `Policy` configures additional RBAC required for the RHACM agent to be able to manage `Module` resources.
.Procedure diff --git a/modules/kmm-installing-using-web-console.adoc b/modules/kmm-installing-using-web-console.adoc index c9730b7868..8464ccdfd9 100644 --- a/modules/kmm-installing-using-web-console.adoc +++ b/modules/kmm-installing-using-web-console.adoc @@ -12,7 +12,7 @@ As a cluster administrator, you can install the Kernel Module Management (KMM) O . Log in to the {product-title} web console. . Install the Kernel Module Management Operator: -.. In the {product-title} web console, click *Operators* -> *OperatorHub*. +.. In the {product-title} web console, click *Ecosystem* -> *Software Catalog*. .. Select *Kernel Module Management Operator* from the list of available Operators, and then click *Install*. @@ -24,7 +24,7 @@ As a cluster administrator, you can install the Kernel Module Management (KMM) O To verify that KMM Operator installed successfully: -. Navigate to the *Operators* -> *Installed Operators* page. +. Navigate to the *Ecosystem* -> *Installed Operators* page. . Ensure that *Kernel Module Management Operator* is listed in the *openshift-kmm* project with a *Status* of *InstallSucceeded*. + [NOTE] @@ -35,5 +35,5 @@ During installation, an Operator might display a *Failed* status. If the install .Troubleshooting . To troubleshoot issues with Operator installation: + -.. Navigate to the *Operators* -> *Installed Operators* page and inspect the *Operator Subscriptions* and *Install Plans* tabs for any failure or errors under *Status*. +.. Navigate to the *Ecosystem* -> *Installed Operators* page and inspect the *Operator Subscriptions* and *Install Plans* tabs for any failure or errors under *Status*. .. Navigate to the *Workloads* -> *Pods* page and check the logs for pods in the `openshift-kmm` project. diff --git a/modules/log6x-quickstart-opentelemetry.adoc b/modules/log6x-quickstart-opentelemetry.adoc index 460aeff72f..cffe95d761 100644 --- a/modules/log6x-quickstart-opentelemetry.adoc +++ b/modules/log6x-quickstart-opentelemetry.adoc @@ -18,7 +18,7 @@ To configure OTLP ingestion and enable the OpenTelemetry data model, follow thes .Procedure -. Install the `{clo}`, `{loki-op}`, and `{coo-first}` from OperatorHub. +. Install the `{clo}`, `{loki-op}`, and `{coo-first}` from the software catalog. . Create a `LokiStack` custom resource (CR) in the `openshift-logging` namespace: + diff --git a/modules/log6x-quickstart-viaq.adoc b/modules/log6x-quickstart-viaq.adoc index 4195b211e0..a641a28c69 100644 --- a/modules/log6x-quickstart-viaq.adoc +++ b/modules/log6x-quickstart-viaq.adoc @@ -15,7 +15,7 @@ To use the default ViaQ data model, follow these steps: .Procedure -. Install the `{clo}`, `{loki-op}`, and `{coo-first}` from OperatorHub. +. Install the `{clo}`, `{loki-op}`, and `{coo-first}` from the software catalog. . Create a `LokiStack` custom resource (CR) in the `openshift-logging` namespace: + diff --git a/modules/logging-create-loki-cr-console.adoc b/modules/logging-create-loki-cr-console.adoc index c7ec21a2a4..bf9f85916a 100644 --- a/modules/logging-create-loki-cr-console.adoc +++ b/modules/logging-create-loki-cr-console.adoc @@ -16,7 +16,7 @@ You can create a `LokiStack` custom resource (CR) by using the {product-title} w .Procedure -. Go to the *Operators* -> *Installed Operators* page. Click the *All instances* tab. +. Go to the *Ecosystem* -> *Installed Operators* page. Click the *All instances* tab. . From the *Create new* drop-down list, select *LokiStack*. 
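After selecting *LokiStack* from the *Create new* drop-down list, you fill in a `LokiStack` custom resource. A rough sketch follows; the size, object storage secret name, storage class, and schema effective date are assumptions that must be adapted to your environment:

[source,yaml]
----
apiVersion: loki.grafana.com/v1
kind: LokiStack
metadata:
  name: logging-loki                # commonly used name; adjust as needed
  namespace: openshift-logging
spec:
  size: 1x.small                    # assumed sizing
  storage:
    schemas:
    - version: v13                  # schema version referenced later in this patch
      effectiveDate: "2024-10-01"   # placeholder date
    secret:
      name: logging-loki-s3         # assumed object storage secret
      type: s3
  storageClassName: gp3-csi         # assumed storage class
  tenants:
    mode: openshift-logging
----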
diff --git a/modules/logging-es-deploy-console.adoc b/modules/logging-es-deploy-console.adoc index 8529008fd7..1da27fe652 100644 --- a/modules/logging-es-deploy-console.adoc +++ b/modules/logging-es-deploy-console.adoc @@ -35,7 +35,7 @@ To install the OpenShift Elasticsearch Operator and Red Hat OpenShift Logging Op . Install the OpenShift Elasticsearch Operator: -.. In the {product-title} web console, click *Operators* -> *OperatorHub*. +.. In the {product-title} web console, click *Ecosystem* -> *Software Catalog*. .. Choose *OpenShift Elasticsearch Operator* from the list of available Operators, and click *Install*. @@ -66,13 +66,13 @@ include::snippets/logging-stable-updates-snip.adoc[] .. Click *Install*. -.. Verify that the OpenShift Elasticsearch Operator installed by switching to the *Operators* → *Installed Operators* page. +.. Verify that the OpenShift Elasticsearch Operator installed by switching to the *Ecosystem* -> *Installed Operators* page. .. Ensure that *OpenShift Elasticsearch Operator* is listed in all projects with a *Status* of *Succeeded*. . Install the Red Hat OpenShift Logging Operator: -.. In the {product-title} web console, click *Operators* -> *OperatorHub*. +.. In the {product-title} web console, click *Ecosystem* -> *Software Catalog*. .. Choose *Red Hat OpenShift Logging* from the list of available Operators, and click *Install*. @@ -96,13 +96,13 @@ scrapes the `openshift-logging` namespace. .. Click *Install*. -.. Verify that the Red Hat OpenShift Logging Operator installed by switching to the *Operators* → *Installed Operators* page. +.. Verify that the Red Hat OpenShift Logging Operator installed by switching to the *Ecosystem* -> *Installed Operators* page. .. Ensure that *Red Hat OpenShift Logging* is listed in the *openshift-logging* project with a *Status* of *Succeeded*. + If the Operator does not appear as installed, to troubleshoot further: + -* Switch to the *Operators* → *Installed Operators* page and inspect +* Switch to the *Ecosystem* -> *Installed Operators* page and inspect the *Status* column for any errors or failures. * Switch to the *Workloads* → *Pods* page and check the logs in any pods in the `openshift-logging` project that are reporting issues. diff --git a/modules/logging-in-to-the-argo-cd-instance-by-using-the-argo-cd-admin-account.adoc b/modules/logging-in-to-the-argo-cd-instance-by-using-the-argo-cd-admin-account.adoc index 0371238031..2078c01909 100644 --- a/modules/logging-in-to-the-argo-cd-instance-by-using-the-argo-cd-admin-account.adoc +++ b/modules/logging-in-to-the-argo-cd-instance-by-using-the-argo-cd-admin-account.adoc @@ -15,7 +15,7 @@ .Procedure -. In the *Administrator* perspective of the web console, navigate to *Operators* -> *Installed Operators* to verify that the {gitops-title} Operator is installed. +. In the *Administrator* perspective of the web console, navigate to *Ecosystem* -> *Installed Operators* to verify that the {gitops-title} Operator is installed. . Navigate to the {rh-app-icon} menu -> *OpenShift GitOps* -> *Cluster Argo CD*. The login page of the Argo CD UI is displayed in a new window. . Optional: To log in with your {product-title} credentials, ensure you are a user of the `cluster-admins` group and then select the `LOG IN VIA OPENSHIFT` option in the Argo CD user interface. 
+ diff --git a/modules/logging-install-es-operator.adoc b/modules/logging-install-es-operator.adoc index 53b9aa54d4..7246b320b7 100644 --- a/modules/logging-install-es-operator.adoc +++ b/modules/logging-install-es-operator.adoc @@ -26,7 +26,7 @@ If you use a local volume for persistent storage, do not use a raw block volume, .Procedure -. In the {product-title} web console, click *Operators* -> *OperatorHub*. +. In the {product-title} web console, click *Ecosystem* -> *Software Catalog*. . Click *OpenShift Elasticsearch Operator* from the list of available Operators, and click *Install*. . Ensure that the *All namespaces on the cluster* is selected under *Installation mode*. . Ensure that *openshift-operators-redhat* is selected under *Installed Namespace*. @@ -48,5 +48,5 @@ This option sets the `openshift.io/cluster-monitoring: "true"` label in the `Nam .Verification -. Verify that the OpenShift Elasticsearch Operator installed by switching to the *Operators* → *Installed Operators* page. +. Verify that the OpenShift Elasticsearch Operator installed by switching to the *Ecosystem* -> *Installed Operators* page. . Ensure that *OpenShift Elasticsearch Operator* is listed in all projects with a *Status* of *Succeeded*. diff --git a/modules/logging-loki-gui-install.adoc b/modules/logging-loki-gui-install.adoc index e723a6795e..1db5023445 100644 --- a/modules/logging-loki-gui-install.adoc +++ b/modules/logging-loki-gui-install.adoc @@ -6,7 +6,7 @@ [id="logging-loki-gui-install_{context}"] = Installing {logging-uc} and the {loki-op} using the web console -To install and configure logging on your {product-title} cluster, an Operator such as {loki-op} for log storage must be installed first. This can be done from the OperatorHub within the web console. +To install and configure logging on your {product-title} cluster, an Operator such as {loki-op} for log storage must be installed first. This can be done from the software catalog within the web console. .Prerequisites @@ -16,7 +16,7 @@ To install and configure logging on your {product-title} cluster, an Operator su .Procedure -. In the {product-title} web console *Administrator* perspective, go to *Operators* -> *OperatorHub*. +. In the {product-title} web console *Administrator* perspective, go to *Ecosystem* -> *Software Catalog*. . Type {loki-op} in the *Filter by keyword* field. Click *{loki-op}* in the list of available Operators, and then click *Install*. + @@ -43,7 +43,7 @@ If the approval strategy in the subscription is set to *Automatic*, the update p . Install the Red{nbsp}Hat OpenShift Logging Operator: -.. In the {product-title} web console, click *Operators* -> *OperatorHub*. +.. In the {product-title} web console, click *Ecosystem* -> *Software Catalog*. .. Choose *Red{nbsp}Hat OpenShift Logging* from the list of available Operators, and click *Install*. @@ -67,7 +67,7 @@ scrapes the `openshift-logging` namespace. .. Click *Install*. -. Go to the *Operators* -> *Installed Operators* page. Click the *All instances* tab. +. Go to the *Ecosystem* -> *Installed Operators* page. Click the *All instances* tab. . From the *Create new* drop-down list, select *LokiStack*. @@ -162,7 +162,7 @@ spec: .Verification -. Go to *Operators* -> *Installed Operators*. +. Go to *Ecosystem* -> *Installed Operators*. . Make sure the *openshift-logging* project is selected. . In the *Status* column, verify that you see green checkmarks with *InstallSucceeded* and the text *Up to date*. 
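The verification steps above rely on the green *InstallSucceeded* check marks in the web console. If you prefer a quick CLI cross-check, listing the cluster service versions in the `openshift-logging` project shows the same information; the `PHASE` column should read `Succeeded`. The command is shown as a sketch, with output omitted because it varies by release:

[source,terminal]
----
$ oc get csv -n openshift-logging
----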
diff --git a/modules/logging-upgrading-clo.adoc b/modules/logging-upgrading-clo.adoc index f8ee1f5a4d..1145ca310f 100644 --- a/modules/logging-upgrading-clo.adoc +++ b/modules/logging-upgrading-clo.adoc @@ -16,7 +16,7 @@ To update the {clo} to a new major release version, you must modify the update c .Procedure -. Navigate to *Operators* -> *Installed Operators*. +. Navigate to *Ecosystem* -> *Installed Operators*. . Select the *openshift-logging* project. @@ -26,8 +26,8 @@ To update the {clo} to a new major release version, you must modify the update c . In the *Change Subscription Update Channel* window, select the latest major version update channel, *stable-5.9*, and click *Save*. Note the `cluster-logging.v5.9.` version. -. Wait for a few seconds, and then go to *Operators* -> *Installed Operators* to verify that the {clo} version matches the latest `cluster-logging.v5.9.` version. +. Wait for a few seconds, and then go to *Ecosystem* -> *Installed Operators* to verify that the {clo} version matches the latest `cluster-logging.v5.9.` version. -. On the *Operators* -> *Installed Operators* page, wait for the *Status* field to report *Succeeded*. +. On the *Ecosystem* -> *Installed Operators* page, wait for the *Status* field to report *Succeeded*. . Check if the `LokiStack` custom resource contains the `v13` schema version and add it if it is missing. For correctly adding the `v13` schema version, see "Upgrading the LokiStack storage schema". diff --git a/modules/logging-upgrading-loki.adoc b/modules/logging-upgrading-loki.adoc index af27f00ccc..62f30fb382 100644 --- a/modules/logging-upgrading-loki.adoc +++ b/modules/logging-upgrading-loki.adoc @@ -16,7 +16,7 @@ To update the {loki-op} to a new major release version, you must modify the upda .Procedure -. Navigate to *Operators* -> *Installed Operators*. +. Navigate to *Ecosystem* -> *Installed Operators*. . Select the *openshift-operators-redhat* project. @@ -26,8 +26,8 @@ To update the {loki-op} to a new major release version, you must modify the upda . In the *Change Subscription Update Channel* window, select the latest major version update channel, *stable-5.y*, and click *Save*. Note the `loki-operator.v5.y.z` version. -. Wait for a few seconds, then click *Operators* -> *Installed Operators*. Verify that the {loki-op} version matches the latest `loki-operator.v5.y.z` version. +. Wait for a few seconds, then click *Ecosystem* -> *Installed Operators*. Verify that the {loki-op} version matches the latest `loki-operator.v5.y.z` version. -. On the *Operators* -> *Installed Operators* page, wait for the *Status* field to report *Succeeded*. +. On the *Ecosystem* -> *Installed Operators* page, wait for the *Status* field to report *Succeeded*. . Check if the `LokiStack` custom resource contains the `v13` schema version and add it if it is missing. For correctly adding the `v13` schema version, see "Upgrading the LokiStack storage schema". diff --git a/modules/lvms-creating-lvms-cluster-using-web-console.adoc b/modules/lvms-creating-lvms-cluster-using-web-console.adoc index 72f3e387d0..5d22ca15f4 100644 --- a/modules/lvms-creating-lvms-cluster-using-web-console.adoc +++ b/modules/lvms-creating-lvms-cluster-using-web-console.adoc @@ -26,7 +26,7 @@ You can only create a single instance of the `LVMCluster` custom resource (CR) o .Procedure . Log in to the {product-title} web console. -. Click *Operators* -> *Installed Operators*. +. Click *Ecosystem* -> *Installed Operators*. . In the `openshift-lvm-storage` namespace, click *{lvms}*. . 
Click *Create LVMCluster* and select either *Form view* or *YAML view*. . Configure the required `LVMCluster` CR parameters. diff --git a/modules/lvms-deleting-lvmcluster-using-web-console.adoc b/modules/lvms-deleting-lvmcluster-using-web-console.adoc index 6807d17430..cb58e3dd1b 100644 --- a/modules/lvms-deleting-lvmcluster-using-web-console.adoc +++ b/modules/lvms-deleting-lvmcluster-using-web-console.adoc @@ -16,7 +16,7 @@ You can delete the `LVMCluster` custom resource (CR) using the {product-title} w .Procedure . Log in to the {product-title} web console. -. Click *Operators* → *Installed Operators* to view all the installed Operators. +. Click *Ecosystem* -> *Installed Operators* to view all the installed Operators. . Click *{lvms}* in the `openshift-lvm-storage` namespace. . Click the *LVMCluster* tab. . From the *Actions*, select *Delete LVMCluster*. diff --git a/modules/lvms-installing-logical-volume-manager-operator-using-openshift-web-console.adoc b/modules/lvms-installing-logical-volume-manager-operator-using-openshift-web-console.adoc index 440abde16d..b10ba5c225 100644 --- a/modules/lvms-installing-logical-volume-manager-operator-using-openshift-web-console.adoc +++ b/modules/lvms-installing-logical-volume-manager-operator-using-openshift-web-console.adoc @@ -21,8 +21,8 @@ The default namespace for the {lvms} Operator is `openshift-lvm-storage`. .Procedure . Log in to the {product-title} web console. -. Click *Operators -> OperatorHub*. -. Click *LVM Storage* on the *OperatorHub* page. +. Click *Ecosystem* -> *Software Catalog*. +. Click *LVM Storage* on the software catalog page. . Set the following options on the *Operator Installation* page: .. *Update Channel* as *stable-{product-version}*. .. *Installation Mode* as *A specific namespace on the cluster*. diff --git a/modules/lvms-scaling-storage-of-clusters-using-web-console.adoc b/modules/lvms-scaling-storage-of-clusters-using-web-console.adoc index fa01e0dcd5..4c41da4946 100644 --- a/modules/lvms-scaling-storage-of-clusters-using-web-console.adoc +++ b/modules/lvms-scaling-storage-of-clusters-using-web-console.adoc @@ -16,7 +16,7 @@ You can scale up the storage capacity of the worker nodes on a cluster by using .Procedure . Log in to the {product-title} web console. -. Click *Operators* -> *Installed Operators*. +. Click *Ecosystem* -> *Installed Operators*. . Click *{lvms}* in the `openshift-lvm-storage` namespace. . Click the *LVMCluster* tab to view the `LVMCluster` CR created on the cluster. . From the *Actions* menu, select *Edit LVMCluster*. @@ -24,4 +24,4 @@ You can scale up the storage capacity of the worker nodes on a cluster by using . Edit the `LVMCluster` CR to add the new device path in the `deviceSelector` field: + include::snippets/lvms-scaling-up-storage-lvmcluster-cr-snippet.adoc[] -. Click *Save*. \ No newline at end of file +. Click *Save*. diff --git a/modules/lvms-uninstalling-logical-volume-manager-operator-using-openshift-web-console.adoc b/modules/lvms-uninstalling-logical-volume-manager-operator-using-openshift-web-console.adoc index 42c775cdcd..e466c4967d 100644 --- a/modules/lvms-uninstalling-logical-volume-manager-operator-using-openshift-web-console.adoc +++ b/modules/lvms-uninstalling-logical-volume-manager-operator-using-openshift-web-console.adoc @@ -18,9 +18,9 @@ You can uninstall {lvms} using the {product-title} web console. .Procedure . Log in to the {product-title} web console. -. Click *Operators* → *Installed Operators*. +. Click *Ecosystem* -> *Installed Operators*. . 
Click *{lvms}* in the `openshift-lvm-storage` namespace. . Click the *Details* tab. . From the *Actions* menu, select *Uninstall Operator*. . Optional: When prompted, select the *Delete all operand instances for this operator* checkbox to delete the operand instances for {lvms}. -. Click *Uninstall*. \ No newline at end of file +. Click *Uninstall*. diff --git a/modules/metallb-installing-using-web-console.adoc b/modules/metallb-installing-using-web-console.adoc index 93093f0481..33ce583414 100644 --- a/modules/metallb-installing-using-web-console.adoc +++ b/modules/metallb-installing-using-web-console.adoc @@ -4,7 +4,7 @@ :_mod-docs-content-type: PROCEDURE [id="installing-the-metallb-operator-using-web-console_{context}"] -= Installing the MetalLB Operator from the OperatorHub using the web console += Installing the MetalLB Operator from the software catalog using the web console As a cluster administrator, you can install the MetalLB Operator by using the {product-title} web console. @@ -14,7 +14,7 @@ As a cluster administrator, you can install the MetalLB Operator by using the {p .Procedure -. In the {product-title} web console, navigate to *Operators* -> *OperatorHub*. +. In the {product-title} web console, navigate to *Ecosystem* -> *Software Catalog*. . Type a keyword into the *Filter by keyword* box or scroll to find the Operator you want. For example, type `metallb` to find the MetalLB Operator. + @@ -26,12 +26,12 @@ You can also filter options by *Infrastructure Features*. For example, select *D . To confirm that the installation is successful: -.. Navigate to the *Operators* -> *Installed Operators* page. +.. Navigate to the *Ecosystem* -> *Installed Operators* page. .. Check that the Operator is installed in the `openshift-operators` namespace and that its status is `Succeeded`. . If the Operator is not installed successfully, check the status of the Operator and review the logs: -.. Navigate to the *Operators* -> *Installed Operators* page and inspect the `Status` column for any errors or failures. +.. Navigate to the *Ecosystem* -> *Installed Operators* page and inspect the `Status` column for any errors or failures. .. Navigate to the *Workloads* -> *Pods* page and check the logs in any pods in the `openshift-operators` project that are reporting issues. diff --git a/modules/metering-debugging.adoc b/modules/metering-debugging.adoc index dab3a52a1e..4f55c18856 100644 --- a/modules/metering-debugging.adoc +++ b/modules/metering-debugging.adoc @@ -9,7 +9,7 @@ Debugging metering is much easier when you interact directly with the various co [NOTE] ==== -All of the commands in this section assume you have installed metering through OperatorHub in the `openshift-metering` namespace. +All of the commands in this section assume you have installed metering through the software catalog in the `openshift-metering` namespace. ==== [id="metering-get-reporting-operator-logs_{context}"] diff --git a/modules/metering-install-operator.adoc b/modules/metering-install-operator.adoc index daeee992cc..2417a72c53 100644 --- a/modules/metering-install-operator.adoc +++ b/modules/metering-install-operator.adoc @@ -40,13 +40,13 @@ metadata: <1> It is strongly recommended to deploy metering in the `openshift-metering` namespace. <2> Include this annotation before configuring specific node selectors for the operand pods. -. In the {product-title} web console, click *Operators* -> *OperatorHub*. Filter for `metering` to find the Metering Operator. +. 
In the {product-title} web console, click *Ecosystem* -> *Software Catalog*. Filter for `metering` to find the Metering Operator. . Click the *Metering* card, review the package description, and then click *Install*. . Select an *Update Channel*, *Installation Mode*, and *Approval Strategy*. . Click *Install*. -. Verify that the Metering Operator is installed by switching to the *Operators* -> *Installed Operators* page. The Metering Operator has a *Status* of *Succeeded* when the installation is complete. +. Verify that the Metering Operator is installed by switching to the *Ecosystem* -> *Installed Operators* page. The Metering Operator has a *Status* of *Succeeded* when the installation is complete. + [NOTE] ==== diff --git a/modules/metering-install-verify.adoc b/modules/metering-install-verify.adoc index 9b575dfa79..b5ade5db2e 100644 --- a/modules/metering-install-verify.adoc +++ b/modules/metering-install-verify.adoc @@ -11,7 +11,7 @@ You can verify the metering installation by performing any of the following chec + -- .Procedure (UI) - . Navigate to *Operators* -> *Installed Operators* in the `openshift-metering` namespace. + . Navigate to *Ecosystem* -> *Installed Operators* in the `openshift-metering` namespace. . Click *Metering Operator*. . Click *Subscription* for *Subscription Details*. . Check the *Installed Version*. diff --git a/modules/migration-error-messages.adoc b/modules/migration-error-messages.adoc index 5159b9f023..39d2ea36f5 100644 --- a/modules/migration-error-messages.adoc +++ b/modules/migration-error-messages.adoc @@ -83,7 +83,7 @@ The default value of `restic_timeout` is one hour. You can increase this paramet .Procedure -. In the {product-title} web console, navigate to *Operators* -> *Installed Operators*. +. In the {product-title} web console, navigate to *Ecosystem* -> *Installed Operators*. . Click *{mtc-full} Operator*. . In the *MigrationController* tab, click *migration-controller*. . In the *YAML* tab, update the following parameter value: diff --git a/modules/migration-installing-mtc-on-ocp-4.adoc b/modules/migration-installing-mtc-on-ocp-4.adoc index 0056c3829f..2819ce447f 100644 --- a/modules/migration-installing-mtc-on-ocp-4.adoc +++ b/modules/migration-installing-mtc-on-ocp-4.adoc @@ -20,7 +20,7 @@ endif::[] .Procedure -. In the {product-title} web console, click *Operators* -> *OperatorHub*. +. In the {product-title} web console, click *Ecosystem* -> *Software Catalog*. . Use the *Filter by keyword* field to find the *{mtc-full} Operator*. . Select the *{mtc-full} Operator* and click *Install*. . Click *Install*. diff --git a/modules/migration-upgrading-mtc-on-ocp-4.adoc b/modules/migration-upgrading-mtc-on-ocp-4.adoc index 2972390c74..2ad28e1f33 100644 --- a/modules/migration-upgrading-mtc-on-ocp-4.adoc +++ b/modules/migration-upgrading-mtc-on-ocp-4.adoc @@ -31,7 +31,7 @@ When upgrading the {mtc-short} by using the Operator Lifecycle Manager, you must .Procedure -. In the {product-title} console, navigate to *Operators* -> *Installed Operators*. +. In the {product-title} console, navigate to *Ecosystem* -> *Installed Operators*. + Operators that have a pending upgrade display an *Upgrade available* status. @@ -39,5 +39,5 @@ Operators that have a pending upgrade display an *Upgrade available* status. . Click the *Subscription* tab. Any upgrades requiring approval are displayed next to *Upgrade Status*. For example, it might display *1 requires approval*. . Click *1 requires approval*, then click *Preview Install Plan*. . 
Review the resources that are listed as available for upgrade and click *Approve*. -. Navigate back to the *Operators -> Installed Operators* page to monitor the progress of the upgrade. When complete, the status changes to *Succeeded* and *Up to date*. +. Navigate back to the *Ecosystem* -> *Installed Operators* page to monitor the progress of the upgrade. When complete, the status changes to *Succeeded* and *Up to date*. . Click *Workloads* -> *Pods* to verify that the {mtc-short} pods are running. diff --git a/modules/monitoring-installing-cluster-observability-operator-using-the-web-console.adoc b/modules/monitoring-installing-cluster-observability-operator-using-the-web-console.adoc index 362a575cea..8f6695599e 100644 --- a/modules/monitoring-installing-cluster-observability-operator-using-the-web-console.adoc +++ b/modules/monitoring-installing-cluster-observability-operator-using-the-web-console.adoc @@ -5,7 +5,7 @@ :_mod-docs-content-type: PROCEDURE [id="installing-the-cluster-observability-operator-in-the-web-console-_{context}"] = Installing the {coo-full} in the web console -Install the {coo-first} from OperatorHub by using the {product-title} web console. +Install the {coo-first} from the software catalog by using the {product-title} web console. .Prerequisites @@ -14,7 +14,7 @@ Install the {coo-first} from OperatorHub by using the {product-title} web consol .Procedure -. In the {product-title} web console, click *Operators* -> *OperatorHub*. +. In the {product-title} web console, click *Ecosystem* -> *Software Catalog*. . Type `cluster observability operator` in the *Filter by keyword* box. . Click *{coo-full}* in the list of results. . Read the information about the Operator, and configure the following installation settings: @@ -32,4 +32,4 @@ For example, you can select to subscribe to a different update channel, to insta .Verification -* Go to *Operators* -> *Installed Operators*, and verify that the *{coo-full}* entry appears in the list. +* Go to *Ecosystem* -> *Installed Operators*, and verify that the *{coo-full}* entry appears in the list. diff --git a/modules/monitoring-uninstalling-cluster-observability-operator-using-the-web-console.adoc b/modules/monitoring-uninstalling-cluster-observability-operator-using-the-web-console.adoc index 840fcc6ac7..c7db4eca02 100644 --- a/modules/monitoring-uninstalling-cluster-observability-operator-using-the-web-console.adoc +++ b/modules/monitoring-uninstalling-cluster-observability-operator-using-the-web-console.adoc @@ -5,7 +5,7 @@ :_mod-docs-content-type: PROCEDURE [id="uninstalling-the-cluster-observability-operator-using-the-web-console_{context}"] = Uninstalling the {coo-full} using the web console -If you have installed the {coo-first} by using OperatorHub, you can uninstall it in the {product-title} web console. +If you have installed the {coo-first} by using the software catalog, you can uninstall it in the {product-title} web console. .Prerequisites @@ -14,7 +14,7 @@ If you have installed the {coo-first} by using OperatorHub, you can uninstall it .Procedure -. Go to *Operators* -> *Installed Operators*. +. Go to *Ecosystem* -> *Installed Operators*. . Locate the *{coo-full}* entry in the list. @@ -22,4 +22,4 @@ If you have installed the {coo-first} by using OperatorHub, you can uninstall it .Verification -* Go to *Operators* -> *Installed Operators*, and verify that the *{coo-full}* entry no longer appears in the list. 
+* Go to *Ecosystem* -> *Installed Operators*, and verify that the *{coo-full}* entry no longer appears in the list. diff --git a/modules/multi-arch-creating-podplacment-config-using-web-console.adoc b/modules/multi-arch-creating-podplacment-config-using-web-console.adoc index c601ddc0eb..58c28d26f8 100644 --- a/modules/multi-arch-creating-podplacment-config-using-web-console.adoc +++ b/modules/multi-arch-creating-podplacment-config-using-web-console.adoc @@ -19,7 +19,7 @@ To deploy the pod placement operand that enables architecture-aware workload sch . Log in to the {product-title} web console. -. Navigate to *Operators* → *Installed Operators*. +. Navigate to *Ecosystem* -> *Installed Operators*. . On the *Installed Operators* page, click *Multiarch Tuning Operator*. diff --git a/modules/multi-arch-deleting-podplacment-config-using-web-console.adoc b/modules/multi-arch-deleting-podplacment-config-using-web-console.adoc index b2a69615e4..c1e762c273 100644 --- a/modules/multi-arch-deleting-podplacment-config-using-web-console.adoc +++ b/modules/multi-arch-deleting-podplacment-config-using-web-console.adoc @@ -21,7 +21,7 @@ You can delete this object by using the {product-title} web console. . Log in to the {product-title} web console. -. Navigate to *Operators* → *Installed Operators*. +. Navigate to *Ecosystem* -> *Installed Operators*. . On the *Installed Operators* page, click *Multiarch Tuning Operator*. diff --git a/modules/multi-arch-installing-using-cli.adoc b/modules/multi-arch-installing-using-cli.adoc index b9fbe4d3a3..81dacd465b 100644 --- a/modules/multi-arch-installing-using-cli.adoc +++ b/modules/multi-arch-installing-using-cli.adoc @@ -76,7 +76,7 @@ $ oc create -f <1> [NOTE] ==== -For more details about configuring the `Subscription` object and `OperatorGroup` object, see "Installing from OperatorHub by using the CLI". +For more details about configuring the `Subscription` object and `OperatorGroup` object, see "Installing from the software catalog by using the CLI". ==== .Verification diff --git a/modules/multi-arch-installing-using-web-console.adoc b/modules/multi-arch-installing-using-web-console.adoc index dc75d25936..754b897ee2 100644 --- a/modules/multi-arch-installing-using-web-console.adoc +++ b/modules/multi-arch-installing-using-web-console.adoc @@ -17,7 +17,7 @@ You can install the Multiarch Tuning Operator by using the {product-title} web c .Procedure . Log in to the {product-title} web console. -. Navigate to *Operators -> OperatorHub*. +. Navigate to *Ecosystem* -> *Software Catalog*. . Enter *Multiarch Tuning Operator* in the search field. . Click *Multiarch Tuning Operator*. . Select the *Multiarch Tuning Operator* version from the *Version* list. @@ -42,5 +42,5 @@ As a cluster administrator, you must manually approve the update request to upda .Verification -. Navigate to *Operators* → *Installed Operators*. -. Verify that the *Multiarch Tuning Operator* is listed with the *Status* field as *Succeeded* in the `openshift-multiarch-tuning-operator` namespace. \ No newline at end of file +. Navigate to *Ecosystem* -> *Installed Operators*. +. Verify that the *Multiarch Tuning Operator* is listed with the *Status* field as *Succeeded* in the `openshift-multiarch-tuning-operator` namespace. 
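For reference, the CLI installation module above refers readers to the `Subscription` object and `OperatorGroup` object used when installing from the software catalog by using the CLI. The following is a minimal sketch of those two objects for the Multiarch Tuning Operator; only the `openshift-multiarch-tuning-operator` namespace comes from the module, while the `stable` channel, the `multiarch-tuning-operator` package name, and the `redhat-operators` catalog source are illustrative assumptions.

[source,yaml]
----
apiVersion: operators.coreos.com/v1
kind: OperatorGroup
metadata:
  name: openshift-multiarch-tuning-operator
  namespace: openshift-multiarch-tuning-operator  # namespace used by the installation modules
spec:
  targetNamespaces:
  - openshift-multiarch-tuning-operator
---
apiVersion: operators.coreos.com/v1alpha1
kind: Subscription
metadata:
  name: multiarch-tuning-operator
  namespace: openshift-multiarch-tuning-operator
spec:
  channel: stable                         # assumed update channel
  name: multiarch-tuning-operator         # assumed package name in the catalog
  source: redhat-operators                # assumed catalog source
  sourceNamespace: openshift-marketplace  # default location of the software catalog sources
----

Creating both objects with `oc create -f <filename>`, as in the CLI module, triggers the same installation that the web console performs from the software catalog.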
diff --git a/modules/multi-arch-uninstalling-using-web-console.adoc b/modules/multi-arch-uninstalling-using-web-console.adoc index 4f0168e9fb..9249a1f976 100644 --- a/modules/multi-arch-uninstalling-using-web-console.adoc +++ b/modules/multi-arch-uninstalling-using-web-console.adoc @@ -21,7 +21,7 @@ You must delete the `ClusterPodPlacementConfig` object before uninstalling the M .Procedure . Log in to the {product-title} web console. -. Navigate to *Operators -> OperatorHub*. +. Navigate to *Ecosystem* -> *Software Catalog*. . Enter *Multiarch Tuning Operator* in the search field. . Click *Multiarch Tuning Operator*. . Click the *Details* tab. @@ -30,5 +30,5 @@ You must delete the `ClusterPodPlacementConfig` object before uninstalling the M .Verification -. Navigate to *Operators* → *Installed Operators*. -. On the *Installed Operators* page, verify that the *Multiarch Tuning Operator* is not listed. \ No newline at end of file +. Navigate to *Ecosystem* -> *Installed Operators*. +. On the *Installed Operators* page, verify that the *Multiarch Tuning Operator* is not listed. diff --git a/modules/nbde-tang-server-operator-deploying.adoc b/modules/nbde-tang-server-operator-deploying.adoc index 877c15c110..4c5324a452 100644 --- a/modules/nbde-tang-server-operator-deploying.adoc +++ b/modules/nbde-tang-server-operator-deploying.adoc @@ -15,7 +15,7 @@ You can deploy and quickly configure one or more Tang servers using the NBDE Tan .Procedure -. In the {product-title} web console, navigate to *Operators* -> *OperatorHub*. +. In the {product-title} web console, navigate to *Ecosystem* -> *Software Catalog*. . Select *Project*, and click *Create Project*: + image::nbde-tang-server-operator-07-create-project.png[Create Project in the web console] @@ -30,7 +30,7 @@ image::nbde-tang-server-operator-11-pvc.png[PersistentVolumeClaims in the Storag . On the `Create PersistentVolumeClaim` page, select a storage that fits your deployment scenario. Consider how often you want to rotate the encryption keys. Name your PVC and choose the claimed storage capacity, for example: + image::nbde-tang-server-operator-13-create-pvc.png[Create PersistentVolumeClaims page] -. Navigate to *Operators* -> *Installed Operators*, and click *NBDE Tang Server*. +. Navigate to *Ecosystem* -> *Installed Operators*, and click *NBDE Tang Server*. . Click *Create instance*. + image::nbde-tang-server-operator-15-create-instance.png[Create NBDE Tang Server instance] diff --git a/modules/nbde-tang-server-operator-identifying-url-cli.adoc b/modules/nbde-tang-server-operator-identifying-url-cli.adoc index d48efd04a3..f6438b2bba 100644 --- a/modules/nbde-tang-server-operator-identifying-url-cli.adoc +++ b/modules/nbde-tang-server-operator-identifying-url-cli.adoc @@ -6,7 +6,7 @@ [id="identifying-url-nbde-tang-server-operator-using-cli_{context}"] = Identifying URL of the NBDE Tang Server Operator using CLI -You can identify the URLs of Tang servers deployed with the NBDE Tang Server Operator from the OperatorHub by using the CLI. After you identify the URLs, you use the `clevis luks bind` command on your clients containing LUKS-encrypted volumes that you want to unlock automatically by using keys advertised by the Tang servers. 
See the link:https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/9/html/security_hardening/configuring-automated-unlocking-of-encrypted-volumes-using-policy-based-decryption_security-hardening#configuring-manual-enrollment-of-volumes-using-clevis_configuring-automated-unlocking-of-encrypted-volumes-using-policy-based-decryption[Configuring manual enrollment of LUKS-encrypted volumes] section in the RHEL 9 Security hardening document for detailed steps describing the configuration of clients with Clevis. +You can identify the URLs of Tang servers deployed with the NBDE Tang Server Operator from the software catalog by using the CLI. After you identify the URLs, you use the `clevis luks bind` command on your clients containing LUKS-encrypted volumes that you want to unlock automatically by using keys advertised by the Tang servers. See the link:https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/9/html/security_hardening/configuring-automated-unlocking-of-encrypted-volumes-using-policy-based-decryption_security-hardening#configuring-manual-enrollment-of-volumes-using-clevis_configuring-automated-unlocking-of-encrypted-volumes-using-policy-based-decryption[Configuring manual enrollment of LUKS-encrypted volumes] section in the RHEL 9 Security hardening document for detailed steps describing the configuration of clients with Clevis. .Prerequisites diff --git a/modules/nbde-tang-server-operator-identifying-url-web-console.adoc b/modules/nbde-tang-server-operator-identifying-url-web-console.adoc index a76e135738..fc2d3f6fd1 100644 --- a/modules/nbde-tang-server-operator-identifying-url-web-console.adoc +++ b/modules/nbde-tang-server-operator-identifying-url-web-console.adoc @@ -6,7 +6,7 @@ [id="identifying-url-nbde-tang-server-operator-using-web-console_{context}"] = Identifying URL of the NBDE Tang Server Operator using the web console -You can identify the URLs of Tang servers deployed with the NBDE Tang Server Operator from the OperatorHub by using the {product-title} web console. After you identify the URLs, you use the `clevis luks bind` command on your clients containing LUKS-encrypted volumes that you want to unlock automatically by using keys advertised by the Tang servers. See the link:https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/9/html/security_hardening/configuring-automated-unlocking-of-encrypted-volumes-using-policy-based-decryption_security-hardening#configuring-manual-enrollment-of-volumes-using-clevis_configuring-automated-unlocking-of-encrypted-volumes-using-policy-based-decryption[Configuring manual enrollment of LUKS-encrypted volumes] section in the RHEL 9 Security hardening document for detailed steps describing the configuration of clients with Clevis. +You can identify the URLs of Tang servers deployed with the NBDE Tang Server Operator from the software catalog by using the {product-title} web console. After you identify the URLs, you use the `clevis luks bind` command on your clients containing LUKS-encrypted volumes that you want to unlock automatically by using keys advertised by the Tang servers. 
See the link:https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/9/html/security_hardening/configuring-automated-unlocking-of-encrypted-volumes-using-policy-based-decryption_security-hardening#configuring-manual-enrollment-of-volumes-using-clevis_configuring-automated-unlocking-of-encrypted-volumes-using-policy-based-decryption[Configuring manual enrollment of LUKS-encrypted volumes] section in the RHEL 9 Security hardening document for detailed steps describing the configuration of clients with Clevis. .Prerequisites @@ -15,7 +15,7 @@ You can identify the URLs of Tang servers deployed with the NBDE Tang Server Ope .Procedure -. In the {product-title} web console, navigate to *Operators* -> *Installed Operators* -> *Tang Server*. +. In the {product-title} web console, navigate to *Ecosystem* -> *Installed Operators* -> *Tang Server*. . On the NBDE Tang Server Operator details page, select *Tang Server*. + diff --git a/modules/nbde-tang-server-operator-installing-cli.adoc b/modules/nbde-tang-server-operator-installing-cli.adoc index 01bd729413..a32ede38e9 100644 --- a/modules/nbde-tang-server-operator-installing-cli.adoc +++ b/modules/nbde-tang-server-operator-installing-cli.adoc @@ -6,7 +6,7 @@ [id="installing-nbde-tang-server-operator-using-cli_{context}"] = Installing the NBDE Tang Server Operator using CLI -You can install the NBDE Tang Server Operator from the OperatorHub using the CLI. +You can install the NBDE Tang Server Operator from the software catalog using the CLI. .Prerequisites @@ -15,7 +15,7 @@ You can install the NBDE Tang Server Operator from the OperatorHub using the CLI .Procedure -. Use the following command to list available Operators on OperatorHub, and limit the output to Tang-related results: +. Use the following command to list available Operators in the software catalog, and limit the output to Tang-related results: + [source,terminal] ---- @@ -50,7 +50,7 @@ spec: <1> Specify the channel name from where you want to subscribe the Operator. <2> Specify the name of the Operator to subscribe to. <3> Specify the name of the CatalogSource that provides the Operator. -<4> The namespace of the CatalogSource. Use `openshift-marketplace` for the default OperatorHub CatalogSources. +<4> The namespace of the CatalogSource. Use `openshift-marketplace` for the default software catalog sources. . Apply the `Subscription` to the cluster: + diff --git a/modules/nbde-tang-server-operator-installing-web-console.adoc b/modules/nbde-tang-server-operator-installing-web-console.adoc index 473dccb60e..bac9e672e6 100644 --- a/modules/nbde-tang-server-operator-installing-web-console.adoc +++ b/modules/nbde-tang-server-operator-installing-web-console.adoc @@ -6,7 +6,7 @@ [id="installing-nbde-tang-server-operator-using-web-console_{context}"] = Installing the NBDE Tang Server Operator using the web console -You can install the NBDE Tang Server Operator from the OperatorHub using the web console. +You can install the NBDE Tang Server Operator from the software catalog using the web console. .Prerequisites @@ -14,10 +14,10 @@ You can install the NBDE Tang Server Operator from the OperatorHub using the web .Procedure -. In the {product-title} web console, navigate to *Operators* -> *OperatorHub*. +. In the {product-title} web console, navigate to *Ecosystem* -> *Software Catalog*. . 
Search for the NBDE Tang Server Operator: + -image::nbde-tang-server-operator-01-operatorhub.png[NBDE Tang Server Operator in OperatorHub] +image::nbde-tang-server-operator-01-operatorhub.png[NBDE Tang Server Operator in the software catalog] . Click *Install*. . On the *Operator Installation* screen, keep the *Update channel*, *Version*, *Installation mode*, *Installed Namespace*, and *Update approval* fields on the default values. . After you confirm the installation options by clicking *Install*, the console displays the installation confirmation. @@ -26,7 +26,7 @@ image::nbde-tang-server-operator-03-confirmation.png[Confirmation of a NBDE Tang .Verification -. Navigate to the *Operators* -> *Installed Operators* page. +. Navigate to the *Ecosystem* -> *Installed Operators* page. . Check that the NBDE Tang Server Operator is installed and its status is `Succeeded`. + image::nbde-tang-server-operator-05-succeeded.png[NBDE Tang Server Operator status] diff --git a/modules/network-observability-RTT.adoc b/modules/network-observability-RTT.adoc index 538e8be524..4ebd6be1ba 100644 --- a/modules/network-observability-RTT.adoc +++ b/modules/network-observability-RTT.adoc @@ -9,7 +9,7 @@ You can track RTT by editing the `FlowCollector` to the specifications in the following YAML example. .Procedure -. In the web console, navigate to *Operators* -> *Installed Operators*. +. In the web console, navigate to *Ecosystem* -> *Installed Operators*. . In the *Provided APIs* heading for the *NetObserv Operator*, select *Flow Collector*. . Select *cluster*, and then select the *YAML* tab. . Configure the `FlowCollector` custom resource for RTT tracing, for example: @@ -45,4 +45,4 @@ When you refresh the *Network Traffic* page, the *Overview*, *Traffic Flow*, and ... Remove the *Protocol* filter. ... Filter for *Flow RTT* values greater than 0 in the *Common* filters. -.. In the *Topology* view, click the Display option dropdown. Then click *RTT* in the *edge labels* drop-down list. \ No newline at end of file +.. In the *Topology* view, click the Display option dropdown. Then click *RTT* in the *edge labels* drop-down list. diff --git a/modules/network-observability-SRIOV-configuration.adoc b/modules/network-observability-SRIOV-configuration.adoc index a47021b232..47f5002855 100644 --- a/modules/network-observability-SRIOV-configuration.adoc +++ b/modules/network-observability-SRIOV-configuration.adoc @@ -13,7 +13,7 @@ In order to collect traffic from a cluster with a Single Root I/O Virtualization * The `SRIOVNetwork` custom resource (CR) `spec.ipam` configuration must be set with an IP address from the range that the interface lists or from other plugins. .Procedure -. In the web console, navigate to *Operators* -> *Installed Operators*. +. In the web console, navigate to *Ecosystem* -> *Installed Operators*. . Under the *Provided APIs* heading for the *NetObserv Operator*, select *Flow Collector*. . Select *cluster* and then select the *YAML* tab. . Configure the `FlowCollector` custom resource. A sample configuration is as follows: @@ -33,4 +33,4 @@ spec: ebpf: privileged: true <1> ---- -<1> The `spec.agent.ebpf.privileged` field value must be set to `true` to enable SR-IOV monitoring. \ No newline at end of file +<1> The `spec.agent.ebpf.privileged` field value must be set to `true` to enable SR-IOV monitoring. 
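The SR-IOV prerequisite above requires the `SRIOVNetwork` custom resource to set `spec.ipam` with an IP address from the range that the interface lists or from another plugin. A minimal sketch of such a resource follows; the resource name, target namespace, SR-IOV resource name, and address range are illustrative assumptions rather than values taken from the module.

[source,yaml]
----
apiVersion: sriovnetwork.openshift.io/v1
kind: SriovNetwork
metadata:
  name: example-sriov-network                  # illustrative name
  namespace: openshift-sriov-network-operator
spec:
  resourceName: example_resource               # illustrative SR-IOV resource name
  networkNamespace: example-app                # illustrative namespace of the monitored workloads
  # Static IPAM gives the secondary interface a fixed address, satisfying the prerequisite.
  ipam: |
    {
      "type": "static",
      "addresses": [
        { "address": "192.168.100.10/24" }
      ]
    }
----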
diff --git a/modules/network-observability-configuring-custom-metrics.adoc b/modules/network-observability-configuring-custom-metrics.adoc index 766e70857b..f59faa3fca 100644 --- a/modules/network-observability-configuring-custom-metrics.adoc +++ b/modules/network-observability-configuring-custom-metrics.adoc @@ -10,7 +10,7 @@ You can configure the `FlowMetric` API to create custom metrics by using flowlog .Procedure -. In the web console, navigate to *Operators* -> *Installed Operators*. +. In the web console, navigate to *Ecosystem* -> *Installed Operators*. . In the *Provided APIs* heading for the *NetObserv Operator*, select *FlowMetric*. . In the *Project:* dropdown list, select the project of the Network Observability Operator instance. . Click *Create FlowMetric*. diff --git a/modules/network-observability-creating-metrics-network-events.adoc b/modules/network-observability-creating-metrics-network-events.adoc index 59e0d5ce2c..f18c835974 100644 --- a/modules/network-observability-creating-metrics-network-events.adoc +++ b/modules/network-observability-creating-metrics-network-events.adoc @@ -19,7 +19,7 @@ You can create a `FlowMetric` resource to generate metrics for nested or array f * A network policy specified. .Procedure -. In the web console, navigate to *Operators* -> *Installed Operators*. +. In the web console, navigate to *Ecosystem* -> *Installed Operators*. . In the *Provided APIs* heading for the *NetObserv Operator*, select *FlowMetric*. . In the *Project* dropdown list, select the project of the Network Observability Operator instance. . Click *Create FlowMetric*. @@ -53,4 +53,4 @@ spec: .Verification . In the web console, navigate to *Observe* -> *Dashboards* and scroll down to see the *Network Policy* tab. -. You should begin seeing metrics filter in based on the metric you created along with the network policy specifications. \ No newline at end of file +. You should begin seeing metrics filter in based on the metric you created along with the network policy specifications. diff --git a/modules/network-observability-deploy-network-policy.adoc b/modules/network-observability-deploy-network-policy.adoc index f5b7e348e0..6b4b03a81f 100644 --- a/modules/network-observability-deploy-network-policy.adoc +++ b/modules/network-observability-deploy-network-policy.adoc @@ -17,7 +17,7 @@ If you have installed Loki, Kafka or any exporter in a different namespace that * If you are using Loki and including it in the policy target, connection to an external object storage (as defined in your `LokiStack` related secret) .Procedure -. In the web console, go to *Operators* -> *Installed Operators* page. +. In the web console, go to *Ecosystem* -> *Installed Operators* page. . Under the *Provided APIs* heading for *Network Observability*, select *Flow Collector*. . Select *cluster* then select the *YAML* tab. . Configure the `FlowCollector` CR. A sample configuration is as follows: @@ -38,4 +38,4 @@ spec: # ... ---- <1> By default, the `enable` value is `false`. -<2> Default values are `["openshift-console", "openshift-monitoring"]`. \ No newline at end of file +<2> Default values are `["openshift-console", "openshift-monitoring"]`. 
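Only the callouts for the network policy settings appear above. Filling in the fields that those callouts describe, a minimal `FlowCollector` sketch could look like the following; the `flows.netobserv.io/v1beta2` API version and the exact field names under `spec.networkPolicy` are assumptions based on the callout text, while the `cluster` name and the default namespaces come from the module.

[source,yaml]
----
apiVersion: flows.netobserv.io/v1beta2  # assumed API version
kind: FlowCollector
metadata:
  name: cluster
spec:
  networkPolicy:
    enable: true  # the default value is false
    additionalNamespaces: ["openshift-console", "openshift-monitoring"]  # the documented defaults
----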
diff --git a/modules/network-observability-disabling-health-alerts.adoc b/modules/network-observability-disabling-health-alerts.adoc index 8bf2025fe0..d43661dc20 100644 --- a/modules/network-observability-disabling-health-alerts.adoc +++ b/modules/network-observability-disabling-health-alerts.adoc @@ -8,7 +8,7 @@ You can opt out of health alerting by editing the `FlowCollector` resource: -. In the web console, navigate to *Operators* -> *Installed Operators*. +. In the web console, navigate to *Ecosystem* -> *Installed Operators*. . Under the *Provided APIs* heading for the *NetObserv Operator*, select *Flow Collector*. . Select *cluster* then select the *YAML* tab. . Add `spec.processor.metrics.disableAlerts` to disable health alerts, as in the following YAML sample: @@ -24,4 +24,4 @@ spec: metrics: disableAlerts: [NetObservLokiError, NetObservNoFlows] <1> ---- -<1> You can specify one or a list with both types of alerts to disable. \ No newline at end of file +<1> You can specify one or a list with both types of alerts to disable. diff --git a/modules/network-observability-dns-tracking.adoc b/modules/network-observability-dns-tracking.adoc index d37240ce0d..63afbf4c66 100644 --- a/modules/network-observability-dns-tracking.adoc +++ b/modules/network-observability-dns-tracking.adoc @@ -13,7 +13,7 @@ Using DNS tracking, you can monitor your network, conduct security analysis, and CPU and memory usage increases are observed in the eBPF agent when this feature is enabled. ==== .Procedure -. In the web console, navigate to *Operators* -> *Installed Operators*. +. In the web console, navigate to *Ecosystem* -> *Installed Operators*. . Under the *Provided APIs* heading for *Network Observability*, select *Flow Collector*. . Select *cluster* then select the *YAML* tab. . Configure the `FlowCollector` custom resource. A sample configuration is as follows: @@ -46,4 +46,4 @@ spec: [NOTE] ==== TCP handshake packets do not have DNS headers. TCP protocol flows without DNS headers are shown in the traffic flow data with *DNS Latency*, *ID*, and *Response code* values of "n/a". You can filter out flow data to view only flows that have DNS headers using the *Common* filter "DNSError" equal to "0". -==== \ No newline at end of file +==== diff --git a/modules/network-observability-ebpf-agent-alert.adoc b/modules/network-observability-ebpf-agent-alert.adoc index b4a94ea692..c67bd15d76 100644 --- a/modules/network-observability-ebpf-agent-alert.adoc +++ b/modules/network-observability-ebpf-agent-alert.adoc @@ -14,7 +14,7 @@ Increasing the `cacheMaxFlows` might increase the memory usage of the eBPF agent .Procedure -. In the web console, navigate to *Operators* -> *Installed Operators*. +. In the web console, navigate to *Ecosystem* -> *Installed Operators*. . Under the *Provided APIs* heading for the *Network Observability Operator*, select *Flow Collector*. diff --git a/modules/network-observability-ebpf-manager-operator.adoc b/modules/network-observability-ebpf-manager-operator.adoc index c88cd002d8..9af4a2a655 100644 --- a/modules/network-observability-ebpf-manager-operator.adoc +++ b/modules/network-observability-ebpf-manager-operator.adoc @@ -12,7 +12,7 @@ The eBPF Manager Operator reduces the attack surface and ensures compliance, sec include::snippets/technology-preview.adoc[] .Procedure -. In the web console, navigate to *Operators* -> *Operator Hub*. +. In the web console, navigate to *Ecosystem* -> *Software Catalog*. . Install *eBPF Manager*. .
Check *Workloads* -> *Pods* in the `bpfman` namespace to make sure they are all up and running. . Configure the `FlowCollector` custom resource to use the eBPF Manager Operator: @@ -32,7 +32,7 @@ spec: ---- .Verification -. In the web console, navigate to *Operators* -> *Installed Operators*. +. In the web console, navigate to *Ecosystem* -> *Installed Operators*. . Click *eBPF Manager Operator* -> *All instances* tab. + -For each node, verify that a `BpfApplication` named `netobserv` and a pair of `BpfProgram` objects, one for Traffic Control (TCx) ingress and another for TCx egress, exist. If you enable other eBPF Agent features, you might have more objects. \ No newline at end of file +For each node, verify that a `BpfApplication` named `netobserv` and a pair of `BpfProgram` objects, one for Traffic Control (TCx) ingress and another for TCx egress, exist. If you enable other eBPF Agent features, you might have more objects. diff --git a/modules/network-observability-enriched-flows.adoc b/modules/network-observability-enriched-flows.adoc index cc4fdbeb32..6f4453f30b 100644 --- a/modules/network-observability-enriched-flows.adoc +++ b/modules/network-observability-enriched-flows.adoc @@ -13,7 +13,7 @@ You can send network flows to Kafka, IPFIX, the Red{nbsp}Hat build of OpenTeleme .Procedure -. In the web console, navigate to *Operators* -> *Installed Operators*. +. In the web console, navigate to *Ecosystem* -> *Installed Operators*. . Under the *Provided APIs* heading for the *NetObserv Operator*, select *Flow Collector*. . Select *cluster* and then select the *YAML* tab. . Edit the `FlowCollector` to configure `spec.exporters` as follows: diff --git a/modules/network-observability-filtering-ebpf-rule.adoc b/modules/network-observability-filtering-ebpf-rule.adoc index 0325f721e0..bf1d34005a 100644 --- a/modules/network-observability-filtering-ebpf-rule.adoc +++ b/modules/network-observability-filtering-ebpf-rule.adoc @@ -14,7 +14,7 @@ You can configure the `FlowCollector` custom resource to filter eBPF flows using ==== .Procedure -. In the web console, navigate to *Operators* -> *Installed Operators*. +. In the web console, navigate to *Ecosystem* -> *Installed Operators*. . Under the *Provided APIs* heading for *Network Observability*, select *Flow Collector*. . Select *cluster*, then select the *YAML* tab. . Configure the `FlowCollector` custom resource, similar to the following sample configurations: diff --git a/modules/network-observability-flowcollector-kafka-config.adoc b/modules/network-observability-flowcollector-kafka-config.adoc index 0a3d21281c..64355ac88a 100644 --- a/modules/network-observability-flowcollector-kafka-config.adoc +++ b/modules/network-observability-flowcollector-kafka-config.adoc @@ -12,7 +12,7 @@ You can configure the `FlowCollector` resource to use Kafka for high-throughput * Kafka is installed. Red Hat supports Kafka with AMQ Streams Operator. .Procedure -. In the web console, navigate to *Operators* → *Installed Operators*. +. In the web console, navigate to *Ecosystem* -> *Installed Operators*. . Under the *Provided APIs* heading for the Network Observability Operator, select *Flow Collector*. @@ -38,4 +38,4 @@ spec: <1> Set `spec.deploymentModel` to `Kafka` instead of `Direct` to enable the Kafka deployment model. <2> `spec.kafka.address` refers to the Kafka bootstrap server address. You can specify a port if needed, for instance `kafka-cluster-kafka-bootstrap.netobserv:9093` for using TLS on port 9093. 
<3> `spec.kafka.topic` should match the name of a topic created in Kafka. -<4> `spec.kafka.tls` can be used to encrypt all communications to and from Kafka with TLS or mTLS. When enabled, the Kafka CA certificate must be available as a ConfigMap or a Secret, both in the namespace where the `flowlogs-pipeline` processor component is deployed (default: `netobserv`) and where the eBPF agents are deployed (default: `netobserv-privileged`). It must be referenced with `spec.kafka.tls.caCert`. When using mTLS, client secrets must be available in these namespaces as well (they can be generated for instance using the AMQ Streams User Operator) and referenced with `spec.kafka.tls.userCert`. \ No newline at end of file +<4> `spec.kafka.tls` can be used to encrypt all communications to and from Kafka with TLS or mTLS. When enabled, the Kafka CA certificate must be available as a ConfigMap or a Secret, both in the namespace where the `flowlogs-pipeline` processor component is deployed (default: `netobserv`) and where the eBPF agents are deployed (default: `netobserv-privileged`). It must be referenced with `spec.kafka.tls.caCert`. When using mTLS, client secrets must be available in these namespaces as well (they can be generated for instance using the AMQ Streams User Operator) and referenced with `spec.kafka.tls.userCert`. diff --git a/modules/network-observability-flowcollector-view.adoc b/modules/network-observability-flowcollector-view.adoc index 3080c46126..03f72b9657 100644 --- a/modules/network-observability-flowcollector-view.adoc +++ b/modules/network-observability-flowcollector-view.adoc @@ -9,7 +9,7 @@ You can view and edit YAML directly in the {product-title} web console. .Procedure -. In the web console, navigate to *Operators* -> *Installed Operators*. +. In the web console, navigate to *Ecosystem* -> *Installed Operators*. . Under the *Provided APIs* heading for the *NetObserv Operator*, select *Flow Collector*. . Select *cluster* then select the *YAML* tab. There, you can modify the `FlowCollector` resource to configure the Network Observability Operator. diff --git a/modules/network-observability-flowmetrics-charts.adoc b/modules/network-observability-flowmetrics-charts.adoc index 8ae0f50855..134685a612 100644 --- a/modules/network-observability-flowmetrics-charts.adoc +++ b/modules/network-observability-flowmetrics-charts.adoc @@ -9,7 +9,7 @@ You can generate charts for dashboards in the {product-title} web console, which you can view as an administrator in the *Dashboard* menu by defining the `charts` section of the `FlowMetric` resource. .Procedure -. In the web console, navigate to *Operators* -> *Installed Operators*. +. In the web console, navigate to *Ecosystem* -> *Installed Operators*. . In the *Provided APIs* heading for the *NetObserv Operator*, select *FlowMetric*. . In the *Project:* dropdown list, select the project of the Network Observability Operator instance. . Click *Create FlowMetric*. diff --git a/modules/network-observability-loki-install.adoc b/modules/network-observability-loki-install.adoc index 52b83e133f..f0e08a097b 100644 --- a/modules/network-observability-loki-install.adoc +++ b/modules/network-observability-loki-install.adoc @@ -15,15 +15,15 @@ The link:https://catalog.redhat.com/software/containers/openshift-logging/loki-r * Linux kernel 4.18+ .Procedure -. In the {product-title} web console, click *Operators* -> *OperatorHub*. +. In the {product-title} web console, click *Ecosystem* -> *Software Catalog*. . 
Choose *{loki-op}* from the list of available Operators, and click *Install*. . Under *Installation Mode*, select *All namespaces on the cluster*. .Verification -. Verify that you installed the {loki-op}. Visit the *Operators* → *Installed Operators* page and look for *{loki-op}*. +. Verify that you installed the {loki-op}. Visit the *Ecosystem* -> *Installed Operators* page and look for *{loki-op}*. . Verify that *{loki-op}* is listed with *Status* as *Succeeded* in all the projects. [IMPORTANT] ==== To uninstall Loki, refer to the uninstallation process that corresponds with the method you used to install Loki. You might have remaining `ClusterRoles` and `ClusterRoleBindings`, data stored in object store, and persistent volume that must be removed. -==== \ No newline at end of file +==== diff --git a/modules/network-observability-lokistack-create.adoc b/modules/network-observability-lokistack-create.adoc index 024286c60c..1e40de384f 100644 --- a/modules/network-observability-lokistack-create.adoc +++ b/modules/network-observability-lokistack-create.adoc @@ -10,7 +10,7 @@ You can deploy a `LokiStack` custom resource (CR) by using the web console or {o .Procedure -. Navigate to *Operators* -> *Installed Operators*, viewing *All projects* from the *Project* dropdown. +. Navigate to *Ecosystem* -> *Installed Operators*, viewing *All projects* from the *Project* dropdown. . Look for *{loki-op}*. In the details, under *Provided APIs*, select *LokiStack*. . Click *Create LokiStack*. . Ensure the following fields are specified in either *Form View* or *YAML view*: diff --git a/modules/network-observability-netobserv-cli-install.adoc b/modules/network-observability-netobserv-cli-install.adoc index a88f9b3ce6..6a4a8d49a1 100644 --- a/modules/network-observability-netobserv-cli-install.adoc +++ b/modules/network-observability-netobserv-cli-install.adoc @@ -6,7 +6,7 @@ [id="network-observability-cli-install_{context}"] = Installing the Network Observability CLI -Installing the Network Observability CLI (`oc netobserv`) is a separate procedure from the Network Observability Operator installation. This means that, even if you have the Operator installed from OperatorHub, you need to install the CLI separately. +Installing the Network Observability CLI (`oc netobserv`) is a separate procedure from the Network Observability Operator installation. This means that, even if you have the Operator installed from the software catalog, you need to install the CLI separately. [NOTE] ==== diff --git a/modules/network-observability-operator-install.adoc b/modules/network-observability-operator-install.adoc index c39a6ab89d..fb84e43b46 100644 --- a/modules/network-observability-operator-install.adoc +++ b/modules/network-observability-operator-install.adoc @@ -28,10 +28,10 @@ Additionally, this installation example uses the `netobserv` namespace, which is .Procedure -. In the {product-title} web console, click *Operators* -> *OperatorHub*. -. Choose *Network Observability Operator* from the list of available Operators in the *OperatorHub*, and click *Install*. +. In the {product-title} web console, click *Ecosystem* -> *Software Catalog*. +. Choose *Network Observability Operator* from the list of available Operators in the software catalog, and click *Install*. . Select the checkbox `Enable Operator recommended cluster monitoring on this Namespace`. -. Navigate to *Operators* -> *Installed Operators*. Under Provided APIs for Network Observability, select the *Flow Collector* link. +. 
Navigate to *Ecosystem* -> *Installed Operators*. Under Provided APIs for Network Observability, select the *Flow Collector* link. . Navigate to the *Flow Collector* tab, and click *Create FlowCollector*. Make the following selections in the form view: .. *spec.agent.ebpf.Sampling*: Specify a sampling size for flows. Lower sampling sizes will have higher impact on resource utilization. For more information, see the "FlowCollector API reference", `spec.agent.ebpf`. .. If you are not using Loki, click *Loki client settings* and change *Enable* to *False*. The setting is *True* by default. @@ -46,4 +46,4 @@ Additionally, this installation example uses the `netobserv` namespace, which is To confirm this was successful, when you navigate to *Observe* you should see *Network Traffic* listed in the options. -In the absence of *Application Traffic* within the {product-title} cluster, default filters might show that there are "No results", which results in no visual flow. Beside the filter selections, select *Clear all filters* to see the flow. \ No newline at end of file +In the absence of *Application Traffic* within the {product-title} cluster, default filters might show that there are "No results", which results in no visual flow. Beside the filter selections, select *Clear all filters* to see the flow. diff --git a/modules/network-observability-operator-uninstall.adoc b/modules/network-observability-operator-uninstall.adoc index 408332f9f3..8763e9b76f 100644 --- a/modules/network-observability-operator-uninstall.adoc +++ b/modules/network-observability-operator-uninstall.adoc @@ -6,7 +6,7 @@ [id="network-observability-operator-uninstall_{context}"] = Uninstalling the Network Observability Operator -You can uninstall the Network Observability Operator using the {product-title} web console Operator Hub, working in the *Operators* -> *Installed Operators* area. +You can uninstall the Network Observability Operator using the {product-title} web console Operator Hub, working in the *Ecosystem* -> *Installed Operators* area. .Procedure @@ -14,7 +14,7 @@ You can uninstall the Network Observability Operator using the {product-title} w .. Click *Flow Collector*, which is next to the *Network Observability Operator* in the *Provided APIs* column. .. Click the Options menu {kebab} for the *cluster* and select *Delete FlowCollector*. . Uninstall the Network Observability Operator. -.. Navigate back to the *Operators* -> *Installed Operators* area. +.. Navigate back to the *Ecosystem* -> *Installed Operators* area. .. Click the Options menu {kebab} next to the *Network Observability Operator* and select *Uninstall Operator*. .. *Home* -> *Projects* and select `openshift-netobserv-operator` .. Navigate to *Actions* and select *Delete Project* diff --git a/modules/network-observability-packet-drops.adoc b/modules/network-observability-packet-drops.adoc index 439517cb2f..b2b903fc03 100644 --- a/modules/network-observability-packet-drops.adoc +++ b/modules/network-observability-packet-drops.adoc @@ -14,7 +14,7 @@ CPU and memory usage increases when this feature is enabled. ==== .Procedure -. In the web console, navigate to *Operators* -> *Installed Operators*. +. In the web console, navigate to *Ecosystem* -> *Installed Operators*. . Under the *Provided APIs* heading for the *NetObserv Operator*, select *Flow Collector*. . Select *cluster*, and then select the *YAML* tab. . Configure the `FlowCollector` custom resource for packet drops, for example: @@ -44,4 +44,4 @@ spec: .. 
Select new choices in *Manage panels* to choose which graphical visualizations of packet drops to display in the *Overview*. .. Select new choices in *Manage columns* to choose which packet drop information to display in the *Traffic flows* table. ... In the *Traffic Flows* view, you can also expand the side panel to view more information about packet drops. Host drops are prefixed with `SKB_DROP` and OVS drops are prefixed with `OVS_DROP`. -.. In the *Topology* view, red lines are displayed where drops are present. \ No newline at end of file +.. In the *Topology* view, red lines are displayed where drops are present. diff --git a/modules/network-observability-packet-translation.adoc b/modules/network-observability-packet-translation.adoc index f0008a9ec7..c1230e60e9 100644 --- a/modules/network-observability-packet-translation.adoc +++ b/modules/network-observability-packet-translation.adoc @@ -9,7 +9,7 @@ You can use network observability and eBPF to enrich network flows from a Kubernetes service with translated endpoint information, gaining insight into the endpoints serving traffic. .Procedure -. In the web console, navigate to *Operators* -> *Installed Operators*. +. In the web console, navigate to *Ecosystem* -> *Installed Operators*. . In the *Provided APIs* heading for the *NetObserv Operator*, select *Flow Collector*. . Select *cluster*, and then select the *YAML* tab. . Configure the `FlowCollector` custom resource for `PacketTranslation`, for example: diff --git a/modules/network-observability-proc_configuring-ipsec-with-flow-collector-resource.adoc b/modules/network-observability-proc_configuring-ipsec-with-flow-collector-resource.adoc index 5e8452943e..24476e5013 100644 --- a/modules/network-observability-proc_configuring-ipsec-with-flow-collector-resource.adoc +++ b/modules/network-observability-proc_configuring-ipsec-with-flow-collector-resource.adoc @@ -13,7 +13,7 @@ In {product-title}, IPsec is disabled by default. You can enable IPsec by follow * You have enabled IPsec encryption on {product-title}. .Procedure -. In the web console, navigate to *Operators* -> *Installed Operators*. +. In the web console, navigate to *Ecosystem* -> *Installed Operators*. . Under the *Provided APIs* heading for the *NetObserv Operator*, select *Flow Collector*. . Select *cluster* then select the *YAML* tab. . Configure the `FlowCollector` custom resource for IPsec: diff --git a/modules/network-observability-tcp-flag-syn-flood.adoc b/modules/network-observability-tcp-flag-syn-flood.adoc index 0be16d0c60..8762e1af60 100644 --- a/modules/network-observability-tcp-flag-syn-flood.adoc +++ b/modules/network-observability-tcp-flag-syn-flood.adoc @@ -9,7 +9,7 @@ You can create an `AlertingRule` resouce to alert for SYN flooding. .Procedure -. In the web console, navigate to *Operators* -> *Installed Operators*. +. In the web console, navigate to *Ecosystem* -> *Installed Operators*. . In the *Provided APIs* heading for the *NetObserv Operator*, select *FlowMetric*. . In the *Project* dropdown list, select the project of the Network Observability Operator instance. . Click *Create FlowMetric*. @@ -83,4 +83,4 @@ metadata: . In the web console, click *Manage Columns* in the *Network Traffic* table view and click *TCP flags*. . In the *Network Traffic* table view, filter on *TCP protocol SYN TCPFlag*. A large number of flows with the same *byteSize* indicates a SYN flood. . Go to *Observe* -> *Alerting* and select the *Alerting Rules* tab. -. Filter on *netobserv-synflood-in alert*. 
The alert should fire when SYN flooding occurs. \ No newline at end of file +. Filter on *netobserv-synflood-in alert*. The alert should fire when SYN flooding occurs. diff --git a/modules/network-observability-viewing-network-events.adoc b/modules/network-observability-viewing-network-events.adoc index 73d6d124e8..b23b1c2372 100644 --- a/modules/network-observability-viewing-network-events.adoc +++ b/modules/network-observability-viewing-network-events.adoc @@ -23,7 +23,7 @@ You can edit the `FlowCollector` to view information about network traffic event * You have created at least one of the following network APIs: `NetworkPolicy`, `AdminNetworkPolicy`, `BaselineNetworkPolicy`, `UserDefinedNetwork` isolation, multicast, or `EgressFirewall`. .Procedure -. In the web console, navigate to *Operators* -> *Installed Operators*. +. In the web console, navigate to *Ecosystem* -> *Installed Operators*. . In the *Provided APIs* heading for the *NetObserv Operator*, select *Flow Collector*. . Select *cluster*, and then select the *YAML* tab. . Configure the `FlowCollector` CR to enable viewing `NetworkEvents`, for example: diff --git a/modules/network-observability-virtualization-configuration.adoc b/modules/network-observability-virtualization-configuration.adoc index b2da29fbba..78ba0f5d1f 100644 --- a/modules/network-observability-virtualization-configuration.adoc +++ b/modules/network-observability-virtualization-configuration.adoc @@ -54,7 +54,7 @@ status: <3> The list of IPs used by the secondary network. <4> The MAC address used for secondary network. -. In the web console, navigate to *Operators* -> *Installed Operators*. +. In the web console, navigate to *Ecosystem* -> *Installed Operators*. . Under the *Provided APIs* heading for the *NetObserv Operator*, select *Flow Collector*. . Select *cluster* and then select the *YAML* tab. . Configure `FlowCollector` based on the information you found from the additional network investigation: diff --git a/modules/network-observability-working-with-conversations.adoc b/modules/network-observability-working-with-conversations.adoc index 688cdff84c..91dc5c1f28 100644 --- a/modules/network-observability-working-with-conversations.adoc +++ b/modules/network-observability-working-with-conversations.adoc @@ -15,7 +15,7 @@ As an administrator, you can group network flows that are part of the same conve .Procedure -. In the web console, navigate to *Operators* -> *Installed Operators*. +. In the web console, navigate to *Ecosystem* -> *Installed Operators*. . Under the *Provided APIs* heading for the *NetObserv Operator*, select *Flow Collector*. . Select *cluster* then select the *YAML* tab. . Configure the `FlowCollector` custom resource so that `spec.processor.logTypes`, `conversationEndTimeout`, and `conversationHeartbeatInterval` parameters are set according to your observation needs. A sample configuration is as follows: diff --git a/modules/network-observability-working-with-zones.adoc b/modules/network-observability-working-with-zones.adoc index a777cde6e7..c49e00edf3 100644 --- a/modules/network-observability-working-with-zones.adoc +++ b/modules/network-observability-working-with-zones.adoc @@ -9,7 +9,7 @@ You can configure the `FlowCollector` to collect information about the cluster availability zones. This allows you to enrich network flow data with the link:https://kubernetes.io/docs/reference/labels-annotations-taints/#topologykubernetesiozone[`topology.kubernetes.io/zone`] label value applied to the nodes. .Procedure -. 
In the web console, go to *Operators* -> *Installed Operators*. +. In the web console, go to *Ecosystem* -> *Installed Operators*. . Under the *Provided APIs* heading for the *NetObserv Operator*, select *Flow Collector*. . Select *cluster* then select the *YAML* tab. . Configure the `FlowCollector` custom resource so that the `spec.processor.addZone` parameter is set to `true`. A sample configuration is as follows: @@ -33,4 +33,4 @@ When you refresh the *Network Traffic* page, the *Overview*, *Traffic Flow*, and . In the *Overview* tab, you can see *Zones* as an available *Scope*. . In *Network Traffic* -> *Traffic flows*, *Zones* are viewable under the SrcK8S_Zone and DstK8S_Zone fields. -. In the *Topology* view, you can set *Zones* as *Scope* or *Group*. \ No newline at end of file +. In the *Topology* view, you can set *Zones* as *Scope* or *Group*. diff --git a/modules/node-observability-install-web-console.adoc b/modules/node-observability-install-web-console.adoc index 147740c9ed..e5c7484879 100644 --- a/modules/node-observability-install-web-console.adoc +++ b/modules/node-observability-install-web-console.adoc @@ -16,7 +16,7 @@ You can install the Node Observability Operator from the {product-title} web con .Procedure . Log in to the {product-title} web console. -. In the Administrator's navigation panel, expand *Operators* → *OperatorHub*. +. In the Administrator's navigation panel, select *Ecosystem* -> *Software Catalog*. . In the *All items* field, enter *Node Observability Operator* and select the *Node Observability Operator* tile. . Click *Install*. . On the *Install Operator* page, configure the following settings: @@ -27,5 +27,5 @@ You can install the Node Observability Operator from the {product-title} web con .. Click *Install*. .Verification -. In the Administrator's navigation panel, expand *Operators* → *Installed Operators*. +. In the Administrator's navigation panel, expand *Ecosystem* -> *Installed Operators*. . Verify that the Node Observability Operator is listed in the Operators list. diff --git a/modules/nodes-cluster-resource-override-deploy-console.adoc b/modules/nodes-cluster-resource-override-deploy-console.adoc index a9de13600e..957223ba56 100644 --- a/modules/nodes-cluster-resource-override-deploy-console.adoc +++ b/modules/nodes-cluster-resource-override-deploy-console.adoc @@ -29,7 +29,7 @@ To install the Cluster Resource Override Operator using the {product-title} web .. Click *Create*. -. Navigate to *Operators* -> *OperatorHub*. +. Navigate to *Ecosystem* -> *Software Catalog*. .. Choose *ClusterResourceOverride Operator* from the list of available Operators and click *Install*. diff --git a/modules/nodes-cma-autoscaling-custom-install.adoc b/modules/nodes-cma-autoscaling-custom-install.adoc index aa7609151e..551d805e31 100644 --- a/modules/nodes-cma-autoscaling-custom-install.adoc +++ b/modules/nodes-cma-autoscaling-custom-install.adoc @@ -40,7 +40,7 @@ $ oc create configmap -n openshift-keda thanos-cert --from-file=ca-cert.pem .Procedure -. In the {product-title} web console, click *Operators* -> *OperatorHub*. +. In the {product-title} web console, click *Ecosystem* -> *Software Catalog*. . Choose *Custom Metrics Autoscaler* from the list of available Operators, and click *Install*. 
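After the Custom Metrics Autoscaler Operator is installed from the software catalog, the surrounding modules create and later edit a `KedaController` custom resource in the `openshift-keda` namespace. A minimal sketch follows; the `keda.sh/v1alpha1` API version and the fixed `keda` instance name are assumptions, and only the namespace comes from the installation module.

[source,yaml]
----
apiVersion: keda.sh/v1alpha1  # assumed API group and version of the KedaController CRD
kind: KedaController
metadata:
  name: keda                  # assumed required name for the single controller instance
  namespace: openshift-keda   # namespace used in the installation module
spec: {}                      # an empty spec accepts the Operator defaults (assumption)
----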
diff --git a/modules/nodes-cma-autoscaling-custom-uninstalling.adoc b/modules/nodes-cma-autoscaling-custom-uninstalling.adoc index 02dfa066eb..526e00d95a 100644 --- a/modules/nodes-cma-autoscaling-custom-uninstalling.adoc +++ b/modules/nodes-cma-autoscaling-custom-uninstalling.adoc @@ -14,7 +14,7 @@ Use the following procedure to remove the custom metrics autoscaler from your {p .Procedure -. In the {product-title} web console, click *Operators* -> *Installed Operators*. +. In the {product-title} web console, click *Ecosystem* -> *Installed Operators*. ifndef::openshift-rosa,openshift-rosa-hcp,openshift-dedicated[] . Switch to the *openshift-keda* project. @@ -33,7 +33,7 @@ endif::openshift-rosa,openshift-rosa-hcp,openshift-dedicated[] . Remove the Custom Metrics Autoscaler Operator: -.. Click *Operators* -> *Installed Operators*. +.. Click *Ecosystem* -> *Installed Operators*. .. Find the *CustomMetricsAutoscaler* Operator and click the Options menu {kebab} and select *Uninstall Operator*. diff --git a/modules/nodes-cma-autoscaling-keda-controller-edit.adoc b/modules/nodes-cma-autoscaling-keda-controller-edit.adoc index 99d0c10cb7..a5ee2230a0 100644 --- a/modules/nodes-cma-autoscaling-keda-controller-edit.adoc +++ b/modules/nodes-cma-autoscaling-keda-controller-edit.adoc @@ -10,7 +10,7 @@ You can use the following procedure to modify the `KedaController` custom resour .Procedure -. In the {product-title} web console, click *Operators* -> *Installed Operators*. +. In the {product-title} web console, click *Ecosystem* -> *Installed Operators*. . Click *Custom Metrics Autoscaler*. diff --git a/modules/nodes-descheduler-installing.adoc b/modules/nodes-descheduler-installing.adoc index 38e8cb2114..33a457e0d6 100644 --- a/modules/nodes-descheduler-installing.adoc +++ b/modules/nodes-descheduler-installing.adoc @@ -14,7 +14,7 @@ endif::[] [id="nodes-descheduler-installing_{context}"] = Installing the descheduler -The descheduler is not available by default. To enable the descheduler, you must install the {descheduler-operator} from OperatorHub and enable one or more descheduler profiles. +The descheduler is not available by default. To enable the descheduler, you must install the {descheduler-operator} from the software catalog and enable one or more descheduler profiles. By default, the descheduler runs in predictive mode, which means that it only simulates pod evictions. You must change the mode to automatic for the descheduler to perform the pod evictions. @@ -45,14 +45,14 @@ endif::[] .. Navigate to *Administration* -> *Namespaces* and click *Create Namespace*. .. Enter `openshift-kube-descheduler-operator` in the *Name* field, enter `openshift.io/cluster-monitoring=true` in the *Labels* field to enable descheduler metrics, and click *Create*. . Install the {descheduler-operator}. -.. Navigate to *Operators* -> *OperatorHub*. +.. Navigate to *Ecosystem* -> *Software Catalog*. .. Type *{descheduler-operator}* into the filter box. .. Select the *{descheduler-operator}* and click *Install*. .. On the *Install Operator* page, select *A specific namespace on the cluster*. Select *openshift-kube-descheduler-operator* from the drop-down menu. .. Adjust the values for the *Update Channel* and *Approval Strategy* to the desired values. .. Click *Install*. . Create a descheduler instance. -.. From the *Operators* -> *Installed Operators* page, click the *{descheduler-operator}*. +.. From the *Ecosystem* -> *Installed Operators* page, click the *{descheduler-operator}*. .. 
Select the *Kube Descheduler* tab and click *Create KubeDescheduler*. .. Edit the settings as necessary. ... To evict pods instead of simulating the evictions, change the *Mode* field to *Automatic*. diff --git a/modules/nodes-descheduler-uninstalling.adoc b/modules/nodes-descheduler-uninstalling.adoc index 64ccb03447..6435922ea7 100644 --- a/modules/nodes-descheduler-uninstalling.adoc +++ b/modules/nodes-descheduler-uninstalling.adoc @@ -22,12 +22,12 @@ endif::openshift-rosa,openshift-dedicated[] . Log in to the {product-title} web console. . Delete the descheduler instance. -.. From the *Operators* -> *Installed Operators* page, click *{descheduler-operator}*. +.. From the *Ecosystem* -> *Installed Operators* page, click *{descheduler-operator}*. .. Select the *Kube Descheduler* tab. .. Click the Options menu {kebab} next to the *cluster* entry and select *Delete KubeDescheduler*. .. In the confirmation dialog, click *Delete*. . Uninstall the {descheduler-operator}. -.. Navigate to *Operators* -> *Installed Operators*. +.. Navigate to *Ecosystem* -> *Installed Operators*. .. Click the Options menu {kebab} next to the *{descheduler-operator}* entry and select *Uninstall Operator*. .. In the confirmation dialog, click *Uninstall*. . Delete the `openshift-kube-descheduler-operator` namespace. diff --git a/modules/nodes-pods-vertical-autoscaler-install.adoc b/modules/nodes-pods-vertical-autoscaler-install.adoc index 7fd3e200c3..e906e7db48 100644 --- a/modules/nodes-pods-vertical-autoscaler-install.adoc +++ b/modules/nodes-pods-vertical-autoscaler-install.adoc @@ -18,7 +18,7 @@ endif::[] .Procedure -. In the {product-title} web console, click *Operators* -> *OperatorHub*. +. In the {product-title} web console, click *Ecosystem* -> *Software Catalog*. . Choose *VerticalPodAutoscaler* from the list of available Operators, and click *Install*. diff --git a/modules/nodes-pods-vertical-autoscaler-uninstall.adoc b/modules/nodes-pods-vertical-autoscaler-uninstall.adoc index c1d928d102..e4bb3aa987 100644 --- a/modules/nodes-pods-vertical-autoscaler-uninstall.adoc +++ b/modules/nodes-pods-vertical-autoscaler-uninstall.adoc @@ -21,7 +21,7 @@ After removing the VPA, it is recommended that you remove the other components a .Procedure -. In the {product-title} web console, click *Operators* -> *Installed Operators*. +. In the {product-title} web console, click *Ecosystem* -> *Installed Operators*. . Switch to the *openshift-vertical-pod-autoscaler* project. diff --git a/modules/nodes-secondary-scheduler-configuring-console.adoc b/modules/nodes-secondary-scheduler-configuring-console.adoc index 00eb6e1639..0062d9a7c4 100644 --- a/modules/nodes-secondary-scheduler-configuring-console.adoc +++ b/modules/nodes-secondary-scheduler-configuring-console.adoc @@ -57,7 +57,7 @@ data: .. Click *Create*. . Create the `SecondaryScheduler` CR: -.. Navigate to *Operators* -> *Installed Operators*. +.. Navigate to *Ecosystem* -> *Installed Operators*. .. Select *{secondary-scheduler-operator-full}*. .. Select the *Secondary Scheduler* tab and click *Create SecondaryScheduler*. .. The *Name* field defaults to `cluster`; do not change this name. 
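The procedure above stores the scheduler configuration in a config map and then creates a `SecondaryScheduler` CR whose *Name* field defaults to `cluster`. Expressed as YAML, the CR might look like the following sketch; the API version, the namespace, the config map name, and the scheduler image are illustrative assumptions, and only the `cluster` name comes from the procedure.

[source,yaml]
----
apiVersion: operator.openshift.io/v1  # assumed API group of the SecondaryScheduler CRD
kind: SecondaryScheduler
metadata:
  name: cluster                                       # the Name field defaults to cluster; do not change it
  namespace: openshift-secondary-scheduler-operator   # assumed installation namespace
spec:
  schedulerConfig: secondary-scheduler-config         # assumed name of the config map created earlier
  schedulerImage: registry.example.com/secondary-scheduler:latest  # placeholder image reference
----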
diff --git a/modules/nodes-secondary-scheduler-install-console.adoc b/modules/nodes-secondary-scheduler-install-console.adoc index b199a19906..b3095079ba 100644 --- a/modules/nodes-secondary-scheduler-install-console.adoc +++ b/modules/nodes-secondary-scheduler-install-console.adoc @@ -29,7 +29,7 @@ endif::openshift-rosa,openshift-dedicated[] // There are no metrics to collect for the secondary scheduler operator as of now, so no need to add the metrics label . Install the {secondary-scheduler-operator-full}. -.. Navigate to *Operators* -> *OperatorHub*. +.. Navigate to *Ecosystem* -> *Software Catalog*. .. Enter *{secondary-scheduler-operator-full}* into the filter box. .. Select the *{secondary-scheduler-operator-full}* and click *Install*. .. On the *Install Operator* page: @@ -43,5 +43,5 @@ endif::openshift-rosa,openshift-dedicated[] .Verification -. Navigate to *Operators* -> *Installed Operators*. +. Navigate to *Ecosystem* -> *Installed Operators*. . Verify that *{secondary-scheduler-operator-full}* is listed with a *Status* of *Succeeded*. diff --git a/modules/nodes-secondary-scheduler-uninstall-console.adoc b/modules/nodes-secondary-scheduler-uninstall-console.adoc index 4ed59ce23c..d063dc91df 100644 --- a/modules/nodes-secondary-scheduler-uninstall-console.adoc +++ b/modules/nodes-secondary-scheduler-uninstall-console.adoc @@ -23,6 +23,6 @@ endif::openshift-rosa,openshift-dedicated[] . Log in to the {product-title} web console. . Uninstall the {secondary-scheduler-operator-full} Operator. -.. Navigate to *Operators* -> *Installed Operators*. +.. Navigate to *Ecosystem* -> *Installed Operators*. .. Click the Options menu {kebab} next to the *{secondary-scheduler-operator}* entry and click *Uninstall Operator*. .. In the confirmation dialog, click *Uninstall*. diff --git a/modules/nvidia-gpu-aws-deploying-the-node-feature-discovery-operator.adoc b/modules/nvidia-gpu-aws-deploying-the-node-feature-discovery-operator.adoc index b5d819142b..7a4073b0de 100644 --- a/modules/nvidia-gpu-aws-deploying-the-node-feature-discovery-operator.adoc +++ b/modules/nvidia-gpu-aws-deploying-the-node-feature-discovery-operator.adoc @@ -12,9 +12,9 @@ After the GPU-enabled node is created, you need to discover the GPU-enabled node .Procedure -. Install the Node Feature Discovery Operator from *OperatorHub* in the {product-title} console. +. Install the Node Feature Discovery Operator from the software catalog in the {product-title} console. -. After installing the NFD Operator into *OperatorHub*, select *Node Feature Discovery* from the installed Operators list and select *Create instance*. This installs the `nfd-master` and `nfd-worker` pods, one `nfd-worker` pod for each compute node, in the `openshift-nfd` namespace. +. After installing the NFD Operator, select *Node Feature Discovery* from the installed Operators list and select *Create instance*. This installs the `nfd-master` and `nfd-worker` pods, one `nfd-worker` pod for each compute node, in the `openshift-nfd` namespace. . Verify that the Operator is installed and running by running the following command: + diff --git a/modules/nw-autoscaling-ingress-controller.adoc b/modules/nw-autoscaling-ingress-controller.adoc index aa2d775f4a..32ed1b082c 100644 --- a/modules/nw-autoscaling-ingress-controller.adoc +++ b/modules/nw-autoscaling-ingress-controller.adoc @@ -15,7 +15,7 @@ The following procedure provides an example for scaling up the default Ingress C * You have the {oc-first} installed. 
* You have access to an {product-title} cluster as a user with the `cluster-admin` role. * You installed the Custom Metrics Autoscaler Operator and an associated KEDA Controller. -** You can install the Operator by using OperatorHub on the web console. After you install the Operator, you can create an instance of `KedaController`. +** You can install the Operator by using the software catalog on the web console. After you install the Operator, you can create an instance of `KedaController`. .Procedure diff --git a/modules/nw-aws-load-balancer-operator.adoc b/modules/nw-aws-load-balancer-operator.adoc index 0c95f99720..fc9d17a2cd 100644 --- a/modules/nw-aws-load-balancer-operator.adoc +++ b/modules/nw-aws-load-balancer-operator.adoc @@ -15,7 +15,7 @@ The AWS Load Balancer Operator supports the Kubernetes service resource of type .Procedure -. To deploy the AWS Load Balancer Operator on-demand from OperatorHub, create a `Subscription` object by running the following command: +. To deploy the AWS Load Balancer Operator on-demand from the software catalog, create a `Subscription` object by running the following command: + [source,terminal] ---- diff --git a/modules/nw-bpfman-operator-installing-console.adoc b/modules/nw-bpfman-operator-installing-console.adoc index 49a4fc4430..7f79bba853 100644 --- a/modules/nw-bpfman-operator-installing-console.adoc +++ b/modules/nw-bpfman-operator-installing-console.adoc @@ -21,7 +21,7 @@ As a cluster administrator, you can install the eBPF Manager Operator using the . Install the eBPF Manager Operator: -.. In the {product-title} web console, click *Operators* -> *OperatorHub*. +.. In the {product-title} web console, click *Ecosystem* -> *Software Catalog*. .. Select *eBPF Manager Operator* from the list of available Operators, and if prompted to *Show community Operator*, click *Continue*. @@ -33,7 +33,7 @@ As a cluster administrator, you can install the eBPF Manager Operator using the . Verify that the eBPF Manager Operator is installed successfully: -.. Navigate to the *Operators* -> *Installed Operators* page. +.. Navigate to the *Ecosystem* -> *Installed Operators* page. .. Ensure that *eBPF Manager Operator* is listed in the *openshift-ingress-node-firewall* project with a *Status* of *InstallSucceeded*. + diff --git a/modules/nw-dpu-installing-operator-ui.adoc b/modules/nw-dpu-installing-operator-ui.adoc index d65a028e9e..dbb60bd84a 100644 --- a/modules/nw-dpu-installing-operator-ui.adoc +++ b/modules/nw-dpu-installing-operator-ui.adoc @@ -16,7 +16,7 @@ As a cluster administrator, you can install the DPU Operator by using the web co .Procedure -. In the {product-title} web console, click *Operators* -> *OperatorHub*. +. In the {product-title} web console, click *Ecosystem* -> *Software Catalog*. . Select *DPU Operator* from the list of available Operators, and then click *Install*. @@ -26,7 +26,7 @@ As a cluster administrator, you can install the DPU Operator by using the web co .Verification -. Navigate to the *Operators* -> *Installed Operators* page. +. Navigate to the *Ecosystem* -> *Installed Operators* page. . Ensure that *DPU Operator* is listed in the *openshift-dpu-operator* project with a *Status* of *InstallSucceeded*. 
+ diff --git a/modules/nw-external-dns-operator.adoc b/modules/nw-external-dns-operator.adoc index a3eed665dd..3406bef7ff 100644 --- a/modules/nw-external-dns-operator.adoc +++ b/modules/nw-external-dns-operator.adoc @@ -13,7 +13,7 @@ The External DNS Operator implements the External DNS API from the `olm.openshif .Procedure -You can deploy the External DNS Operator on demand from the OperatorHub. Deploying the External DNS Operator creates a `Subscription` object. +You can deploy the External DNS Operator on demand from the software catalog. Deploying the External DNS Operator creates a `Subscription` object. . Check the name of an install plan, such as `install-zcvlr`, by running the following command: + diff --git a/modules/nw-infw-operator-installing-console.adoc b/modules/nw-infw-operator-installing-console.adoc index bac96889d0..aa21c60e9c 100644 --- a/modules/nw-infw-operator-installing-console.adoc +++ b/modules/nw-infw-operator-installing-console.adoc @@ -18,7 +18,7 @@ As a cluster administrator, you can install the Operator using the web console. . Install the Ingress Node Firewall Operator: -.. In the {product-title} web console, click *Operators* -> *OperatorHub*. +.. In the {product-title} web console, click *Ecosystem* -> *Software Catalog*. .. Select *Ingress Node Firewall Operator* from the list of available Operators, and then click *Install*. @@ -28,7 +28,7 @@ As a cluster administrator, you can install the Operator using the web console. . Verify that the Ingress Node Firewall Operator is installed successfully: -.. Navigate to the *Operators* -> *Installed Operators* page. +.. Navigate to the *Ecosystem* -> *Installed Operators* page. .. Ensure that *Ingress Node Firewall Operator* is listed in the *openshift-ingress-node-firewall* project with a *Status* of *InstallSucceeded*. + diff --git a/modules/nw-installing-external-dns-operator.adoc b/modules/nw-installing-external-dns-operator.adoc index 2b77a9ad28..3b599c5df3 100644 --- a/modules/nw-installing-external-dns-operator.adoc +++ b/modules/nw-installing-external-dns-operator.adoc @@ -4,13 +4,13 @@ :_mod-docs-content-type: PROCEDURE [id="nw-installing-external-dns-operator_{context}"] -= Installing the External DNS Operator with OperatorHub += Installing the External DNS Operator with the software catalog -You can install the External DNS Operator by using the {product-title} OperatorHub. +You can install the External DNS Operator by using the {product-title} software catalog. .Procedure -. Click *Operators* -> *OperatorHub* in the {product-title} web console. +. Click *Ecosystem* -> *Software Catalog* in the {product-title} web console. . Click *External DNS Operator*. You can use the *Filter by keyword* text box or the filter list to search for External DNS Operator from the list of Operators. . Select the `external-dns-operator` namespace. diff --git a/modules/nw-metalLB-basic-upgrade-operator.adoc b/modules/nw-metalLB-basic-upgrade-operator.adoc index e527f3e71f..9d968eb3b2 100644 --- a/modules/nw-metalLB-basic-upgrade-operator.adoc +++ b/modules/nw-metalLB-basic-upgrade-operator.adoc @@ -11,7 +11,7 @@ To manually control upgrading the MetalLB Operator, you must edit the `Subscript .Prerequisites * You updated your cluster to the latest z-stream release. -* You used OperatorHub to install the MetalLB Operator. +* You used the software catalog to install the MetalLB Operator. * Access the cluster as a user with the `cluster-admin` role. 
.Procedure diff --git a/modules/nw-metallb-installing-operator-cli.adoc b/modules/nw-metallb-installing-operator-cli.adoc index 69eebc42a9..b8a0b5f329 100644 --- a/modules/nw-metallb-installing-operator-cli.adoc +++ b/modules/nw-metallb-installing-operator-cli.adoc @@ -4,9 +4,9 @@ :_mod-docs-content-type: PROCEDURE [id="nw-metallb-installing-operator-cli_{context}"] -= Installing from OperatorHub using the CLI += Installing from the software catalog using the CLI -Instead of using the {product-title} web console, you can install an Operator from OperatorHub using the CLI. You can use the OpenShift CLI (`oc`) to install the MetalLB Operator. +Instead of using the {product-title} web console, you can install an Operator from the software catalog using the CLI. You can use the OpenShift CLI (`oc`) to install the MetalLB Operator. It is recommended that when using the CLI you install the Operator in the `metallb-system` namespace. diff --git a/modules/nw-ptp-installing-operator-web-console.adoc b/modules/nw-ptp-installing-operator-web-console.adoc index 93726203d4..9b741bb315 100644 --- a/modules/nw-ptp-installing-operator-web-console.adoc +++ b/modules/nw-ptp-installing-operator-web-console.adoc @@ -18,7 +18,7 @@ in the previous section. . Install the PTP Operator using the {product-title} web console: -.. In the {product-title} web console, click *Operators* -> *OperatorHub*. +.. In the {product-title} web console, click *Ecosystem* -> *Software Catalog*. .. Choose *PTP Operator* from the list of available Operators, and then click *Install*. @@ -26,7 +26,7 @@ in the previous section. . Optional: Verify that the PTP Operator installed successfully: -.. Switch to the *Operators* -> *Installed Operators* page. +.. Switch to the *Ecosystem* -> *Installed Operators* page. .. Ensure that *PTP Operator* is listed in the *openshift-ptp* project with a *Status* of *InstallSucceeded*. + @@ -40,5 +40,5 @@ If the installation later succeeds with an *InstallSucceeded* message, you can i If the Operator does not appear as installed, to troubleshoot further: + -* Go to the *Operators* -> *Installed Operators* page and inspect the *Operator Subscriptions* and *Install Plans* tabs for any failure or errors under *Status*. +* Go to the *Ecosystem* -> *Installed Operators* page and inspect the *Operator Subscriptions* and *Install Plans* tabs for any failure or errors under *Status*. * Go to the *Workloads* -> *Pods* page and check the logs for pods in the `openshift-ptp` project. diff --git a/modules/nw-sriov-installing-operator.adoc b/modules/nw-sriov-installing-operator.adoc index 6aa6e07757..33691c82d8 100644 --- a/modules/nw-sriov-installing-operator.adoc +++ b/modules/nw-sriov-installing-operator.adoc @@ -113,7 +113,7 @@ As a cluster administrator, you can install the Operator using the web console. . Install the SR-IOV Network Operator: -.. In the {product-title} web console, click *Operators* -> *OperatorHub*. +.. In the {product-title} web console, click *Ecosystem* -> *Software Catalog*. .. Select *SR-IOV Network Operator* from the list of available Operators, and then click *Install*. @@ -123,7 +123,7 @@ As a cluster administrator, you can install the Operator using the web console. . Verify that the SR-IOV Network Operator is installed successfully: -.. Navigate to the *Operators* -> *Installed Operators* page. +.. Navigate to the *Ecosystem* -> *Installed Operators* page. .. 
Ensure that *SR-IOV Network Operator* is listed in the *openshift-sriov-network-operator* project with a *Status* of *InstallSucceeded*. + diff --git a/modules/oadp-installing-dpa-1-3.adoc b/modules/oadp-installing-dpa-1-3.adoc index ae2e0be5e3..f65bdb2e7b 100644 --- a/modules/oadp-installing-dpa-1-3.adoc +++ b/modules/oadp-installing-dpa-1-3.adoc @@ -39,7 +39,7 @@ If you do not want to specify backup or snapshot locations during the installati .Procedure -. Click *Operators* -> *Installed Operators* and select the OADP Operator. +. Click *Ecosystem* -> *Installed Operators* and select the OADP Operator. . Under *Provided APIs*, click *Create instance* in the *DataProtectionApplication* box. . Click *YAML View* and update the parameters of the `DataProtectionApplication` manifest: diff --git a/modules/oadp-installing-operator.adoc b/modules/oadp-installing-operator.adoc index 34dbe6561a..59a738ea4d 100644 --- a/modules/oadp-installing-operator.adoc +++ b/modules/oadp-installing-operator.adoc @@ -20,8 +20,8 @@ The OADP Operator installs link:https://{velero-domain}/docs/v{velero-version}/[ .Procedure -. In the {product-title} web console, click *Operators* -> *OperatorHub*. +. In the {product-title} web console, click *Ecosystem* -> *Software Catalog*. . Use the *Filter by keyword* field to find the *OADP Operator*. . Select the *OADP Operator* and click *Install*. . Click *Install* to install the Operator in the `openshift-adp` project. -. Click *Operators* -> *Installed Operators* to verify the installation. +. Click *Ecosystem* -> *Installed Operators* to verify the installation. diff --git a/modules/odc-creating-a-binding-connection-between-components.adoc b/modules/odc-creating-a-binding-connection-between-components.adoc index 656e29b562..e8dc043d6f 100644 --- a/modules/odc-creating-a-binding-connection-between-components.adoc +++ b/modules/odc-creating-a-binding-connection-between-components.adoc @@ -11,8 +11,8 @@ You can create a binding connection with Operator-backed components, as demonstr .Prerequisites * You created and deployed a Spring PetClinic sample application in the *Developer* perspective. -* You installed {servicebinding-title} from the *OperatorHub*. -* You installed the *Crunchy Postgres for Kubernetes* Operator from the OperatorHub in the `v5` *Update* channel. +* You installed {servicebinding-title} from the software catalog. +* You installed the *Crunchy Postgres for Kubernetes* Operator from the software catalog in the `v5` *Update* channel. * You created a *PostgresCluster* resource in the *Developer* perspective, which resulted in a Crunchy PostgreSQL database instance with the following components: `hippo-backup`, `hippo-instance`, `hippo-repo-host`, and `hippo-pgbouncer`. .Procedure @@ -67,4 +67,4 @@ image::odc_context_operator.png[] You can now view the Spring PetClinic sample application remotely to confirm that the application is now connected to the database service and that the data has been successfully projected to the application from the Crunchy PostgreSQL database service. -The Service Binding Operator has successfully created a working connection between the application and the database service. \ No newline at end of file +The Service Binding Operator has successfully created a working connection between the application and the database service. 
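As a CLI-side counterpart to the OADP Operator verification step shown earlier (clicking *Ecosystem* -> *Installed Operators*), the following check is a minimal sketch; the `openshift-adp` namespace matches that procedure, and the CSV name in the output varies by OADP version:

[source,terminal]
----
$ oc get csv -n openshift-adp
----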
diff --git a/modules/olm-accessing-images-private-registries.adoc b/modules/olm-accessing-images-private-registries.adoc index 8683e9d5eb..80725a15f8 100644 --- a/modules/olm-accessing-images-private-registries.adoc +++ b/modules/olm-accessing-images-private-registries.adoc @@ -7,7 +7,7 @@ = Accessing images for Operators from private registries ifeval::["{context}" == "olm-managing-custom-catalogs"] -If certain images relevant to Operators managed by Operator Lifecycle Manager (OLM) are hosted in an authenticated container image registry, also known as a private registry, OLM and OperatorHub are unable to pull the images by default. To enable access, you can create a pull secret that contains the authentication credentials for the registry. By referencing one or more pull secrets in a catalog source, OLM can handle placing the secrets in the Operator and catalog namespace to allow installation. +If certain images relevant to Operators managed by Operator Lifecycle Manager (OLM) are hosted in an authenticated container image registry, also known as a private registry, OLM and the software catalog are unable to pull the images by default. To enable access, you can create a pull secret that contains the authentication credentials for the registry. By referencing one or more pull secrets in a catalog source, OLM can handle placing the secrets in the Operator and catalog namespace to allow installation. Other images required by an Operator or its Operands might require access to private registries as well. OLM does not handle placing the secrets in target tenant namespaces for this scenario, but authentication credentials can be added to the global cluster pull secret or individual namespace service accounts to enable the required access. diff --git a/modules/olm-approving-pending-upgrade.adoc b/modules/olm-approving-pending-upgrade.adoc index 31a9180bae..9c3cacbe64 100644 --- a/modules/olm-approving-pending-upgrade.adoc +++ b/modules/olm-approving-pending-upgrade.adoc @@ -15,7 +15,7 @@ If an installed Operator has the approval strategy in its subscription set to *M .Procedure -. In the *Administrator* perspective of the {product-title} web console, navigate to *Operators -> Installed Operators*. +. In the {product-title} web console, navigate to *Ecosystem* -> *Installed Operators*. . Operators that have a pending update display a status with *Upgrade available*. Click the name of the Operator you want to update. @@ -25,4 +25,4 @@ If an installed Operator has the approval strategy in its subscription set to *M . Review the resources that are listed as available for update. When satisfied, click *Approve*. -. Navigate back to the *Operators -> Installed Operators* page to monitor the progress of the update. When complete, the status changes to *Succeeded* and *Up to date*. +. Navigate back to the *Ecosystem* -> *Installed Operators* page to monitor the progress of the update. When complete, the status changes to *Succeeded* and *Up to date*. diff --git a/modules/olm-catalogsource.adoc b/modules/olm-catalogsource.adoc index 173cfdef94..9f1013518b 100644 --- a/modules/olm-catalogsource.adoc +++ b/modules/olm-catalogsource.adoc @@ -12,7 +12,7 @@ endif::[] [id="olm-catalogsource_{context}"] = Catalog source -A _catalog source_ represents a store of metadata, typically by referencing an _index image_ stored in a container registry. Operator Lifecycle Manager (OLM) queries catalog sources to discover and install Operators and their dependencies. 
OperatorHub in the {product-title} web console also displays the Operators provided by catalog sources. +A _catalog source_ represents a store of metadata, typically by referencing an _index image_ stored in a container registry. Operator Lifecycle Manager (OLM) queries catalog sources to discover and install Operators and their dependencies. The software catalog in the {product-title} web console also displays the Operators provided by catalog sources. [TIP] ==== diff --git a/modules/olm-changing-update-channel.adoc b/modules/olm-changing-update-channel.adoc index 20ba726ea3..5e4a81034c 100644 --- a/modules/olm-changing-update-channel.adoc +++ b/modules/olm-changing-update-channel.adoc @@ -19,7 +19,7 @@ If the approval strategy in the subscription is set to *Automatic*, the update p .Procedure -. In the *Administrator* perspective of the web console, navigate to *Operators -> Installed Operators*. +. In the web console, navigate to *Ecosystem* -> *Installed Operators*. . Click the name of the Operator you want to change the update channel for. @@ -29,6 +29,6 @@ If the approval strategy in the subscription is set to *Automatic*, the update p . Click the newer update channel that you want to change to, then click *Save*. -. For subscriptions with an *Automatic* approval strategy, the update begins automatically. Navigate back to the *Operators -> Installed Operators* page to monitor the progress of the update. When complete, the status changes to *Succeeded* and *Up to date*. +. For subscriptions with an *Automatic* approval strategy, the update begins automatically. Navigate back to the *Ecosystem* -> *Installed Operators* page to monitor the progress of the update. When complete, the status changes to *Succeeded* and *Up to date*. + For subscriptions with a *Manual* approval strategy, you can manually approve the update from the *Subscription* tab. diff --git a/modules/olm-creating-catalog-from-index.adoc b/modules/olm-creating-catalog-from-index.adoc index 7bfa15e024..eab278e8d4 100644 --- a/modules/olm-creating-catalog-from-index.adoc +++ b/modules/olm-creating-catalog-from-index.adoc @@ -32,7 +32,7 @@ endif::openshift-dedicated,openshift-rosa,openshift-rosa-hcp[] ifdef::openshift-dedicated,openshift-rosa,openshift-rosa-hcp[] Administrators with the `dedicated-admin` role endif::openshift-dedicated,openshift-rosa,openshift-rosa-hcp[] -can create a `CatalogSource` object that references an index image. OperatorHub uses catalog sources to populate the user interface. +can create a `CatalogSource` object that references an index image. The software catalog uses catalog sources to populate the user interface. // In OSD/ROSA, a dedicated-admin can see catalog sources here, but can't add, edit, or delete them. ifndef::openshift-dedicated,openshift-rosa,openshift-rosa-hcp[] @@ -177,7 +177,7 @@ NAME CATALOG AGE jaeger-product My Operator Catalog 93s ---- -You can now install the Operators from the *OperatorHub* page on your {product-title} web console. +You can now install the Operators from the *Software Catalog* page on your {product-title} web console. :!index-image: :!tag: diff --git a/modules/olm-creating-etcd-cluster-from-operator.adoc b/modules/olm-creating-etcd-cluster-from-operator.adoc index 129eeb3b4b..7a9ea59b63 100644 --- a/modules/olm-creating-etcd-cluster-from-operator.adoc +++ b/modules/olm-creating-etcd-cluster-from-operator.adoc @@ -18,7 +18,7 @@ endif::openshift-dedicated,openshift-rosa,openshift-rosa-hcp[] .
Create a new project in the {product-title} web console for this procedure. This example uses a project called `my-etcd`. -. Navigate to the *Operators -> Installed Operators* page. The Operators that have been installed to the cluster by the +. Navigate to the *Ecosystem* -> *Installed Operators* page. The Operators that have been installed to the cluster by the ifndef::openshift-dedicated,openshift-rosa,openshift-rosa-hcp[] cluster administrator endif::openshift-dedicated,openshift-rosa,openshift-rosa-hcp[] diff --git a/modules/olm-cs-health.adoc b/modules/olm-cs-health.adoc index 13eae138ff..8bc60cbc04 100644 --- a/modules/olm-cs-health.adoc +++ b/modules/olm-cs-health.adoc @@ -11,4 +11,4 @@ For example, if Catalog A is unhealthy, a subscription referencing Catalog A cou As a result, OLM requires that all catalogs with a given global namespace (for example, the default `openshift-marketplace` namespace or a custom global namespace) are healthy. When a catalog is unhealthy, all Operator installation or update operations within its shared global namespace will fail with a `CatalogSourcesUnhealthy` condition. If these operations were permitted in an unhealthy state, OLM might make resolution and installation decisions that were unexpected to the cluster administrator. -As a cluster administrator, if you observe an unhealthy catalog and want to consider the catalog as invalid and resume Operator installations, see the "Removing custom catalogs" or "Disabling the default OperatorHub catalog sources" sections for information about removing the unhealthy catalog. \ No newline at end of file +As a cluster administrator, if you observe an unhealthy catalog and want to consider the catalog as invalid and resume Operator installations, see the "Removing custom catalogs" or "Disabling the default software catalog sources" sections for information about removing the unhealthy catalog. diff --git a/modules/olm-deleting-operators-from-a-cluster-using-web-console.adoc b/modules/olm-deleting-operators-from-a-cluster-using-web-console.adoc index bd27e20284..6f0488e5b6 100644 --- a/modules/olm-deleting-operators-from-a-cluster-using-web-console.adoc +++ b/modules/olm-deleting-operators-from-a-cluster-using-web-console.adoc @@ -23,7 +23,7 @@ endif::openshift-dedicated,openshift-rosa,openshift-rosa-hcp[] .Procedure -. Navigate to the *Operators* → *Installed Operators* page. +. Navigate to the *Ecosystem* -> *Installed Operators* page. . Scroll or enter a keyword into the *Filter by name* field to find the Operator that you want to remove. Then, click on it. diff --git a/modules/olm-deprecations-schema.adoc b/modules/olm-deprecations-schema.adoc index d346f5bf0f..6338eca0b4 100644 --- a/modules/olm-deprecations-schema.adoc +++ b/modules/olm-deprecations-schema.adoc @@ -7,7 +7,7 @@ The optional `olm.deprecations` schema defines deprecation information for packages, bundles, and channels in a catalog. Operator authors can use this schema to provide relevant messages about their Operators, such as support status and recommended upgrade paths, to users running those Operators from a catalog. -When this schema is defined, the {product-title} web console displays warning badges for the affected elements of the Operator, including any custom deprecation messages, on both the pre- and post-installation pages of the OperatorHub. 
+When this schema is defined, the {product-title} web console displays warning badges for the affected elements of the Operator, including any custom deprecation messages, on both the pre- and post-installation pages of the software catalog. An `olm.deprecations` schema entry contains one or more of the following `reference` types, which indicates the deprecation scope. After the Operator is installed, any specified messages can be viewed as status conditions on the related `Subscription` object. @@ -79,4 +79,4 @@ my-catalog └── my-operator ├── index.yaml └── deprecations.yaml ----- \ No newline at end of file +---- diff --git a/modules/olm-filtering-fbc.adoc b/modules/olm-filtering-fbc.adoc index e720bde96c..ce59207b30 100644 --- a/modules/olm-filtering-fbc.adoc +++ b/modules/olm-filtering-fbc.adoc @@ -168,4 +168,4 @@ $ podman push //: + For more information, see "Adding a catalog source to a cluster" in the "Additional resources" of this section. -. After the catalog source is in a *READY* state, navigate to the *Operators* -> *OperatorHub* page and check that the changes you made are reflected in the list of Operators. \ No newline at end of file +. After the catalog source is in a *READY* state, navigate to the *Ecosystem* -> *Software Catalog* page. Select *Operators* under the *Type* heading and check that the changes you made are reflected in the list of Operators. diff --git a/modules/olm-installing-from-operatorhub-using-cli.adoc b/modules/olm-installing-from-operatorhub-using-cli.adoc index f3d3086824..a3d894ffd6 100644 --- a/modules/olm-installing-from-operatorhub-using-cli.adoc +++ b/modules/olm-installing-from-operatorhub-using-cli.adoc @@ -13,9 +13,9 @@ endif::[] :_mod-docs-content-type: PROCEDURE [id="olm-installing-operator-from-operatorhub-using-cli_{context}"] -= Installing from OperatorHub by using the CLI += Installing from the software catalog by using the CLI -Instead of using the {product-title} web console, you can install an Operator from OperatorHub by using the CLI. Use the `oc` command to create or update a `Subscription` object. +Instead of using the {product-title} web console, you can install an Operator from the software catalog by using the CLI. Use the `oc` command to create or update a `Subscription` object. For `SingleNamespace` install mode, you must also ensure an appropriate Operator group exists in the related namespace. An Operator group, defined by an `OperatorGroup` object, selects target namespaces in which to generate required RBAC access for all Operators in the same namespace as the Operator group. @@ -44,7 +44,7 @@ endif::[] .Procedure -. View the list of Operators available to the cluster from OperatorHub: +. View the list of Operators available to the cluster from the software catalog: + [source,terminal] ---- @@ -233,7 +233,7 @@ spec: <2> Name of the channel to subscribe to. <3> Name of the Operator to subscribe to. <4> Name of the catalog source that provides the Operator. -<5> Namespace of the catalog source. Use `openshift-marketplace` for the default OperatorHub catalog sources. +<5> Namespace of the catalog source. Use `openshift-marketplace` for the default software catalog sources. <6> The `env` parameter defines a list of environment variables that must exist in all containers in the pod created by OLM. <7> The `envFrom` parameter defines a list of sources to populate environment variables in the container. <8> The `volumes` parameter defines a list of volumes that must exist on the pod created by OLM. 
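To make the callouts above concrete, the following is a minimal sketch of the kind of `Subscription` object this CLI procedure describes. All names and values here are illustrative assumptions, not content from this patch:

[source,yaml]
----
apiVersion: operators.coreos.com/v1alpha1
kind: Subscription
metadata:
  name: my-operator                       # hypothetical Subscription name
  namespace: my-operator-namespace        # hypothetical target namespace
spec:
  channel: stable                         # <2> channel to subscribe to
  name: my-operator                       # <3> Operator to subscribe to
  source: redhat-operators                # <4> catalog source that provides the Operator
  sourceNamespace: openshift-marketplace  # <5> namespace of the catalog source
----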
diff --git a/modules/olm-installing-from-operatorhub-using-web-console.adoc b/modules/olm-installing-from-operatorhub-using-web-console.adoc index 530ae37271..5b6e0118c7 100644 --- a/modules/olm-installing-from-operatorhub-using-web-console.adoc +++ b/modules/olm-installing-from-operatorhub-using-web-console.adoc @@ -30,9 +30,9 @@ endif::[] :_mod-docs-content-type: PROCEDURE [id="olm-installing-from-operatorhub-using-web-console_{context}"] -= Installing from OperatorHub by using the web console += Installing from the software catalog by using the web console -You can install and subscribe to an Operator from OperatorHub by using the {product-title} web console. +You can install and subscribe to an Operator from the software catalog by using the {product-title} web console. .Prerequisites @@ -52,7 +52,7 @@ endif::[] .Procedure -. Navigate in the web console to the *Operators → OperatorHub* page. +. Navigate in the web console to the *Ecosystem* -> *Software Catalog* page. . Scroll or type a keyword into the *Filter by keyword* box to find the Operator you want. For example, type `{filter-type}` to find the {filter-operator} Operator. + @@ -124,7 +124,7 @@ After approving on the *Install Plan* page, the subscription upgrade status move .Verification -* After the upgrade status of the subscription is *Up to date*, select *Operators* -> *Installed Operators* to verify that the cluster service version (CSV) of the installed Operator eventually shows up. The *Status* should eventually resolve to *Succeeded* in the relevant namespace. +* After the upgrade status of the subscription is *Up to date*, select *Ecosystem* -> *Installed Operators* to verify that the cluster service version (CSV) of the installed Operator eventually shows up. The *Status* should eventually resolve to *Succeeded* in the relevant namespace. + [NOTE] ==== @@ -140,4 +140,4 @@ If it does not: [NOTE] ==== The *Channel* and *Version* dropdown menus are still available for viewing other version metadata in this catalog context. -==== \ No newline at end of file +==== diff --git a/modules/olm-installing-operators-from-operatorhub.adoc b/modules/olm-installing-operators-from-operatorhub.adoc index 236cba01e4..166e1c9a35 100644 --- a/modules/olm-installing-operators-from-operatorhub.adoc +++ b/modules/olm-installing-operators-from-operatorhub.adoc @@ -13,23 +13,23 @@ endif::[] :_mod-docs-content-type: CONCEPT [id="olm-installing-operators-from-operatorhub_{context}"] -= About Operator installation with OperatorHub += About Operator installation from the software catalog -OperatorHub is a user interface for discovering Operators; it works in conjunction with Operator Lifecycle Manager (OLM), which installs and manages Operators on a cluster. +The software catalog is a user interface for discovering Operators; it works in conjunction with Operator Lifecycle Manager (OLM), which installs and manages Operators on a cluster. ifndef::olm-user,openshift-dedicated,openshift-rosa,openshift-rosa-hcp[] -As a cluster administrator, you can install an Operator from OperatorHub by using the {product-title} +As a cluster administrator, you can install an Operator from the software catalog by using the {product-title} ifdef::openshift-enterprise,openshift-webscale,openshift-origin[] web console or CLI. Subscribing an Operator to one or more namespaces makes the Operator available to developers on your cluster.
endif::[] endif::[] ifdef::openshift-dedicated,openshift-rosa,openshift-rosa-hcp[] -As a `dedicated-admin`, you can install an Operator from OperatorHub by using the {product-title} web console or CLI. Subscribing an Operator to one or more namespaces makes the Operator available to developers on your cluster. +As a `dedicated-admin`, you can install an Operator from the software catalog by using the {product-title} web console or CLI. Subscribing an Operator to one or more namespaces makes the Operator available to developers on your cluster. endif::openshift-dedicated,openshift-rosa,openshift-rosa-hcp[] ifdef::olm-user[] -As a user with the proper permissions, you can install an Operator from OperatorHub by using the {product-title} web console or CLI. +As a user with the proper permissions, you can install an Operator from the software catalog by using the {product-title} web console or CLI. endif::[] During installation, you must determine the following initial settings for the Operator: diff --git a/modules/olm-mirroring-catalog-airgapped.adoc b/modules/olm-mirroring-catalog-airgapped.adoc index 279fcd1d0b..ecbc8fadbd 100644 --- a/modules/olm-mirroring-catalog-airgapped.adoc +++ b/modules/olm-mirroring-catalog-airgapped.adoc @@ -105,6 +105,6 @@ $ oc adm catalog mirror \ This step is required because the image mappings in the `imageContentSourcePolicy.yaml` file generated during the previous step must be updated from local paths to valid mirror locations. Failure to do so will cause errors when you create the `ImageContentSourcePolicy` object in a later step. ==== -After you mirror the catalog, you can continue with the remainder of your cluster installation. After your cluster installation has finished successfully, you must specify the manifests directory from this procedure to create the `ImageContentSourcePolicy` and `CatalogSource` objects. These objects are required to enable installation of Operators from OperatorHub. +After you mirror the catalog, you can continue with the remainder of your cluster installation. After your cluster installation has finished successfully, you must specify the manifests directory from this procedure to create the `ImageContentSourcePolicy` and `CatalogSource` objects. These objects are required to enable installation of Operators from the software catalog. :!index-image-pullspec: diff --git a/modules/olm-mirroring-catalog-post.adoc b/modules/olm-mirroring-catalog-post.adoc index 01c6d659a6..89814c2471 100644 --- a/modules/olm-mirroring-catalog-post.adoc +++ b/modules/olm-mirroring-catalog-post.adoc @@ -5,4 +5,4 @@ [id="olm-mirror-catalog-post_{context}"] = Postinstallation requirements -After you mirror the catalog, you can continue with the remainder of your cluster installation. After your cluster installation has finished successfully, you must specify the manifests directory from this procedure to create the `ImageContentSourcePolicy` and `CatalogSource` objects. These objects are required to populate and enable installation of Operators from OperatorHub. +After you mirror the catalog, you can continue with the remainder of your cluster installation. After your cluster installation has finished successfully, you must specify the manifests directory from this procedure to create the `ImageContentSourcePolicy` and `CatalogSource` objects. These objects are required to populate and enable installation of Operators from the software catalog. 
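For context, the `CatalogSource` object referenced in the postinstallation requirements above typically points at the mirrored index image. The following is an illustrative sketch only; the catalog name, registry host, image path, and tag are assumptions:

[source,yaml]
----
apiVersion: operators.coreos.com/v1alpha1
kind: CatalogSource
metadata:
  name: my-mirrored-catalog          # hypothetical catalog source name
  namespace: openshift-marketplace   # global catalog namespace
spec:
  sourceType: grpc
  image: mirror.registry.example.com/olm/redhat-operator-index:v1   # hypothetical mirrored index image
  displayName: My Mirrored Operator Catalog
----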
diff --git a/modules/olm-operator-framework.adoc b/modules/olm-operator-framework.adoc index 6e893115de..0ad00ed5b3 100644 --- a/modules/olm-operator-framework.adoc +++ b/modules/olm-operator-framework.adoc @@ -19,7 +19,7 @@ endif::openshift-rosa,openshift-rosa-hcp,openshift-dedicated[] Operator Registry:: The Operator Registry stores cluster service versions (CSVs) and custom resource definitions (CRDs) for creation in a cluster and stores Operator metadata about packages and channels. It runs in a Kubernetes or OpenShift cluster to provide this Operator catalog data to OLM. -OperatorHub:: -OperatorHub is a web console for cluster administrators to discover and select Operators to install on their cluster. It is deployed by default in {product-title}. +Software Catalog:: +The software catalog is a web console for cluster administrators to discover and select Operators to install on their cluster. It is deployed by default in {product-title}. These tools are designed to be composable, so you can use any that are useful to you. diff --git a/modules/olm-operatorgroups-limitations.adoc b/modules/olm-operatorgroups-limitations.adoc index c27ccfbdc6..8003e9dfad 100644 --- a/modules/olm-operatorgroups-limitations.adoc +++ b/modules/olm-operatorgroups-limitations.adoc @@ -15,6 +15,6 @@ All tenants, or namespaces, share the same control plane of a cluster. Therefore The supported scenarios include the following: * Operators of different versions that ship the exact same CRD definition (in case of versioned CRDs, the exact same set of versions) -* Operators of different versions that do not ship a CRD, and instead have their CRD available in a separate bundle on the OperatorHub +* Operators of different versions that do not ship a CRD, and instead have their CRD available in a separate bundle in the software catalog -All other scenarios are not supported, because the integrity of the cluster data cannot be guaranteed if there are multiple competing or overlapping CRDs from different Operator versions to be reconciled on the same cluster. \ No newline at end of file +All other scenarios are not supported, because the integrity of the cluster data cannot be guaranteed if there are multiple competing or overlapping CRDs from different Operator versions to be reconciled on the same cluster. diff --git a/modules/olm-operatorhub-architecture.adoc b/modules/olm-operatorhub-architecture.adoc index fda352fa34..9f60f2e3d4 100644 --- a/modules/olm-operatorhub-architecture.adoc +++ b/modules/olm-operatorhub-architecture.adoc @@ -3,14 +3,14 @@ // * operators/understanding/olm-understanding-operatorhub.adoc [id="olm-operatorhub-arch_{context}"] -= OperatorHub architecture += Software catalog architecture -The OperatorHub UI component is driven by the Marketplace Operator by default on {product-title} in the `openshift-marketplace` namespace. +The software catalog UI component is driven by the Marketplace Operator by default on {product-title} in the `openshift-marketplace` namespace. [id="olm-operatorhub-arch-operatorhub_crd_{context}"] == OperatorHub custom resource -The Marketplace Operator manages an `OperatorHub` custom resource (CR) named `cluster` that manages the default `CatalogSource` objects provided with OperatorHub. +The Marketplace Operator manages an `OperatorHub` custom resource (CR) named `cluster` that manages the default `CatalogSource` objects provided with the software catalog. 
ifndef::openshift-dedicated,openshift-rosa,openshift-rosa-hcp[] You can modify this resource to enable or disable the default catalogs, which is useful when configuring {product-title} in restricted network environments. diff --git a/modules/olm-operatorhub-overview.adoc b/modules/olm-operatorhub-overview.adoc index fe6e88d048..912c952e65 100644 --- a/modules/olm-operatorhub-overview.adoc +++ b/modules/olm-operatorhub-overview.adoc @@ -4,9 +4,9 @@ :_mod-docs-content-type: CONCEPT [id="olm-operatorhub-overview_{context}"] -= About OperatorHub += About the software catalog -_OperatorHub_ is the web console interface in {product-title} that cluster administrators use to discover and install Operators. With one click, an Operator can be pulled from its off-cluster source, installed and subscribed on the cluster, and made ready for engineering teams to self-service manage the product across deployment environments using Operator Lifecycle Manager (OLM). +The _software catalog_ is the web console interface in {product-title} that cluster administrators use to discover and install Operators. With one click, an Operator can be pulled from its off-cluster source, installed and subscribed on the cluster, and made ready for engineering teams to self-service manage the product across deployment environments using Operator Lifecycle Manager (OLM). ifndef::openshift-origin[] Cluster administrators can choose from catalogs grouped into the following categories: @@ -28,8 +28,8 @@ Cluster administrators can choose from catalogs grouped into the following categ |Optionally-visible software maintained by relevant representatives in the link:https://github.com/redhat-openshift-ecosystem/community-operators-prod/tree/main/operators[redhat-openshift-ecosystem/community-operators-prod/operators] GitHub repository. No official support. |Custom Operators -|Operators you add to the cluster yourself. If you have not added any custom Operators, the *Custom* category does not appear in the web console on your OperatorHub. +|Operators you add to the cluster yourself. If you have not added any custom Operators, the *Custom* category does not appear in the web console software catalog. |=== endif::[] -Operators on OperatorHub are packaged to run on OLM. This includes a YAML file called a cluster service version (CSV) containing all of the CRDs, RBAC rules, deployments, and container images required to install and securely run the Operator. It also contains user-visible information like a description of its features and supported Kubernetes versions. +Operators in the software catalog are packaged to run on OLM. This includes a YAML file called a cluster service version (CSV) containing all of the CRDs, RBAC rules, deployments, and container images required to install and securely run the Operator. It also contains user-visible information like a description of its features and supported Kubernetes versions. diff --git a/modules/olm-overriding-proxy-settings.adoc b/modules/olm-overriding-proxy-settings.adoc index 9fc51c9def..f738861e5b 100644 --- a/modules/olm-overriding-proxy-settings.adoc +++ b/modules/olm-overriding-proxy-settings.adoc @@ -34,7 +34,7 @@ endif::openshift-dedicated,openshift-rosa,openshift-rosa-hcp[] .Procedure -. Navigate in the web console to the *Operators → OperatorHub* page. +. Navigate in the web console to the *Ecosystem → Software Catalog* page. . Select the Operator and click *Install*. 
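The per-Operator proxy override described in the `olm-overriding-proxy-settings` module above is stored on the Operator's `Subscription` object as environment variables. The fragment below is a hedged illustration only; the Operator name, namespace, and proxy endpoints are assumptions:

[source,yaml]
----
apiVersion: operators.coreos.com/v1alpha1
kind: Subscription
metadata:
  name: my-operator                  # hypothetical
  namespace: my-operator-namespace   # hypothetical
spec:
  config:
    env:
    - name: HTTP_PROXY
      value: http://proxy.example.com:3128   # assumed proxy endpoint
    - name: HTTPS_PROXY
      value: http://proxy.example.com:3128   # assumed proxy endpoint
    - name: NO_PROXY
      value: .cluster.local,.svc             # assumed exclusions
----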
diff --git a/modules/olm-refresh-subs.adoc b/modules/olm-refresh-subs.adoc index f8a37f8786..3ddbc48bfd 100644 --- a/modules/olm-refresh-subs.adoc +++ b/modules/olm-refresh-subs.adoc @@ -97,7 +97,7 @@ This ensures pods that try to pull the inaccessible image are not recreated. $ oc delete configmap -n openshift-marketplace ---- -. Reinstall the Operator using OperatorHub in the web console. +. Reinstall the Operator using the software catalog in the web console. .Verification diff --git a/modules/olm-reinstall.adoc b/modules/olm-reinstall.adoc index 914748dfb6..413af11bdb 100644 --- a/modules/olm-reinstall.adoc +++ b/modules/olm-reinstall.adoc @@ -93,7 +93,7 @@ $ oc get namespace If the namespace or other Operator resources are still not uninstalled cleanly, contact Red Hat Support. ==== -. Reinstall the Operator using OperatorHub in the web console. +. Reinstall the Operator using the software catalog in the web console. .Verification @@ -102,4 +102,4 @@ If the namespace or other Operator resources are still not uninstalled cleanly, [source,terminal] ---- $ oc get sub,csv,installplan -n ----- \ No newline at end of file +---- diff --git a/modules/olm-restricted-networks-configuring-operatorhub.adoc b/modules/olm-restricted-networks-configuring-operatorhub.adoc index 2229dcc5de..613a6f9096 100644 --- a/modules/olm-restricted-networks-configuring-operatorhub.adoc +++ b/modules/olm-restricted-networks-configuring-operatorhub.adoc @@ -30,14 +30,14 @@ endif::[] :_mod-docs-content-type: PROCEDURE [id="olm-restricted-networks-operatorhub_{context}"] -= Disabling the default OperatorHub catalog sources += Disabling the default software catalog sources -Operator catalogs that source content provided by Red Hat and community projects are configured for OperatorHub by default during an {product-title} installation. +Operator catalogs that source content provided by Red Hat and community projects are configured for the software catalog by default during an {product-title} installation. ifndef::olm-managing-custom-catalogs[] In a restricted network environment, you must disable the default catalogs as a cluster administrator. endif::[] ifdef::olm-restricted-networks[] -You can then configure OperatorHub to use local catalog sources. +You can then configure the `OperatorHub` custom resource (CR) to use local catalog sources for the software catalog. endif::[] ifdef::olm-managing-custom-catalogs[] As a cluster administrator, you can disable the set of default catalogs.
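In addition to the web console, the default catalog sources can typically be disabled from the CLI by patching the `OperatorHub` custom resource named `cluster`. The following command is the commonly documented form, shown here as a reference:

[source,terminal]
----
$ oc patch OperatorHub cluster --type json \
    -p '[{"op": "add", "path": "/spec/disableAllDefaultSources", "value": true}]'
----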
diff --git a/modules/olm-updating-index-image.adoc b/modules/olm-updating-index-image.adoc index 3fad7a8bf5..18d60e49ff 100644 --- a/modules/olm-updating-index-image.adoc +++ b/modules/olm-updating-index-image.adoc @@ -13,7 +13,7 @@ endif::[] [id="olm-updating-index-image_{context}"] = Updating a SQLite-based index image -After configuring OperatorHub to use a catalog source that references a custom index image, +After configuring the software catalog to use a catalog source that references a custom index image, ifndef::openshift-dedicated,openshift-rosa,openshift-rosa-hcp[] cluster administrators endif::openshift-dedicated,openshift-rosa,openshift-rosa-hcp[] diff --git a/modules/op-installing-pipelines-operator-in-web-console.adoc b/modules/op-installing-pipelines-operator-in-web-console.adoc index 54a582c3d7..347539cd76 100644 --- a/modules/op-installing-pipelines-operator-in-web-console.adoc +++ b/modules/op-installing-pipelines-operator-in-web-console.adoc @@ -5,7 +5,7 @@ [id="op-installing-pipelines-operator-in-web-console_{context}"] = Installing the {pipelines-title} Operator in web console -You can install {pipelines-title} using the Operator listed in the {product-title} OperatorHub. When you install the {pipelines-title} Operator, the custom resources (CRs) required for the pipelines configuration are automatically installed along with the Operator. +You can install {pipelines-title} using the Operator listed in the {product-title} software catalog. When you install the {pipelines-title} Operator, the custom resources (CRs) required for the pipelines configuration are automatically installed along with the Operator. The default Operator custom resource definition (CRD) `config.operator.tekton.dev` is now replaced by `tektonconfigs.operator.tekton.dev`. In addition, the Operator provides the following additional CRDs to individually manage {pipelines-shortname} components: `tektonpipelines.operator.tekton.dev`, `tektontriggers.operator.tekton.dev` and `tektonaddons.operator.tekton.dev`. @@ -27,7 +27,7 @@ The supported profiles are: [discrete] .Procedure -. In the *Administrator* perspective of the web console, navigate to *Operators* -> *OperatorHub*. +. In the *Administrator* perspective of the web console, navigate to *Ecosystem* -> *Software Catalog*. . Use the *Filter by keyword* box to search for `{pipelines-title}` Operator in the catalog. Click the *{pipelines-title}* Operator tile. diff --git a/modules/op-installing-pipelines-operator-using-the-cli.adoc b/modules/op-installing-pipelines-operator-using-the-cli.adoc index e97f291255..09ed17a285 100644 --- a/modules/op-installing-pipelines-operator-using-the-cli.adoc +++ b/modules/op-installing-pipelines-operator-using-the-cli.adoc @@ -6,7 +6,7 @@ [id="op-installing-pipelines-operator-using-the-cli_{context}"] = Installing the {pipelines-shortname} Operator using the CLI -You can install {pipelines-title} Operator from the OperatorHub using the CLI. +You can install {pipelines-title} Operator from the software catalog using the CLI. [discrete] .Procedure @@ -31,7 +31,7 @@ spec: <1> The channel name of the Operator. The `pipelines-` channel is the default channel. For example, the default channel for {pipelines-title} Operator version `1.7` is `pipelines-1.7`. The `latest` channel enables installation of the most recent stable version of the {pipelines-title} Operator. <2> Name of the Operator to subscribe to. <3> Name of the CatalogSource that provides the Operator. -<4> Namespace of the CatalogSource. 
Use `openshift-marketplace` for the default OperatorHub CatalogSources. +<4> Namespace of the CatalogSource. Use `openshift-marketplace` for the default software catalog sources. . Create the Subscription object: + diff --git a/modules/op-installing-sbo-operator-using-the-web-console.adoc b/modules/op-installing-sbo-operator-using-the-web-console.adoc index bd1c4a9d28..ab477da571 100644 --- a/modules/op-installing-sbo-operator-using-the-web-console.adoc +++ b/modules/op-installing-sbo-operator-using-the-web-console.adoc @@ -2,7 +2,7 @@ [id="op-installing-sbo-operator-using-the-web-console_{context}"] = Installing the {servicebinding-title} using the web console -You can install {servicebinding-title} using the {product-title} OperatorHub. When you install the {servicebinding-title}, the custom resources (CRs) required for the service binding configuration are automatically installed along with the Operator. +You can install {servicebinding-title} using the {product-title} software catalog. When you install the {servicebinding-title}, the custom resources (CRs) required for the service binding configuration are automatically installed along with the Operator. //[discrete] //== Prerequisites @@ -11,7 +11,7 @@ You can install {servicebinding-title} using the {product-title} OperatorHub. Wh [discrete] .Procedure -. In the *Administrator* perspective of the web console, navigate to *Operators* -> *OperatorHub*. +. In the *Administrator* perspective of the web console, navigate to *Ecosystem* -> *Software Catalog*. . Use the *Filter by keyword* box to search for `{servicebinding-title}` in the catalog. Click the *{servicebinding-title}* tile. diff --git a/modules/op-uninstalling-the-pipelines-operator.adoc b/modules/op-uninstalling-the-pipelines-operator.adoc index fcd4cdde91..afa747df58 100644 --- a/modules/op-uninstalling-the-pipelines-operator.adoc +++ b/modules/op-uninstalling-the-pipelines-operator.adoc @@ -11,7 +11,7 @@ You can uninstall the {pipelines-title} Operator by using the *Administrator* pe [discrete] .Procedure -. From the *Operators* -> *OperatorHub* page, use the *Filter by keyword* box to search for the *{pipelines-title}* Operator. +. From the *Ecosystem* -> *Software Catalog* page, use the *Filter by keyword* box to search for the *{pipelines-title}* Operator. . Click the *{pipelines-title}* Operator tile. The Operator tile indicates that the Operator is installed. diff --git a/modules/optional-capabilities-operators.adoc b/modules/optional-capabilities-operators.adoc index a2f1a6ee25..383c3bc206 100644 --- a/modules/optional-capabilities-operators.adoc +++ b/modules/optional-capabilities-operators.adoc @@ -6,4 +6,4 @@ [id="optional-capabilities-operators_{context}"] = Enhancing the {product-title} web console with Operators -Cluster administrators can install Operators on clusters in the {product-title} web console by using the OperatorHub to provide customization outside of layered products for developers. For example, the Web Terminal Operator allows you to start a web terminal in your browser with common CLI tools for interacting with the cluster. \ No newline at end of file +Cluster administrators can install Operators on clusters in the {product-title} web console by using the software catalog to provide customization outside of layered products for developers. For example, the Web Terminal Operator allows you to start a web terminal in your browser with common CLI tools for interacting with the cluster. 
diff --git a/modules/osd-intro.adoc b/modules/osd-intro.adoc index f31f4ec6ed..ea88cf792a 100644 --- a/modules/osd-intro.adoc +++ b/modules/osd-intro.adoc @@ -33,7 +33,7 @@ RHCOS includes: == Other key features Operators are both the fundamental unit of the {product-title} code base and a convenient way to deploy applications and software components for your applications to use. In {product-title}, Operators serve as the platform foundation and remove the need for manual upgrades of operating systems and control plane applications. {product-title} Operators such as the Cluster Version Operator and Machine Config Operator allow simplified, cluster-wide management of those critical components. -Operator Lifecycle Manager (OLM) and the OperatorHub provide facilities for storing and distributing Operators to people developing and deploying applications. +Operator Lifecycle Manager (OLM) and the software catalog provide facilities for storing and distributing Operators to people developing and deploying applications. The {quay} Container Registry is a Quay.io container registry that serves most of the container images and Operators to {product-title} clusters. Quay.io is a public registry version of {quay} that stores millions of images and tags. diff --git a/modules/ossm-add-project-member-roll-resource-console.adoc b/modules/ossm-add-project-member-roll-resource-console.adoc index 44170d574a..86cdf6b765 100644 --- a/modules/ossm-add-project-member-roll-resource-console.adoc +++ b/modules/ossm-add-project-member-roll-resource-console.adoc @@ -21,7 +21,7 @@ The `ServiceMeshMemberRoll` resource is deleted when its corresponding `ServiceM . Log in to the {product-title} web console. -. Navigate to *Operators* -> *Installed Operators*. +. Navigate to *Ecosystem* -> *Installed Operators*. . Click the *Project* menu and choose the project where your `ServiceMeshControlPlane` resource is deployed from the list. For example `istio-system`. @@ -52,4 +52,4 @@ spec: . Click *Save*. -. Click *Reload*. \ No newline at end of file +. Click *Reload*. diff --git a/modules/ossm-add-project-using-label-selectors-console.adoc b/modules/ossm-add-project-using-label-selectors-console.adoc index 7c4da30107..91129c6850 100644 --- a/modules/ossm-add-project-using-label-selectors-console.adoc +++ b/modules/ossm-add-project-using-label-selectors-console.adoc @@ -20,7 +20,7 @@ endif::openshift-rosa,openshift-rosa-hcp,openshift-dedicated[] .Procedure -. Navigate to *Operators* -> *Installed Operators*. +. Navigate to *Ecosystem* -> *Installed Operators*. . Click the *Project* menu, and from the drop-down list, select the project where your `ServiceMeshMemberRoll` resource is deployed. For example, *istio-system*. @@ -38,4 +38,4 @@ For example, entering `mykey=myvalue` includes all namespaces with this label as + Entering `myotherkey=myothervalue` includes all namespaces with this label as part of the mesh. When the selector identifies a match, the project namespace is added to the service mesh. -. Click *Create*. \ No newline at end of file +. Click *Create*. diff --git a/modules/ossm-adding-project-using-smm-resource-console.adoc b/modules/ossm-adding-project-using-smm-resource-console.adoc index 446838257c..a13b21b690 100644 --- a/modules/ossm-adding-project-using-smm-resource-console.adoc +++ b/modules/ossm-adding-project-using-smm-resource-console.adoc @@ -18,7 +18,7 @@ You can add one or more projects to the mesh using the `ServiceMeshMember` resou . Log in to the {product-title} web console. -. 
Navigate to *Operators* -> *Installed Operators*. +. Navigate to *Ecosystem* -> *Installed Operators*. . Click the *Project* menu and choose the project that you want to add to the mesh from the drop-down list. For example, `istio-system`. diff --git a/modules/ossm-config-control-plane-infrastructure-node-console.adoc b/modules/ossm-config-control-plane-infrastructure-node-console.adoc index 6c097f40da..c7566405dd 100644 --- a/modules/ossm-config-control-plane-infrastructure-node-console.adoc +++ b/modules/ossm-config-control-plane-infrastructure-node-console.adoc @@ -24,7 +24,7 @@ endif::openshift-rosa,openshift-dedicated[] . Log in to the {product-title} web console. -. Navigate to *Operators* -> *Installed Operators*. +. Navigate to *Ecosystem* -> *Installed Operators*. . Click the {SMProductName} Operator, and then click *Istio Service Mesh Control Plane*. @@ -55,4 +55,4 @@ spec: . Click *Save*. -. Click *Reload*. \ No newline at end of file +. Click *Reload*. diff --git a/modules/ossm-config-disable-networkpolicy.adoc b/modules/ossm-config-disable-networkpolicy.adoc index b41fd8308d..2ea6b52453 100644 --- a/modules/ossm-config-disable-networkpolicy.adoc +++ b/modules/ossm-config-disable-networkpolicy.adoc @@ -20,7 +20,7 @@ When you disable `spec.security.manageNetworkPolicy` {SMProductName} will not cr .Procedure -. In the {product-title} web console, click *Operators* -> *Installed Operators*. +. In the {product-title} web console, click *Ecosystem* -> *Installed Operators*. . Select the project where you installed the {SMProductShortName} control plane, for example `istio-system`, from the *Project* menu. diff --git a/modules/ossm-config-enabling-controlplane.adoc b/modules/ossm-config-enabling-controlplane.adoc index 333d2523ae..4578eeb37f 100644 --- a/modules/ossm-config-enabling-controlplane.adoc +++ b/modules/ossm-config-enabling-controlplane.adoc @@ -27,7 +27,7 @@ You can also enable mTLS for the {SMProductShortName} control plane by using the . Click the *Project* menu and select the project where you installed the {SMProductShortName} control plane, for example *istio-system*. -. Click *Operators* -> *Installed Operators*. +. Click *Ecosystem* -> *Installed Operators*. . Click *Service Mesh Control Plane* under *Provided APIs*. diff --git a/modules/ossm-config-external-jaeger.adoc b/modules/ossm-config-external-jaeger.adoc index 4d4dcb0951..d07f60016b 100644 --- a/modules/ossm-config-external-jaeger.adoc +++ b/modules/ossm-config-external-jaeger.adoc @@ -20,7 +20,7 @@ Starting with {SMProductName} 2.5, {JaegerName} and {es-op} are deprecated and w .Procedure -. In the {product-title} web console, click *Operators* -> *Installed Operators*. +. In the {product-title} web console, click *Ecosystem* -> *Installed Operators*. . Click the *Project* menu and select the project where you installed the {SMProductShortName} control plane, for example *istio-system*. diff --git a/modules/ossm-config-individual-control-plane-infrastructure-node-console.adoc b/modules/ossm-config-individual-control-plane-infrastructure-node-console.adoc index a9993dd29c..0ed50d5575 100644 --- a/modules/ossm-config-individual-control-plane-infrastructure-node-console.adoc +++ b/modules/ossm-config-individual-control-plane-infrastructure-node-console.adoc @@ -24,7 +24,7 @@ endif::openshift-rosa,openshift-dedicated[] . Log in to the {product-title} web console. -. Navigate to *Operators* -> *Installed Operators*. +. Navigate to *Ecosystem* -> *Installed Operators*. . 
Click the {SMProductName} Operator, and then click *Istio Service Mesh Control Plane*. @@ -90,4 +90,4 @@ spec: . Click *Save*. -. Click *Reload*. \ No newline at end of file +. Click *Reload*. diff --git a/modules/ossm-config-mtls-min-max.adoc b/modules/ossm-config-mtls-min-max.adoc index cab187f9f1..f422535fb2 100644 --- a/modules/ossm-config-mtls-min-max.adoc +++ b/modules/ossm-config-mtls-min-max.adoc @@ -32,7 +32,7 @@ The default is `TLS_AUTO` and does not specify a version of TLS. . Click the *Project* menu and select the project where you installed the {SMProductShortName} control plane, for example *istio-system*. -. Click *Operators* -> *Installed Operators*. +. Click *Ecosystem* -> *Installed Operators*. . Click *Service Mesh Control Plane* under *Provided APIs*. diff --git a/modules/ossm-config-sampling.adoc b/modules/ossm-config-sampling.adoc index d6e477b919..2675fb1f75 100644 --- a/modules/ossm-config-sampling.adoc +++ b/modules/ossm-config-sampling.adoc @@ -30,7 +30,7 @@ endif::[] .Procedure -. In the {product-title} web console, click *Operators* -> *Installed Operators*. +. In the {product-title} web console, click *Ecosystem* -> *Installed Operators*. . Click the *Project* menu and select the project where you installed the control plane, for example *istio-system*. diff --git a/modules/ossm-config-sec-mtls-mesh.adoc b/modules/ossm-config-sec-mtls-mesh.adoc index 35a666e30f..e96d9ab1f5 100644 --- a/modules/ossm-config-sec-mtls-mesh.adoc +++ b/modules/ossm-config-sec-mtls-mesh.adoc @@ -27,7 +27,7 @@ You can also enable mTLS by using the {product-title} web console. . Click the *Project* menu and select the project where you installed the {SMProductShortName} control plane, for example *istio-system*. -. Click *Operators* -> *Installed Operators*. +. Click *Ecosystem* -> *Installed Operators*. . Click *Service Mesh Control Plane* under *Provided APIs*. diff --git a/modules/ossm-config-web-console.adoc b/modules/ossm-config-web-console.adoc index 10efc3934b..f1ed7e13f9 100644 --- a/modules/ossm-config-web-console.adoc +++ b/modules/ossm-config-web-console.adoc @@ -11,7 +11,7 @@ You can configure the `ServiceMeshControlPlane` by using the {product-title} web .Procedure -. In the {product-title} web console, click *Operators* -> *Installed Operators*. +. In the {product-title} web console, click *Ecosystem* -> *Installed Operators*. . Click the *Project* menu and select the project where you installed the {SMProductShortName} control plane, for example *istio-system*. diff --git a/modules/ossm-configuring-jaeger-v1x.adoc b/modules/ossm-configuring-jaeger-v1x.adoc index f6e03904c5..84b5bc6965 100644 --- a/modules/ossm-configuring-jaeger-v1x.adoc +++ b/modules/ossm-configuring-jaeger-v1x.adoc @@ -166,7 +166,7 @@ Minimum deployment = 16Gi* . Log in to the {product-title} web console as a user with the `cluster-admin` role. -. Navigate to *Operators* -> *Installed Operators*. +. Navigate to *Ecosystem* -> *Installed Operators*. . Click the {SMProductName} Operator. diff --git a/modules/ossm-control-plane-deploy-1x.adoc b/modules/ossm-control-plane-deploy-1x.adoc index 43ec80dfa2..88b46a7e24 100644 --- a/modules/ossm-control-plane-deploy-1x.adoc +++ b/modules/ossm-control-plane-deploy-1x.adoc @@ -39,7 +39,7 @@ Follow this procedure to deploy the {SMProductName} control plane by using the w .. Click *Create*. -. Navigate to *Operators* -> *Installed Operators*. +. Navigate to *Ecosystem* -> *Installed Operators*. . If necessary, select `istio-system` from the Project menu. 
You may have to wait a few moments for the Operators to be copied to the new project. diff --git a/modules/ossm-control-plane-remove.adoc b/modules/ossm-control-plane-remove.adoc index cda0c44617..dd9e7a51bd 100644 --- a/modules/ossm-control-plane-remove.adoc +++ b/modules/ossm-control-plane-remove.adoc @@ -20,7 +20,7 @@ You can remove the {SMProductName} control plane by using the web console. . Click the *Project* menu and select the project where you installed the {SMProductShortName} control plane, for example *istio-system*. -. Navigate to *Operators* -> *Installed Operators*. +. Navigate to *Ecosystem* -> *Installed Operators*. . Click *Service Mesh Control Plane* under *Provided APIs*. diff --git a/modules/ossm-control-plane-web.adoc b/modules/ossm-control-plane-web.adoc index d28a3c9cc1..d629fcd0b5 100644 --- a/modules/ossm-control-plane-web.adoc +++ b/modules/ossm-control-plane-web.adoc @@ -40,7 +40,7 @@ endif::openshift-rosa,openshift-rosa-hcp,openshift-dedcated[] + .. Click *Create*. -. Navigate to *Operators* -> *Installed Operators*. +. Navigate to *Ecosystem* -> *Installed Operators*. . Click the {SMProductName} Operator, then click *Istio Service Mesh Control Plane*. diff --git a/modules/ossm-defining-namespace-receive-sidecar-injection-cluster-wide-mesh-console.adoc b/modules/ossm-defining-namespace-receive-sidecar-injection-cluster-wide-mesh-console.adoc index c2a5c2ac75..908fd5f169 100644 --- a/modules/ossm-defining-namespace-receive-sidecar-injection-cluster-wide-mesh-console.adoc +++ b/modules/ossm-defining-namespace-receive-sidecar-injection-cluster-wide-mesh-console.adoc @@ -22,7 +22,7 @@ Using discovery selectors to determine which namespaces the mesh can discover ha . Log in to the {product-title} web console. -. Navigate to *Operators* -> *Installed Operators*. +. Navigate to *Ecosystem* -> *Installed Operators*. . Click the {SMProductName} Operator. diff --git a/modules/ossm-deploy-cluster-wide-control-plane-console.adoc b/modules/ossm-deploy-cluster-wide-control-plane-console.adoc index cf6de3a3d9..32f319d932 100644 --- a/modules/ossm-deploy-cluster-wide-control-plane-console.adoc +++ b/modules/ossm-deploy-cluster-wide-control-plane-console.adoc @@ -32,7 +32,7 @@ These steps use `istio-system` as an example. You can deploy the {SMProductShort + .. Click *Create*. -. Navigate to *Operators* -> *Installed Operators*. +. Navigate to *Ecosystem* -> *Installed Operators*. . Click the {SMProductName} Operator, then click *Istio Service Mesh Control Plane*. diff --git a/modules/ossm-excluding-namespaces-from-cluster-wide-mesh-console.adoc b/modules/ossm-excluding-namespaces-from-cluster-wide-mesh-console.adoc index 505b053741..e5799c89ae 100644 --- a/modules/ossm-excluding-namespaces-from-cluster-wide-mesh-console.adoc +++ b/modules/ossm-excluding-namespaces-from-cluster-wide-mesh-console.adoc @@ -22,7 +22,7 @@ If you install ingress or egress gateways in the control plane namespace, you mu . Log in to the {product-title} web console. -. Navigate to *Operators* -> *Installed Operators*. +. Navigate to *Ecosystem* -> *Installed Operators*. . Click the {SMProductName} Operator. @@ -66,4 +66,4 @@ spec: + If a namespace matches any of the discovery selectors, then the mesh discovers the namespace. The mesh excludes namespaces that do not match any of the discovery selectors. -. Save the file. \ No newline at end of file +. Save the file. 
diff --git a/modules/ossm-federation-config-smcp.adoc b/modules/ossm-federation-config-smcp.adoc index 45ce20f503..c1aabc4180 100644 --- a/modules/ossm-federation-config-smcp.adoc +++ b/modules/ossm-federation-config-smcp.adoc @@ -410,7 +410,7 @@ Follow this procedure to edit the `ServiceMeshControlPlane` with the {product-ti . Log in to the {product-title} web console as a user with the cluster-admin role. -. Navigate to *Operators* -> *Installed Operators*. +. Navigate to *Ecosystem* -> *Installed Operators*. . Click the *Project* menu and select the project where you installed the {SMProductShortName} control plane. For example, `red-mesh-system`. diff --git a/modules/ossm-federation-create-export.adoc b/modules/ossm-federation-create-export.adoc index 852a097b32..de2fef7426 100644 --- a/modules/ossm-federation-create-export.adoc +++ b/modules/ossm-federation-create-export.adoc @@ -33,7 +33,7 @@ This is conjecture about what the flow might look like. Follow this procedure to create an `ExportedServiceSet` with the web console. This example shows the red-mesh exporting the ratings service from the bookinfo application to the green-mesh. . Log in to the {product-title} web console as a user with the cluster-admin role. -. Navigate to *Operators* → *Installed Operators*. +. Navigate to *Ecosystem* -> *Installed Operators*. . Click the *Project* menu and select the project where you installed the {SMProductShortName} control plane for the mesh that will export services. For example, `red-mesh-system`. . Click the {SMProductName} Operator, then click *Istio Service Mesh ExportedServiceSet*. . On the *Istio Service Mesh ExportedServiceSet* tab, click *Create ExportedServiceSet*. diff --git a/modules/ossm-federation-create-import.adoc b/modules/ossm-federation-create-import.adoc index c59fe55448..bb2b5a25c0 100644 --- a/modules/ossm-federation-create-import.adoc +++ b/modules/ossm-federation-create-import.adoc @@ -28,7 +28,7 @@ This is conjecture about what the flow might look like. Follow this procedure to create an `ImportedServiceSet` with the web console. This example shows the green-mesh importing the ratings service that was exported by the red-mesh. . Log in to the {product-title} web console as a user with the cluster-admin role. -. Navigate to *Operators* → *Installed Operators*. +. Navigate to *Ecosystem* -> *Installed Operators*. . Click the *Project* menu and select the project where you installed the {SMProductShortName} control plane for the mesh you want to import services into. For example, `green-mesh-system`. . Click the {SMProductName} Operator, then click *Istio Service Mesh ImportedServiceSet*. . On the *Istio Service Mesh ImportedServiceSet* tab, click *Create ImportedServiceSet*. diff --git a/modules/ossm-federation-create-meshPeer.adoc b/modules/ossm-federation-create-meshPeer.adoc index 0848ac82b8..24193e1990 100644 --- a/modules/ossm-federation-create-meshPeer.adoc +++ b/modules/ossm-federation-create-meshPeer.adoc @@ -22,7 +22,7 @@ This is conjecture about what the flow might look like… Follow this procedure to create a `ServiceMeshPeer` resource from the console. This example shows the `red-mesh` creating a peer resource for the `green-mesh`. . Log in to the {product-title} web console as a user with the cluster-admin role. -. Navigate to *Operators* → *Installed Operators*. +. Navigate to *Ecosystem* -> *Installed Operators*. . 
Click the *Project* menu and select the project where you installed the control plane for the mesh that is creating the `ServiceMeshPeer` resource. For example, `red-mesh-system`. . Click the {SMProductName} Operator, then click *Istio Service Mesh ServiceMeshPeer*. . On the *Istio Service Mesh ServiceMeshPeer* tab, click *Create ServiceMeshPeer*. diff --git a/modules/ossm-install-kiali.adoc b/modules/ossm-install-kiali.adoc index f6bfeb792d..27d9c430fe 100644 --- a/modules/ossm-install-kiali.adoc +++ b/modules/ossm-install-kiali.adoc @@ -23,7 +23,7 @@ Do not install Community versions of the Operators. Community Operators are not . Log in to the {product-title} web console. -. Navigate to *Operators* -> *OperatorHub*. +. Navigate to *Ecosystem* -> *Software Catalog*. . Type *Kiali* into the filter box to find the Kiali Operator. diff --git a/modules/ossm-install-ossm-operator.adoc b/modules/ossm-install-ossm-operator.adoc index aae688fd70..995f8b60d4 100644 --- a/modules/ossm-install-ossm-operator.adoc +++ b/modules/ossm-install-ossm-operator.adoc @@ -38,7 +38,7 @@ ifdef::openshift-rosa,openshift-rosa-hcp,openshift-dedicated[] . Log in to the {product-title} web console as a user with the `dedicated-admin` role. endif::openshift-rosa,openshift-rosa-hcp,openshift-dedicated[] -. In the {product-title} web console, click *Operators* -> *OperatorHub*. +. In the {product-title} web console, click *Ecosystem* -> *Software Catalog*. . Type the name of the Operator into the filter box and select the Red Hat version of the Operator. Community versions of the Operators are not supported. @@ -67,4 +67,4 @@ Starting with {SMProductName} 2.5, {es-op} is deprecated and will be removed in .Verification -* After all you have installed all four Operators, click *Operators* -> *Installed Operators* to verify that your Operators are installed. +* After you have installed all four Operators, click *Ecosystem* -> *Installed Operators* to verify that your Operators are installed. diff --git a/modules/ossm-jaeger-config-elasticsearch-v1x.adoc b/modules/ossm-jaeger-config-elasticsearch-v1x.adoc index ce92289dc0..9e184d35af 100644 --- a/modules/ossm-jaeger-config-elasticsearch-v1x.adoc +++ b/modules/ossm-jaeger-config-elasticsearch-v1x.adoc @@ -113,7 +113,7 @@ Minimum deployment = 16Gi* . Log in to the {product-title} web console as a user with the `cluster-admin` role. -. Navigate to *Operators* -> *Installed Operators*. +. Navigate to *Ecosystem* -> *Installed Operators*. . Click the {SMProductName} Operator. diff --git a/modules/ossm-member-roll-create.adoc b/modules/ossm-member-roll-create.adoc index a967373582..695c6bd05e 100644 --- a/modules/ossm-member-roll-create.adoc +++ b/modules/ossm-member-roll-create.adoc @@ -32,7 +32,7 @@ You can add one or more projects to the {SMProductShortName} member roll from th .. Click *Create*. -. Navigate to *Operators* -> *Installed Operators*. +. Navigate to *Ecosystem* -> *Installed Operators*. . Click the *Project* menu and choose the project where your `ServiceMeshControlPlane` resource is deployed from the list, for example `istio-system`. diff --git a/modules/ossm-member-roll-modify.adoc b/modules/ossm-member-roll-modify.adoc index 44eee39949..7a8117443b 100644 --- a/modules/ossm-member-roll-modify.adoc +++ b/modules/ossm-member-roll-modify.adoc @@ -26,7 +26,7 @@ You can add or remove projects from an existing {SMProductShortName} `ServiceMes . Log in to the {product-title} web console. -. Navigate to *Operators* -> *Installed Operators*. +.
Navigate to *Ecosystem* -> *Installed Operators*. . Click the *Project* menu and choose the project where your `ServiceMeshControlPlane` resource is deployed from the list, for example `istio-system`. diff --git a/modules/ossm-migrating-to-20.adoc b/modules/ossm-migrating-to-20.adoc index bef0ed47f9..e978765be8 100644 --- a/modules/ossm-migrating-to-20.adoc +++ b/modules/ossm-migrating-to-20.adoc @@ -96,7 +96,7 @@ $ oc create -n istio-system-upgrade -f .v2.yaml + Alternatively, you can use the console to create the {SMProductShortName} control plane. In the {product-title} web console, click *Project*. Then, select the project name you just entered. + -.. Click *Operators* -> *Installed Operators*. +.. Click *Ecosystem* -> *Installed Operators*. .. Click *Create ServiceMeshControlPlane*. .. Select *YAML view* and paste text of the YAML file you retrieved into the field. Check that the `apiVersion` field is set to `maistra.io/v2` and modify the `metadata.namespace` field to use the new namespace, for example `istio-system-upgrade`. .. Click *Create*. diff --git a/modules/ossm-recommended-resources.adoc b/modules/ossm-recommended-resources.adoc index 94cfa2ae46..25e42141b3 100644 --- a/modules/ossm-recommended-resources.adoc +++ b/modules/ossm-recommended-resources.adoc @@ -13,7 +13,7 @@ The settings in the following example are based on 1,000 services and 1,000 requ .Procedure -. In the {product-title} web console, click *Operators* -> *Installed Operators*. +. In the {product-title} web console, click *Ecosystem* -> *Installed Operators*. . Click the *Project* menu and select the project where you installed the {SMProductShortName} control plane, for example *istio-system*. diff --git a/modules/ossm-remove-operators.adoc b/modules/ossm-remove-operators.adoc index 350522ad72..7e590a124a 100644 --- a/modules/ossm-remove-operators.adoc +++ b/modules/ossm-remove-operators.adoc @@ -23,6 +23,6 @@ Follow this procedure to remove the Operators that make up {SMProductName}. Repe . Log in to the {product-title} web console. -. From the *Operators* → *Installed Operators* page, scroll or type a keyword into the *Filter by name* to find each Operator. Then, click the Operator name. +. From the *Ecosystem* -> *Installed Operators* page, scroll or type a keyword into the *Filter by name* to find each Operator. Then, click the Operator name. . On the *Operator Details* page, select *Uninstall Operator* from the *Actions* menu. Follow the prompts to uninstall each Operator. diff --git a/modules/ossm-rn-new-features.adoc b/modules/ossm-rn-new-features.adoc index 9897b87d46..af5e0bf9bd 100644 --- a/modules/ossm-rn-new-features.adoc +++ b/modules/ossm-rn-new-features.adoc @@ -1430,7 +1430,7 @@ When you disable `spec.security.manageNetworkPolicy` {SMProductName} will not cr .Procedure -. In the {product-title} web console, click *Operators* -> *Installed Operators*. +. In the {product-title} web console, click *Ecosystem* -> *Installed Operators*. . Select the project where you installed the {SMProductShortName} control plane, for example `istio-system`, from the Project menu. diff --git a/modules/ossm-troubleshooting-operators.adoc b/modules/ossm-troubleshooting-operators.adoc index a19e56d6c4..6676279bc3 100644 --- a/modules/ossm-troubleshooting-operators.adoc +++ b/modules/ossm-troubleshooting-operators.adoc @@ -14,7 +14,7 @@ If you experience Operator issues: [NOTE] ==== -You can install Operators only through the OpenShift console, the OperatorHub is not accessible from the command line. 
+You can install Operators only through the OpenShift console; the software catalog is not accessible from the command line. ==== == Viewing Operator pod logs diff --git a/modules/ossm-tutorial-bookinfo-install.adoc b/modules/ossm-tutorial-bookinfo-install.adoc index 9116f44c74..878f748a90 100644 --- a/modules/ossm-tutorial-bookinfo-install.adoc +++ b/modules/ossm-tutorial-bookinfo-install.adoc @@ -49,7 +49,7 @@ The commands in this section assume the {SMProductShortName} control plane proje $ oc new-project bookinfo ---- + -. Click *Operators* -> *Installed Operators*. +. Click *Ecosystem* -> *Installed Operators*. . Click the *Project* menu and use the {SMProductShortName} control plane namespace. In this example, use `istio-system`. diff --git a/modules/ossm-tutorial-bookinfo-removing.adoc b/modules/ossm-tutorial-bookinfo-removing.adoc index b3b8e883d4..fdad9d00c8 100644 --- a/modules/ossm-tutorial-bookinfo-removing.adoc +++ b/modules/ossm-tutorial-bookinfo-removing.adoc @@ -43,7 +43,7 @@ $ oc delete project bookinfo . Log in to the {product-title} web console. -. Click *Operators* -> *Installed Operators*. +. Click *Ecosystem* -> *Installed Operators*. . Click the *Project* menu and choose `istio-system` from the list. diff --git a/modules/ossm-upgrading-smcp.adoc b/modules/ossm-upgrading-smcp.adoc index 4dd4fcb429..e5f78d4599 100644 --- a/modules/ossm-upgrading-smcp.adoc +++ b/modules/ossm-upgrading-smcp.adoc @@ -51,7 +51,7 @@ spec: + Alternatively, instead of using the command line, you can use the web console to edit the {SMProductShortName} control plane. In the {product-title} web console, click *Project* and select the project name you just entered. + -.. Click *Operators* -> *Installed Operators*. +.. Click *Ecosystem* -> *Installed Operators*. .. Find your `ServiceMeshControlPlane` instance. .. Select *YAML view* and update text of the YAML file, as shown in the previous example. .. Click *Save*. diff --git a/modules/ossm-validating-smcp.adoc b/modules/ossm-validating-smcp.adoc index d2668b803f..5d4196a721 100644 --- a/modules/ossm-validating-smcp.adoc +++ b/modules/ossm-validating-smcp.adoc @@ -31,7 +31,7 @@ You view the Jaeger components under the {JaegerName} Operator and the Elasticse You can verify the {SMProductShortName} control plane installation in the {product-title} web console. -. Navigate to *Operators* -> *Installed Operators*. +. Navigate to *Ecosystem* -> *Installed Operators*. . Select the `istio-system` namespace. . Select the {SMProductName} Operator. .. Click the *Istio Service Mesh Control Plane* tab. @@ -39,21 +39,21 @@ You can verify the {SMProductShortName} control plane installation in the {produ .. To view the resources created by the deployment, click the *Resources* tab. You can use the filter to narrow your view, for example, to check that all the *Pods* have a status of `running`. .. If the SMCP status indicates any problems, check the `status:` output in the YAML file for more information. -. Navigate back to *Operators* -> *Installed Operators*. +. Navigate back to *Ecosystem* -> *Installed Operators*. . Select the OpenShift Elasticsearch Operator. .. Click the *Elasticsearch* tab. .. Click the name of the deployment, for example `elasticsearch`. .. To view the resources created by the deployment, click the *Resources* tab. . .. If the `Status` column any problems, check the `status:` output on the *YAML* tab for more information. -. Navigate back to *Operators* -> *Installed Operators*. +. Navigate back to *Ecosystem* -> *Installed Operators*. .
Select the {JaegerName} Operator. .. Click the *Jaeger* tab. .. Click the name of your deployment, for example `jaeger`. .. To view the resources created by the deployment, click the *Resources* tab. .. If the `Status` column indicates any problems, check the `status:` output on the *YAML* tab for more information. -. Navigate to *Operators* -> *Installed Operators*. +. Navigate to *Ecosystem* -> *Installed Operators*. . Select the Kiali Operator. .. Click the *Istio Service Mesh Control Plane* tab. .. Click the name of your deployment, for example `kiali`. diff --git a/modules/otel-install-web-console.adoc b/modules/otel-install-web-console.adoc index 8945d9a813..5e6291e546 100644 --- a/modules/otel-install-web-console.adoc +++ b/modules/otel-install-web-console.adoc @@ -18,7 +18,7 @@ You can install the {OTELShortName} from the *Administrator* view of the web con . Install the {OTELOperator}: -.. Go to *Operators* -> *OperatorHub* and search for `{OTELOperator}`. +.. Go to *Ecosystem* -> *Software Catalog* and search for `{OTELOperator}`. .. Select the *{OTELOperator}* that is *provided by Red Hat* -> *Install* -> *Install* -> *View Operator*. + @@ -38,7 +38,7 @@ This installs the Operator with the default presets: . Create an *OpenTelemetry Collector* instance. -.. Go to *Operators* -> *Installed Operators*. +.. Go to *Ecosystem* -> *Installed Operators*. .. Select *OpenTelemetry Collector* -> *Create OpenTelemetry Collector* -> *YAML view*. @@ -54,6 +54,6 @@ include::snippets/otel-collector-custom-resource.adoc[] . Use the *Project:* dropdown list to select the project of the *OpenTelemetry Collector* instance. -. Go to *Operators* -> *Installed Operators* to verify that the *Status* of the *OpenTelemetry Collector* instance is *Condition: Ready*. +. Go to *Ecosystem* -> *Installed Operators* to verify that the *Status* of the *OpenTelemetry Collector* instance is *Condition: Ready*. . Go to *Workloads* -> *Pods* to verify that all the component pods of the *OpenTelemetry Collector* instance are running. diff --git a/modules/otel-remove-web-console.adoc b/modules/otel-remove-web-console.adoc index 66f4774bbb..b8bbaffef2 100644 --- a/modules/otel-remove-web-console.adoc +++ b/modules/otel-remove-web-console.adoc @@ -16,7 +16,7 @@ You can remove an OpenTelemetry Collector instance in the *Administrator* view o .Procedure -. Go to *Operators* -> *Installed Operators* -> *{OTELOperator}* -> *OpenTelemetryInstrumentation* or *OpenTelemetryCollector*. +. Go to *Ecosystem* -> *Installed Operators* -> *{OTELOperator}* -> *OpenTelemetryInstrumentation* or *OpenTelemetryCollector*. . To remove the relevant instance, select {kebab} -> *Delete* ... -> *Delete*. diff --git a/modules/persistent-storage-csi-gcp-file-install.adoc b/modules/persistent-storage-csi-gcp-file-install.adoc index f8f4352a55..a6f384fe08 100644 --- a/modules/persistent-storage-csi-gcp-file-install.adoc +++ b/modules/persistent-storage-csi-gcp-file-install.adoc @@ -43,7 +43,7 @@ You can also do this using Google Cloud web console. . Install the {gcp-short} Filestore CSI Operator: -.. Click *Operators* -> *OperatorHub*. +.. Click *Ecosystem* -> *Software Catalog*. .. Locate the {gcp-short} Filestore CSI Operator by typing *{gcp-short} Filestore* in the filter box. 
@@ -93,4 +93,4 @@ spec: * GCPFilestoreDriverNodeServiceControllerAvailable -* GCPFilestoreDriverControllerServiceControllerAvailable \ No newline at end of file +* GCPFilestoreDriverControllerServiceControllerAvailable diff --git a/modules/persistent-storage-csi-olm-operator-install.adoc b/modules/persistent-storage-csi-olm-operator-install.adoc index 464a58a2c1..937b3d4df4 100644 --- a/modules/persistent-storage-csi-olm-operator-install.adoc +++ b/modules/persistent-storage-csi-olm-operator-install.adoc @@ -36,7 +36,7 @@ To install the {FeatureName} CSI Driver Operator from the web console: . Install the {FeatureName} CSI Operator: -.. Click *Operators* -> *OperatorHub*. +.. Click *Ecosystem* -> *Software Catalog*. .. Locate the {FeatureName} CSI Operator by typing *{FeatureName} CSI* in the filter box. diff --git a/modules/persistent-storage-csi-olm-operator-uninstall.adoc b/modules/persistent-storage-csi-olm-operator-uninstall.adoc index 882817de25..c1760de8aa 100644 --- a/modules/persistent-storage-csi-olm-operator-uninstall.adoc +++ b/modules/persistent-storage-csi-olm-operator-uninstall.adoc @@ -41,7 +41,7 @@ Before you can uninstall the Operator, you must remove the CSI driver first. . Uninstall the {FeatureName} CSI Operator: -.. Click *Operators* -> *Installed Operators*. +.. Click *Ecosystem* -> *Installed Operators*. .. On the *Installed Operators* page, scroll or type {FeatureName} CSI into the *Search by name* box to find the Operator, and then click it. diff --git a/modules/persistent-storage-csi-secrets-store-driver-install.adoc b/modules/persistent-storage-csi-secrets-store-driver-install.adoc index 8bc0068e06..a1cbf5938a 100644 --- a/modules/persistent-storage-csi-secrets-store-driver-install.adoc +++ b/modules/persistent-storage-csi-secrets-store-driver-install.adoc @@ -18,7 +18,7 @@ To install the {secrets-store-driver}: . Install the {secrets-store-operator}: .. Log in to the web console. -.. Click *Operators* → *OperatorHub*. +.. Click *Ecosystem* -> *Software Catalog*. .. Locate the {secrets-store-operator} by typing "Secrets Store CSI" in the filter box. .. Click the *Secrets Store CSI Driver Operator* button. .. On the *Secrets Store CSI Driver Operator* page, click *Install*. diff --git a/modules/persistent-storage-csi-secrets-store-driver-uninstall.adoc b/modules/persistent-storage-csi-secrets-store-driver-uninstall.adoc index 6de18c362d..04eade7d6d 100644 --- a/modules/persistent-storage-csi-secrets-store-driver-uninstall.adoc +++ b/modules/persistent-storage-csi-secrets-store-driver-uninstall.adoc @@ -30,7 +30,7 @@ To uninstall the {secrets-store-operator}: Before you can uninstall the Operator, you must remove the CSI driver first. ==== + -.. Click *Operators* → *Installed Operators*. +.. Click *Ecosystem* -> *Installed Operators*. .. On the *Installed Operators* page, scroll or type "Secrets Store CSI" into the *Search by name* box to find the Operator, and then click it. .. On the upper, right of the *Installed Operators* > *Operator details* page, click *Actions* → *Uninstall Operator*. .. When prompted on the *Uninstall Operator* window, click the *Uninstall* button to remove the Operator from the namespace. Any applications deployed by the Operator on the cluster need to be cleaned up manually. 
diff --git a/modules/persistent-storage-local-discovery.adoc b/modules/persistent-storage-local-discovery.adoc index 4193f705a1..53e4afd58e 100644 --- a/modules/persistent-storage-local-discovery.adoc +++ b/modules/persistent-storage-local-discovery.adoc @@ -36,7 +36,7 @@ Use the `LocalVolumeSet` object with caution. When you automatically provision p . To enable automatic discovery of local devices from the web console: -.. Click *Operators* -> *Installed Operators*. +.. Click *Ecosystem* -> *Installed Operators*. .. In the `openshift-local-storage` namespace, click *Local Storage*. @@ -64,7 +64,7 @@ The device list updates continuously as local disks are added or removed. You ca . To automatically provision local volumes for the discovered devices from the web console: -.. Navigate to *Operators* -> *Installed Operators* and select *Local Storage* from the list of Operators. +.. Navigate to *Ecosystem* -> *Installed Operators* and select *Local Storage* from the list of Operators. .. Select *Local Volume Set* -> *Create Local Volume Set*. diff --git a/modules/persistent-storage-local-install.adoc b/modules/persistent-storage-local-install.adoc index b108bf1c6f..38da4418de 100644 --- a/modules/persistent-storage-local-install.adoc +++ b/modules/persistent-storage-local-install.adoc @@ -51,7 +51,7 @@ To install the Local Storage Operator from the web console, follow these steps: . Log in to the {product-title} web console. -. Navigate to *Operators* -> *OperatorHub*. +. Navigate to *Ecosystem* -> *Software Catalog*. . Type *Local Storage* into the filter box to locate the Local Storage Operator. diff --git a/modules/persistent-storage-local-metrics.adoc b/modules/persistent-storage-local-metrics.adoc index 389209a57c..7b500057cf 100644 --- a/modules/persistent-storage-local-metrics.adoc +++ b/modules/persistent-storage-local-metrics.adoc @@ -21,7 +21,7 @@ To use these metrics, enable them by doing one of the following: -* When installing the Local Storage Operator from _OperatorHub_ in the web console, select the _Enable Operator recommended cluster monitoring on this Namespace_ checkbox. +* When installing the Local Storage Operator from the software catalog in the web console, select the _Enable Operator recommended cluster monitoring on this Namespace_ checkbox. * Manually add the `openshift.io/cluster-monitoring=true` label to the Operator namespace by running the following command: + diff --git a/modules/persistent-storage-local-uninstall-operator.adoc b/modules/persistent-storage-local-uninstall-operator.adoc index abd7e3c69e..1562cd6957 100644 --- a/modules/persistent-storage-local-uninstall-operator.adoc +++ b/modules/persistent-storage-local-uninstall-operator.adoc @@ -41,7 +41,7 @@ $ oc delete localvolumediscovery --all --all-namespaces .. Log in to the {product-title} web console. -.. Navigate to *Operators* -> *Installed Operators*. +.. Navigate to *Ecosystem* -> *Installed Operators*. .. Type *Local Storage* into the filter box to locate the Local Storage Operator. diff --git a/modules/pipelines-web-console.adoc b/modules/pipelines-web-console.adoc index 81cc3f9004..422c17e06f 100644 --- a/modules/pipelines-web-console.adoc +++ b/modules/pipelines-web-console.adoc @@ -6,4 +6,4 @@ [id="pipelines-web-console_{context}"] = {pipelines-title} in the web console -{pipelines-title} is a cloud-native, continuous integration and continuous delivery (CI/CD) solution based on Kubernetes resources. 
Install the {pipelines-title} Operator using the OperatorHub in the {product-title} web console. Once the Operator is installed, you can create and modify pipeline objects on *Pipelines* page. \ No newline at end of file +{pipelines-title} is a cloud-native, continuous integration and continuous delivery (CI/CD) solution based on Kubernetes resources. Install the {pipelines-title} Operator using the software catalog in the {product-title} web console. Once the Operator is installed, you can create and modify pipeline objects on the *Pipelines* page. diff --git a/modules/power-monitoring-deleting-kepler.adoc b/modules/power-monitoring-deleting-kepler.adoc index 4c81ad6c3d..2efaa7d0c1 100644 --- a/modules/power-monitoring-deleting-kepler.adoc +++ b/modules/power-monitoring-deleting-kepler.adoc @@ -19,7 +19,7 @@ Starting with {PM-title} 0.5 (Technology Preview), use the `PowerMonitor` CRD, a .Procedure -. In the web console, go to *Operators* -> *Installed Operators*. +. In the web console, go to *Ecosystem* -> *Installed Operators*. . Click *{PM-title-c}* from the *Installed Operators* list and go to the *{PM-kepler}* tab. diff --git a/modules/power-monitoring-deleting-power-monitor-custom-resource.adoc b/modules/power-monitoring-deleting-power-monitor-custom-resource.adoc index 3794e48cd5..61928a03f0 100644 --- a/modules/power-monitoring-deleting-power-monitor-custom-resource.adoc +++ b/modules/power-monitoring-deleting-power-monitor-custom-resource.adoc @@ -15,7 +15,7 @@ You can delete the `PowerMonitor` custom resource (CR) by removing the `power-mo .Procedure -. In the web console, go to *Operators* → *Installed Operators*. +. In the web console, go to *Ecosystem* -> *Installed Operators*. . Click *{PM-title-c}* from the *Installed Operators* list and go to the *PowerMonitor* tab. @@ -23,4 +23,4 @@ You can delete the `PowerMonitor` custom resource (CR) by removing the `power-mo . Click the {kebab} for this entry and select *Delete PowerMonitor*. -. In the *Delete PowerMonitor?* dialog, click *Delete* to delete the `PowerMonitor` instance. \ No newline at end of file +. In the *Delete PowerMonitor?* dialog, click *Delete* to delete the `PowerMonitor` instance. diff --git a/modules/power-monitoring-deploying-power-monitor-custom-resource.adoc b/modules/power-monitoring-deploying-power-monitor-custom-resource.adoc index ed11118a6d..65c3c17eee 100644 --- a/modules/power-monitoring-deploying-power-monitor-custom-resource.adoc +++ b/modules/power-monitoring-deploying-power-monitor-custom-resource.adoc @@ -20,7 +20,7 @@ The `Kepler` custom resource definition (CRD) has been deprecated and will be re .Procedure -. In the web console, go to *Operators* -> *Installed Operators*. +. In the web console, go to *Ecosystem* -> *Installed Operators*. . Click *{PM-title-c}* from the *Installed Operators* list and go to the *PowerMonitor* tab. diff --git a/modules/power-monitoring-installing-pmo.adoc b/modules/power-monitoring-installing-pmo.adoc index 46654149af..baa231d578 100644 --- a/modules/power-monitoring-installing-pmo.adoc +++ b/modules/power-monitoring-installing-pmo.adoc @@ -6,7 +6,7 @@ [id="power-monitoring-installing-pmo_{context}"] = Installing the {PM-operator} -As a cluster administrator, you can install the {PM-operator} from OperatorHub by using the {product-title} web console. +As a cluster administrator, you can install the {PM-operator} from the software catalog by using the {product-title} web console.
[WARNING] ==== @@ -19,7 +19,7 @@ You must remove any previously installed versions of the {PM-operator} before in .Procedure -. In the web console, go to *Operators* -> *OperatorHub*. +. In the web console, go to *Ecosystem* -> *Software Catalog*. . Search for `{PM-shortname}`, click the *{PM-title-c}* tile, and then click *Install*. //. On the *Install Operator* page: @@ -33,4 +33,4 @@ You must remove any previously installed versions of the {PM-operator} before in .Verification -. Verify that the {PM-operator} is listed in *Operators* -> *Installed Operators*. The *Status* should resolve to *Succeeded*. \ No newline at end of file +. Verify that the {PM-operator} is listed in *Ecosystem* -> *Installed Operators*. The *Status* should resolve to *Succeeded*. diff --git a/modules/power-monitoring-uninstalling-pmo.adoc b/modules/power-monitoring-uninstalling-pmo.adoc index 975d53f698..a8fd46db79 100644 --- a/modules/power-monitoring-uninstalling-pmo.adoc +++ b/modules/power-monitoring-uninstalling-pmo.adoc @@ -6,7 +6,7 @@ [id="power-monitoring-uninstalling-pmo_{context}"] = Uninstalling the {PM-operator} -If you installed the {PM-operator} by using OperatorHub, you can uninstall it from the {product-title} web console. +If you installed the {PM-operator} by using the software catalog, you can uninstall it from the {product-title} web console. .Prerequisites * You have access to the {product-title} web console. @@ -21,7 +21,7 @@ If you installed the {PM-operator} by using OperatorHub, you can uninstall it fr Ensure that you have deleted the {PM-kepler} instance before uninstalling the {PM-operator}. ==== -. Go to *Operators* → *Installed Operators*. +. Go to *Ecosystem* -> *Installed Operators*. . Locate the *{PM-title-c}* entry in the list. diff --git a/modules/psap-driver-toolkit.adoc b/modules/psap-driver-toolkit.adoc index 36903e0846..4c8bcfc605 100644 --- a/modules/psap-driver-toolkit.adoc +++ b/modules/psap-driver-toolkit.adoc @@ -39,4 +39,4 @@ The Driver Toolkit also has several tools that are commonly needed to build and == Purpose Prior to the Driver Toolkit's existence, users would install kernel packages in a pod or build config on {product-title} using link:https://www.openshift.com/blog/how-to-use-entitled-image-builds-to-build-drivercontainers-with-ubi-on-openshift[entitled builds] or by installing from the kernel RPMs in the hosts `machine-os-content`. The Driver Toolkit simplifies the process by removing the entitlement step, and avoids the privileged operation of accessing the machine-os-content in a pod. The Driver Toolkit can also be used by partners who have access to pre-released {product-title} versions to prebuild driver-containers for their hardware devices for future {product-title} releases. -The Driver Toolkit is also used by the Kernel Module Management (KMM), which is currently available as a community Operator on OperatorHub. KMM supports out-of-tree and third-party kernel drivers and the support software for the underlying operating system. Users can create modules for KMM to build and deploy a driver container, as well as support software like a device plugin, or metrics. Modules can include a build config to build a driver container-based on the Driver Toolkit, or KMM can deploy a prebuilt driver container. +The Driver Toolkit is also used by the Kernel Module Management (KMM), which is currently available as a community Operator in the software catalog. 
KMM supports out-of-tree and third-party kernel drivers and the support software for the underlying operating system. Users can create modules for KMM to build and deploy a driver container, as well as support software like a device plugin, or metrics. Modules can include a build config to build a driver container-based on the Driver Toolkit, or KMM can deploy a prebuilt driver container. diff --git a/modules/psap-installing-node-feature-discovery-operator.adoc b/modules/psap-installing-node-feature-discovery-operator.adoc index fb97b384a3..0c21f6da57 100644 --- a/modules/psap-installing-node-feature-discovery-operator.adoc +++ b/modules/psap-installing-node-feature-discovery-operator.adoc @@ -124,7 +124,7 @@ As a cluster administrator, you can install the NFD Operator using the web conso .Procedure -. In the {product-title} web console, click *Operators* -> *OperatorHub*. +. In the {product-title} web console, click *Ecosystem* -> *Software Catalog*. . Choose *Node Feature Discovery* from the list of available Operators, and then click *Install*. @@ -134,7 +134,7 @@ As a cluster administrator, you can install the NFD Operator using the web conso To verify that the NFD Operator installed successfully: -. Navigate to the *Operators* -> *Installed Operators* page. +. Navigate to the *Ecosystem* -> *Installed Operators* page. . Ensure that *Node Feature Discovery* is listed in the *openshift-nfd* project with a *Status* of *InstallSucceeded*. + [NOTE] @@ -146,5 +146,5 @@ During installation an Operator might display a *Failed* status. If the installa If the Operator does not appear as installed, troubleshoot further: -. Navigate to the *Operators* -> *Installed Operators* page and inspect the *Operator Subscriptions* and *Install Plans* tabs for any failure or errors under *Status*. +. Navigate to the *Ecosystem* -> *Installed Operators* page and inspect the *Operator Subscriptions* and *Install Plans* tabs for any failure or errors under *Status*. . Navigate to the *Workloads* -> *Pods* page and check the logs for pods in the `openshift-nfd` project. diff --git a/modules/red-hat-marketplace-features.adoc b/modules/red-hat-marketplace-features.adoc index fb9f0cb15f..f89108a9ef 100644 --- a/modules/red-hat-marketplace-features.adoc +++ b/modules/red-hat-marketplace-features.adoc @@ -18,9 +18,9 @@ a Marketplace Operator is installed that updates the image registry secret, mana [id="marketplace-install-applications_{context}"] == Install applications -Cluster administrators can link:https://marketplace.redhat.com/en-us/documentation/operators[install Marketplace applications] from within OperatorHub in {product-title}, or from the link:https://marketplace.redhat.com[Marketplace web application]. +Cluster administrators can link:https://marketplace.redhat.com/en-us/documentation/operators[install Marketplace applications] from within the software catalog in {product-title}, or from the link:https://marketplace.redhat.com[Marketplace web application]. -You can access installed applications from the web console by clicking **Operators > Installed Operators**. +You can access installed applications from the web console by clicking *Ecosystem* -> *Installed Operators*. 
[id="marketplace-deploy_{context}"] == Deploy applications from different perspectives diff --git a/modules/removing-cso-operator.adoc b/modules/removing-cso-operator.adoc index 2586826701..3d061edc71 100644 --- a/modules/removing-cso-operator.adoc +++ b/modules/removing-cso-operator.adoc @@ -10,7 +10,7 @@ To uninstall the Container Security Operator, you must uninstall the Operator an .Procedure -. On the {product-title} web console, click *Operators* -> *Installed Operators*. +. On the {product-title} web console, click *Ecosystem* -> *Installed Operators*. . Click the Options menu {kebab} of the Container Security Operator. @@ -32,4 +32,4 @@ $ oc delete customresourcedefinition imagemanifestvulns.secscan.quay.redhat.com [source,terminal] ---- customresourcedefinition.apiextensions.k8s.io "imagemanifestvulns.secscan.quay.redhat.com" deleted ----- \ No newline at end of file +---- diff --git a/modules/removing-devworkspace-operator.adoc b/modules/removing-devworkspace-operator.adoc index 42d75d602b..df00ffcded 100644 --- a/modules/removing-devworkspace-operator.adoc +++ b/modules/removing-devworkspace-operator.adoc @@ -129,7 +129,7 @@ $ oc delete clusterrolebinding devworkspace-webhook-server ---- . Uninstall the {devworkspace-op}: -.. In the *Administrator* perspective of the web console, navigate to *Operators -> Installed Operators*. +.. In the *Administrator* perspective of the web console, navigate to *Ecosystem* -> *Installed Operators*. .. Scroll the filter list or type a keyword into the *Filter by name* box to find the {devworkspace-op}. .. Click the Options menu {kebab} for the Operator, and then select *Uninstall Operator*. .. In the *Uninstall Operator* confirmation dialog box, click *Uninstall* to remove the Operator, Operator deployments, and pods from the cluster. The Operator stops running and no longer receives updates. diff --git a/modules/removing-web-terminal-operator.adoc b/modules/removing-web-terminal-operator.adoc index fb7e52b301..c3b17e83ef 100644 --- a/modules/removing-web-terminal-operator.adoc +++ b/modules/removing-web-terminal-operator.adoc @@ -15,7 +15,7 @@ You can uninstall the web terminal by removing the {web-terminal-op} and custom .Procedure -. In the web console, navigate to *Operators -> Installed Operators*. +. In the web console, navigate to *Ecosystem* -> *Installed Operators*. . Scroll the filter list or type a keyword into the *Filter by name* box to find the {web-terminal-op}. . Click the Options menu {kebab} for the {web-terminal-op}, and then select *Uninstall Operator*. . In the *Uninstall Operator* confirmation dialog box, click *Uninstall* to remove the Operator, Operator deployments, and pods from the cluster. The Operator stops running and no longer receives updates. diff --git a/modules/rodoo-install-operator.adoc b/modules/rodoo-install-operator.adoc index 37059ba7ea..c47001b674 100644 --- a/modules/rodoo-install-operator.adoc +++ b/modules/rodoo-install-operator.adoc @@ -22,7 +22,7 @@ You can use the web console to install the {run-once-operator}. .. Enter `openshift-run-once-duration-override-operator` in the *Name* field and click *Create*. . Install the {run-once-operator}. -.. Navigate to *Operators* -> *OperatorHub*. +.. Navigate to *Ecosystem* -> *Software Catalog*. .. Enter *{run-once-operator}* into the filter box. .. Select the *{run-once-operator}* and click *Install*. .. On the *Install Operator* page: @@ -36,7 +36,7 @@ You can use the web console to install the {run-once-operator}. ... Click *Install*. . 
Create a `RunOnceDurationOverride` instance. -.. From the *Operators* -> *Installed Operators* page, click *{run-once-operator}*. +.. From the *Ecosystem* -> *Installed Operators* page, click *{run-once-operator}*. .. Select the *Run Once Duration Override* tab and click *Create RunOnceDurationOverride*. .. Edit the settings as necessary. + diff --git a/modules/rodoo-uninstall-operator.adoc b/modules/rodoo-uninstall-operator.adoc index 8f7be61130..6d228320a0 100644 --- a/modules/rodoo-uninstall-operator.adoc +++ b/modules/rodoo-uninstall-operator.adoc @@ -18,7 +18,7 @@ You can use the web console to uninstall the {run-once-operator}. Uninstalling t . Log in to the {product-title} web console. -. Navigate to *Operators* -> *Installed Operators*. +. Navigate to *Ecosystem* -> *Installed Operators*. . Select `openshift-run-once-duration-override-operator` from the *Project* dropdown list. @@ -28,6 +28,6 @@ You can use the web console to uninstall the {run-once-operator}. Uninstalling t .. In the confirmation dialog, click *Delete*. . Uninstall the {run-once-operator}. -.. Navigate to *Operators* -> *Installed Operators*. +.. Navigate to *Ecosystem* -> *Installed Operators*. .. Click the Options menu {kebab} next to the *{run-once-operator}* entry and click *Uninstall Operator*. .. In the confirmation dialog, click *Uninstall*. diff --git a/modules/rosa-policy-incident.adoc b/modules/rosa-policy-incident.adoc index d464a4b35d..a2b485f793 100644 --- a/modules/rosa-policy-incident.adoc +++ b/modules/rosa-policy-incident.adoc @@ -27,7 +27,7 @@ service, and respond to alerts. |**Red{nbsp}Hat** - Monitor, alert, and address incidents related to cluster DNS, network plugin connectivity between cluster components, and the default Ingress Controller. -|- Monitor and address incidents related to optional Ingress Controllers, additional Operators installed through the OperatorHub, and network plugins replacing the default OpenShift CNI plugins. +|- Monitor and address incidents related to optional Ingress Controllers, additional Operators installed through the software catalog, and network plugins replacing the default OpenShift CNI plugins. |Virtual networking management |**Red{nbsp}Hat** diff --git a/modules/rosa-sdpolicy-platform.adoc b/modules/rosa-sdpolicy-platform.adoc index 75a8b8e672..d9bf6a9449 100644 --- a/modules/rosa-sdpolicy-platform.adoc +++ b/modules/rosa-sdpolicy-platform.adoc @@ -163,4 +163,6 @@ Red{nbsp}Hat workloads typically refer to Red{nbsp}Hat-provided Operators made a [id="rosa-sdpolicy-kubernetes-operator_{context}"] == Kubernetes Operator support -All Operators listed in the OperatorHub marketplace should be available for installation. These Operators are considered customer workloads, and are not monitored nor managed by Red{nbsp}Hat SRE. Operators authored by Red{nbsp}Hat are supported by Red{nbsp}Hat. + +All Operators listed in the software catalog marketplace should be available for installation. These Operators are considered customer workloads, and are not monitored nor managed by Red{nbsp}Hat SRE. Operators authored by Red{nbsp}Hat are supported by Red{nbsp}Hat. + diff --git a/modules/rosa-sdpolicy-security.adoc b/modules/rosa-sdpolicy-security.adoc index 80f4950e23..40d27682d8 100644 --- a/modules/rosa-sdpolicy-security.adoc +++ b/modules/rosa-sdpolicy-security.adoc @@ -57,7 +57,7 @@ group called `dedicated-admin`. Any users on the cluster that are members of the - Can add and manage `NetworkPolicy` objects. 
- Are able to view information about specific nodes and PVs in the cluster, including scheduler information. - Can access the reserved `dedicated-admin` project on the cluster, which allows for the creation of service accounts with elevated privileges and also gives the ability to update default limits and quotas for projects on the cluster. -- Can install Operators from OperatorHub and perform all verbs in all `*.operators.coreos.com` API groups. +- Can install Operators from the software catalog and perform all verbs in all `*.operators.coreos.com` API groups. [id="rosa-sdpolicy-cluster-admin-role_{context}"] == Cluster administration role diff --git a/modules/sd-nodes-cma-autoscaling-custom-install.adoc b/modules/sd-nodes-cma-autoscaling-custom-install.adoc index e7eac6e962..0330950b78 100644 --- a/modules/sd-nodes-cma-autoscaling-custom-install.adoc +++ b/modules/sd-nodes-cma-autoscaling-custom-install.adoc @@ -43,7 +43,7 @@ $ oc create configmap -n openshift-keda thanos-cert --from-file=ca-cert.pem .Procedure -. In the {product-title} web console, click *Operators* -> *OperatorHub*. +. In the {product-title} web console, click *Ecosystem* -> *Software Catalog*. . Choose *Custom Metrics Autoscaler* from the list of available Operators, and click *Install*. diff --git a/modules/sdpolicy-platform.adoc b/modules/sdpolicy-platform.adoc index 7dd05d24ff..24f184bab1 100644 --- a/modules/sdpolicy-platform.adoc +++ b/modules/sdpolicy-platform.adoc @@ -104,4 +104,4 @@ Red Hat workloads typically refer to Red Hat-provided Operators made available t [id="kubernetes-operator-support_{context}"] == Kubernetes Operator support -All Operators listed in the OperatorHub marketplace should be available for installation. Operators installed from OperatorHub, including Red Hat Operators, are not SRE managed as part of the {product-title} service. Refer to the link:https://access.redhat.com/solutions/4807821[Red Hat Customer Portal] for more information on the supportability of a given Operator. +All Operators listed in the software catalog marketplace should be available for installation. Operators installed from the software catalog, including Red Hat Operators, are not SRE managed as part of the {product-title} service. Refer to the link:https://access.redhat.com/solutions/4807821[Red Hat Customer Portal] for more information on the supportability of a given Operator. diff --git a/modules/sdpolicy-security.adoc b/modules/sdpolicy-security.adoc index 44083b8065..3877c869dd 100644 --- a/modules/sdpolicy-security.adoc +++ b/modules/sdpolicy-security.adoc @@ -30,7 +30,7 @@ In addition to normal users, {product-title} provides access to an {product-titl * Can add and manage `NetworkPolicy` objects. * Are able to view information about specific nodes and PVs in the cluster, including scheduler information. * Can access the reserved `dedicated-admin` project on the cluster, which allows for the creation of service accounts with elevated privileges and also gives the ability to update default limits and quotas for projects on the cluster. -* Can install Operators from OperatorHub (`\*` verbs in all `*.operators.coreos.com` API groups). +* Can install Operators from the software catalog (`\*` verbs in all `*.operators.coreos.com` API groups). 
[id="cluster-admin-role_{context}"] == Cluster administration role diff --git a/modules/security-pod-scan-cso.adoc b/modules/security-pod-scan-cso.adoc index 07efcaeb04..ccc83602d7 100644 --- a/modules/security-pod-scan-cso.adoc +++ b/modules/security-pod-scan-cso.adoc @@ -18,7 +18,7 @@ You can install the {rhq-cso} from the {product-title} web console Operator Hub, . You can install the {rhq-cso} by using the {product-title} web console: -.. On the web console, navigate to *Operators* -> *OperatorHub* and select *Security*. +.. On the web console, navigate to *Ecosystem* -> *Software Catalog* and select *Security*. .. Select the *{rhq-cso}* Operator, and then select *Install*. diff --git a/modules/serverless-creating-a-kafka-event-sink.adoc b/modules/serverless-creating-a-kafka-event-sink.adoc index bb92d96f31..8ee072c132 100644 --- a/modules/serverless-creating-a-kafka-event-sink.adoc +++ b/modules/serverless-creating-a-kafka-event-sink.adoc @@ -13,7 +13,7 @@ As a developer, you can create an event sink to receive events from a particular .Prerequisites -* You have installed the {ServerlessOperatorName}, with Knative Serving, Knative Eventing, and Knative broker for Apache Kafka APIs, from the OperatorHub. +* You have installed the {ServerlessOperatorName}, with Knative Serving, Knative Eventing, and Knative broker for Apache Kafka APIs, from the software catalog. * You have created a Kafka topic in your Kafka environment. .Procedure diff --git a/modules/serverless-install-cli.adoc b/modules/serverless-install-cli.adoc index 85d2f7ad77..2d9f44e3a5 100644 --- a/modules/serverless-install-cli.adoc +++ b/modules/serverless-install-cli.adoc @@ -6,7 +6,7 @@ [id="serverless-install-cli_{context}"] = Installing the {ServerlessOperatorName} from the CLI -You can install the {ServerlessOperatorName} from the OperatorHub by using the CLI. Installing this Operator enables you to install and use Knative components. +You can install the {ServerlessOperatorName} from the software catalog by using the CLI. Installing this Operator enables you to install and use Knative components. .Prerequisites @@ -53,8 +53,8 @@ spec: ---- <1> The channel name of the Operator. The `stable` channel enables installation of the most recent stable version of the {ServerlessOperatorName}. <2> The name of the Operator to subscribe to. For the {ServerlessOperatorName}, this is always `serverless-operator`. -<3> The name of the CatalogSource that provides the Operator. Use `redhat-operators` for the default OperatorHub catalog sources. -<4> The namespace of the CatalogSource. Use `openshift-marketplace` for the default OperatorHub catalog sources. +<3> The name of the CatalogSource that provides the Operator. Use `redhat-operators` for the default software catalog sources. +<4> The namespace of the CatalogSource. Use `openshift-marketplace` for the default software catalog sources. . Create the `Subscription` object: + diff --git a/modules/serverless-install-eventing-web-console.adoc b/modules/serverless-install-eventing-web-console.adoc index 0b89755b76..f39619a384 100644 --- a/modules/serverless-install-eventing-web-console.adoc +++ b/modules/serverless-install-eventing-web-console.adoc @@ -23,7 +23,7 @@ endif::[] .Procedure -. In the *Administrator* perspective of the {product-title} web console, navigate to *Operators* → *Installed Operators*. +. In the *Administrator* perspective of the {product-title} web console, navigate to *Ecosystem* -> *Installed Operators*. . 
Check that the *Project* dropdown at the top of the page is set to *Project: knative-eventing*. diff --git a/modules/serverless-install-kafka-odc.adoc b/modules/serverless-install-kafka-odc.adoc index 59f4e892fa..48530a454a 100644 --- a/modules/serverless-install-kafka-odc.adoc +++ b/modules/serverless-install-kafka-odc.adoc @@ -28,7 +28,7 @@ endif::[] .Procedure -. In the *Administrator* perspective, navigate to *Operators* -> *Installed Operators*. +. In the *Administrator* perspective, navigate to *Ecosystem* -> *Installed Operators*. . Check that the *Project* dropdown at the top of the page is set to *Project: knative-eventing*. diff --git a/modules/serverless-install-serving-web-console.adoc b/modules/serverless-install-serving-web-console.adoc index 204d524456..b0d1bf9bac 100644 --- a/modules/serverless-install-serving-web-console.adoc +++ b/modules/serverless-install-serving-web-console.adoc @@ -23,7 +23,7 @@ endif::[] .Procedure -. In the *Administrator* perspective of the {product-title} web console, navigate to *Operators* -> *Installed Operators*. +. In the *Administrator* perspective of the {product-title} web console, navigate to *Ecosystem* -> *Installed Operators*. . Check that the *Project* dropdown at the top of the page is set to *Project: knative-serving*. diff --git a/modules/serverless-install-web-console.adoc b/modules/serverless-install-web-console.adoc index 1ce0a1b21b..272170382c 100644 --- a/modules/serverless-install-web-console.adoc +++ b/modules/serverless-install-web-console.adoc @@ -6,7 +6,7 @@ [id="serverless-install-web-console_{context}"] = Installing the {ServerlessOperatorName} from the web console -You can install the {ServerlessOperatorName} from the OperatorHub by using the {product-title} web console. Installing this Operator enables you to install and use Knative components. +You can install the {ServerlessOperatorName} from the software catalog by using the {product-title} web console. Installing this Operator enables you to install and use Knative components. .Prerequisites @@ -23,7 +23,7 @@ endif::[] .Procedure -. In the {product-title} web console, navigate to the *Operators* -> *OperatorHub* page. +. In the {product-title} web console, navigate to the *Ecosystem* -> *Software Catalog* page. . Scroll, or type the keyword *Serverless* into the *Filter by keyword* box to find the {ServerlessOperatorName}. diff --git a/modules/serverless-web-console.adoc b/modules/serverless-web-console.adoc index 3706006cab..e744d61214 100644 --- a/modules/serverless-web-console.adoc +++ b/modules/serverless-web-console.adoc @@ -6,4 +6,4 @@ [id="using-serverless-with-openshift_{context}"] = Red Hat {serverlessproductname} in the web console -Red Hat {serverlessproductname} enables developers to create and deploy serverless, event-driven applications on {product-title}. You can use the {product-title} web console OperatorHub to install the {serverlessproductname} Operator. \ No newline at end of file +Red Hat {serverlessproductname} enables developers to create and deploy serverless, event-driven applications on {product-title}. You can use the {product-title} web console software catalog to install the {serverlessproductname} Operator. diff --git a/modules/spo-installing.adoc b/modules/spo-installing.adoc index 54d4f58299..3283eb2902 100644 --- a/modules/spo-installing.adoc +++ b/modules/spo-installing.adoc @@ -12,7 +12,7 @@ .Procedure -. In the {product-title} web console, navigate to *Operators* -> *OperatorHub*. +. 
In the {product-title} web console, navigate to *Ecosystem* -> *Software Catalog*. . Search for the Security Profiles Operator, then click *Install*. . Keep the default selection of *Installation mode* and *namespace* to ensure that the Operator will be installed to the `openshift-security-profiles` namespace. . Click *Install*. @@ -21,10 +21,10 @@ To confirm that the installation is successful: -. Navigate to the *Operators* -> *Installed Operators* page. +. Navigate to the *Ecosystem* -> *Installed Operators* page. . Check that the Security Profiles Operator is installed in the `openshift-security-profiles` namespace and its status is `Succeeded`. If the Operator is not installed successfully: -. Navigate to the *Operators* -> *Installed Operators* page and inspect the `Status` column for any errors or failures. -. Navigate to the *Workloads* -> *Pods* page and check the logs in any pods in the `openshift-security-profiles` project that are reporting issues. \ No newline at end of file +. Navigate to the *Ecosystem* -> *Installed Operators* page and inspect the `Status` column for any errors or failures. +. Navigate to the *Workloads* -> *Pods* page and check the logs in any pods in the `openshift-security-profiles` project that are reporting issues. diff --git a/modules/spo-uninstall-console.adoc b/modules/spo-uninstall-console.adoc index 2f43283b78..84ad0f2a7b 100644 --- a/modules/spo-uninstall-console.adoc +++ b/modules/spo-uninstall-console.adoc @@ -17,11 +17,11 @@ To remove the Security Profiles Operator, you must first delete the `seccomp` an To remove the Security Profiles Operator by using the {product-title} web console: -. Navigate to the *Operators* -> *Installed Operators* page. +. Navigate to the *Ecosystem* -> *Installed Operators* page. . Delete all `seccomp` profiles, SELinux profiles, and webhook configurations. -. Switch to the *Administration* -> *Operators* -> *Installed Operators* page. +. Switch to the *Administration* -> *Ecosystem* -> *Installed Operators* page. . Click the Options menu {kebab} on the *Security Profiles Operator* entry and select *Uninstall Operator*. @@ -38,4 +38,4 @@ To remove the Security Profiles Operator by using the {product-title} web consol [source,terminal] ---- $ oc delete MutatingWebhookConfiguration spo-mutating-webhook-configuration ----- \ No newline at end of file +---- diff --git a/modules/troubleshooting-network-observability-controller-manager-pod-out-of-memory.adoc b/modules/troubleshooting-network-observability-controller-manager-pod-out-of-memory.adoc index 1210328cdf..5e22d87455 100644 --- a/modules/troubleshooting-network-observability-controller-manager-pod-out-of-memory.adoc +++ b/modules/troubleshooting-network-observability-controller-manager-pod-out-of-memory.adoc @@ -10,7 +10,7 @@ You can increase memory limits for the Network Observability Operator by editing .Procedure -. In the web console, navigate to *Operators* -> *Installed Operators* +. In the web console, navigate to *Ecosystem* -> *Installed Operators* . Click *Network Observability* and then select *Subscription*. . From the *Actions* menu, click *Edit Subscription*. .. Alternatively, you can use the CLI to open the YAML configuration for the `Subscription` object by running the following command: @@ -44,4 +44,4 @@ spec: startingCSV: <2> ---- <1> For example, you can increase the memory limit to `800Mi`. -<2> This value should not be edited, but note that it changes depending on the most current release of the Operator. 
\ No newline at end of file +<2> This value should not be edited, but note that it changes depending on the most current release of the Operator. diff --git a/modules/troubleshooting-network-observability-loki-resource-exhausted.adoc b/modules/troubleshooting-network-observability-loki-resource-exhausted.adoc index bdd2a07495..aba35b9e68 100644 --- a/modules/troubleshooting-network-observability-loki-resource-exhausted.adoc +++ b/modules/troubleshooting-network-observability-loki-resource-exhausted.adoc @@ -9,7 +9,7 @@ Loki may return a `ResourceExhausted` error when network flow data sent by network observability exceeds the configured maximum message size. If you are using the Red{nbsp}Hat {loki-op}, this maximum message size is configured to 100 MiB. .Procedure -. Navigate to *Operators* -> *Installed Operators*, viewing *All projects* from the *Project* drop-down menu. +. Navigate to *Ecosystem* -> *Installed Operators*, viewing *All projects* from the *Project* drop-down menu. . In the *Provided APIs* list, select the Network Observability Operator. . Click the *Flow Collector* then the *YAML view* tab. .. If you are using the {loki-op}, check that the `spec.loki.batchSize` value does not exceed 98 MiB. diff --git a/modules/troubleshooting-network-observability-loki-tenant-rate-limit.adoc b/modules/troubleshooting-network-observability-loki-tenant-rate-limit.adoc index 13f28f69f5..24231972d5 100644 --- a/modules/troubleshooting-network-observability-loki-tenant-rate-limit.adoc +++ b/modules/troubleshooting-network-observability-loki-tenant-rate-limit.adoc @@ -11,7 +11,7 @@ A rate-limit placed on the Loki tenant can result in potential temporary loss of You can update the LokiStack CRD with the `perStreamRateLimit` and `perStreamRateLimitBurst` specifications, as shown in the following procedure. .Procedure -. Navigate to *Operators* -> *Installed Operators*, viewing *All projects* from the *Project* dropdown. +. Navigate to *Ecosystem* -> *Installed Operators*, viewing *All projects* from the *Project* dropdown. . Look for *{loki-op}*, and select the *LokiStack* tab. . Create or edit an existing *LokiStack* instance using the *YAML view* to add the `perStreamRateLimit` and `perStreamRateLimitBurst` specifications: + diff --git a/modules/understanding-openshift.adoc b/modules/understanding-openshift.adoc index 414aab53cc..54d776b87e 100644 --- a/modules/understanding-openshift.adoc +++ b/modules/understanding-openshift.adoc @@ -14,10 +14,10 @@ After a node is booted and configured, it obtains a container runtime, such as C {product-title} configures and manages the networking, load balancing and routing of the cluster. {product-title} adds cluster services for monitoring the cluster health and performance, logging, and for managing upgrades. -The container image registry and OperatorHub provide Red Hat certified products and community built softwares for providing various application services within the cluster. These applications and services manage the applications deployed in the cluster, databases, frontends and user interfaces, application runtimes and business automation, and developer services for development and testing of container applications. +The container image registry and software catalog provide Red Hat certified products and community-built software that provide various application services within the cluster. 
These applications and services manage the applications deployed in the cluster, databases, frontends and user interfaces, application runtimes and business automation, and developer services for development and testing of container applications. You can manage applications within the cluster either manually by configuring deployments of containers running from pre-built images or through resources known as Operators. You can build custom images from pre-build images and source code, and store these custom images locally in an internal, private or public registry. The Multicluster Management layer can manage multiple clusters including their deployment, configuration, compliance and distribution of workloads in a single console. -image::oke-about-ocp-stack-image.png[Red Hat {oke}] \ No newline at end of file +image::oke-about-ocp-stack-image.png[Red Hat {oke}] diff --git a/modules/uninstall-cluster-logging-operator.adoc b/modules/uninstall-cluster-logging-operator.adoc index 2d11e97cbc..98707a2289 100644 --- a/modules/uninstall-cluster-logging-operator.adoc +++ b/modules/uninstall-cluster-logging-operator.adoc @@ -31,7 +31,7 @@ Deleting the `ClusterLogging` CR does not remove the persistent volume claims (P . If you have created a `ClusterLogForwarder` CR, click the Options menu {kebab} next to *ClusterLogForwarder*, and then click *Delete Custom Resource Definition*. -. Go to the *Operators* -> *Installed Operators* page. +. Go to the *Ecosystem* -> *Installed Operators* page. . Click the Options menu {kebab} next to the {clo}, and then click *Uninstall Operator*. diff --git a/modules/uninstall-es-operator.adoc b/modules/uninstall-es-operator.adoc index bbc6bc8665..d8767066fc 100644 --- a/modules/uninstall-es-operator.adoc +++ b/modules/uninstall-es-operator.adoc @@ -26,7 +26,7 @@ . Delete the object storage secret. -. Go to the *Operators* -> *Installed Operators* page. +. Go to the *Ecosystem* -> *Installed Operators* page. . Click the Options menu {kebab} next to the {es-op}, and then click *Uninstall Operator*. diff --git a/modules/uninstall-loki-operator.adoc b/modules/uninstall-loki-operator.adoc index f11f4d72c4..f921b6f503 100644 --- a/modules/uninstall-loki-operator.adoc +++ b/modules/uninstall-loki-operator.adoc @@ -26,7 +26,7 @@ . Delete the object storage secret. -. Go to the *Operators* -> *Installed Operators* page. +. Go to the *Ecosystem* -> *Installed Operators* page. . Click the Options menu {kebab} next to the {loki-op}, and then click *Uninstall Operator*. diff --git a/modules/uninstalling-wmco.adoc b/modules/uninstalling-wmco.adoc index 3a0ca2c9c2..7da279bed1 100644 --- a/modules/uninstalling-wmco.adoc +++ b/modules/uninstalling-wmco.adoc @@ -14,7 +14,7 @@ You can uninstall the Windows Machine Config Operator (WMCO) from your cluster. .Procedure -. From the *Operators -> OperatorHub* page, use the *Filter by keyword* box to search for `Red Hat Windows Machine Config Operator`. +. From the *Ecosystem* -> *Software Catalog* page, use the *Filter by keyword* box to search for `Red Hat Windows Machine Config Operator`. . Click the *Red Hat Windows Machine Config Operator* tile. The Operator tile indicates it is installed. 
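The uninstall flows above use the web console. If you prefer the CLI, the following is a minimal sketch of the equivalent cleanup for any OLM-installed Operator; it assumes the Operator was installed in the `openshift-operators` namespace and that you substitute your own Subscription and ClusterServiceVersion names for the placeholders.

[source,terminal]
----
$ oc get subscription,clusterserviceversion -n openshift-operators
$ oc delete subscription <subscription_name> -n openshift-operators
$ oc delete clusterserviceversion <csv_name> -n openshift-operators
----

Deleting the Subscription stops future updates, and deleting the ClusterServiceVersion removes the running Operator.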
diff --git a/modules/update-conditional-web-console.adoc b/modules/update-conditional-web-console.adoc index 0bc5ff7815..dc0dfc6f7a 100644 --- a/modules/update-conditional-web-console.adoc +++ b/modules/update-conditional-web-console.adoc @@ -15,7 +15,7 @@ You can view and assess the risks associated with particular updates with condit * Pause all `MachineHealthCheck` resources. -* You have updated all Operators previously installed through Operator Lifecycle Manager (OLM) to a version that is compatible with your target release. Updating the Operators ensures they have a valid update path when the default OperatorHub catalogs switch from the current minor version to the next during a cluster update. See "Updating installed Operators" in the "Additional resources" section for more information on how to check compatibility and, if necessary, update the installed Operators. +* You have updated all Operators previously installed through Operator Lifecycle Manager (OLM) to a version that is compatible with your target release. Updating the Operators ensures they have a valid update path when the default software catalogs switch from the current minor version to the next during a cluster update. See "Updating installed Operators" in the "Additional resources" section for more information on how to check compatibility and, if necessary, update the installed Operators. * Your machine config pools (MCPs) are running and not paused. Nodes associated with a paused MCP are skipped during the update process. You can pause the MCPs if you are performing an advanced update strategy, such as a canary rollout, an EUS update, or a control-plane update. @@ -29,4 +29,4 @@ You can view and assess the risks associated with particular updates with condit If a version with known issues is selected, more information is provided with potential risks that are associated with the version. ==== -. Review the notification detailing the potential risks to updating. \ No newline at end of file +. Review the notification detailing the potential risks to updating. diff --git a/modules/update-service-create-service-web-console.adoc b/modules/update-service-create-service-web-console.adoc index 588d0d4721..89a275dfba 100644 --- a/modules/update-service-create-service-web-console.adoc +++ b/modules/update-service-create-service-web-console.adoc @@ -15,7 +15,7 @@ You can use the {product-title} web console to create an OpenShift Update Servic .Procedure -. In the web console, click *Operators* -> *Installed Operators*. +. In the web console, click *Ecosystem* -> *Installed Operators*. . Choose *OpenShift Update Service* from the list of installed Operators. diff --git a/modules/update-service-delete-service-web-console.adoc b/modules/update-service-delete-service-web-console.adoc index b3cb9d64cd..486998cdbe 100644 --- a/modules/update-service-delete-service-web-console.adoc +++ b/modules/update-service-delete-service-web-console.adoc @@ -13,7 +13,7 @@ You can use the {product-title} web console to delete an OpenShift Update Servic .Procedure -. In the web console, click *Operators* -> *Installed Operators*. +. In the web console, click *Ecosystem* -> *Installed Operators*. . Choose *OpenShift Update Service* from the list of installed Operators. 
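Besides the web console, you can review conditional update recommendations and their documented risks from the CLI. This is a minimal sketch and assumes a recent `oc` client that supports the `--include-not-recommended` flag.

[source,terminal]
----
$ oc adm upgrade
$ oc adm upgrade --include-not-recommended
----

The second command also lists update targets that carry known risks, together with the reasons they are not recommended.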
diff --git a/modules/update-service-install-web-console.adoc b/modules/update-service-install-web-console.adoc index 571dd1e15f..1073a320b6 100644 --- a/modules/update-service-install-web-console.adoc +++ b/modules/update-service-install-web-console.adoc @@ -9,7 +9,7 @@ You can use the web console to install the OpenShift Update Service Operator. .Procedure -. In the web console, click *Operators* -> *OperatorHub*. +. In the web console, click *Ecosystem* -> *Software Catalog*. + [NOTE] ==== @@ -34,6 +34,6 @@ Enter `Update Service` into the *Filter by keyword...* field to find the Operato .. Click *Install*. -. Go to *Operators* -> *Installed Operators* and verify that the OpenShift Update Service Operator is installed. +. Go to *Ecosystem* -> *Installed Operators* and verify that the OpenShift Update Service Operator is installed. . Ensure that *OpenShift Update Service* is listed in the correct namespace with a *Status* of *Succeeded*. diff --git a/modules/update-service-uninstall-web-console.adoc b/modules/update-service-uninstall-web-console.adoc index 3e2eb2bd78..4ced5ab84c 100644 --- a/modules/update-service-uninstall-web-console.adoc +++ b/modules/update-service-uninstall-web-console.adoc @@ -13,7 +13,7 @@ You can use the {product-title} web console to uninstall the OpenShift Update Se .Procedure -. In the web console, click *Operators* -> *Installed Operators*. +. In the web console, click *Ecosystem* -> *Installed Operators*. . Select *OpenShift Update Service* from the list of installed Operators and click *Uninstall Operator*. diff --git a/modules/update-upgrading-web.adoc b/modules/update-upgrading-web.adoc index 92c4fdab0e..99cbc5e3a8 100644 --- a/modules/update-upgrading-web.adoc +++ b/modules/update-upgrading-web.adoc @@ -24,7 +24,7 @@ link:https://access.redhat.com/downloads/content/290[in the errata section] of t * Pause all `MachineHealthCheck` resources. -* You have updated all Operators previously installed through Operator Lifecycle Manager (OLM) to a version that is compatible with your target release. Updating the Operators ensures they have a valid update path when the default OperatorHub catalogs switch from the current minor version to the next during a cluster update. See "Updating installed Operators" in the "Additional resources" section for more information on how to check compatibility and, if necessary, update the installed Operators. +* You have updated all Operators previously installed through Operator Lifecycle Manager (OLM) to a version that is compatible with your target release. Updating the Operators ensures they have a valid update path when the default software catalogs switch from the current minor version to the next during a cluster update. See "Updating installed Operators" in the "Additional resources" section for more information on how to check compatibility and, if necessary, update the installed Operators. * Your machine config pools (MCPs) are running and not paused. Nodes associated with a paused MCP are skipped during the update process. You can pause the MCPs if you are performing a canary rollout update strategy. 
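To check the prerequisite above about Operators previously installed through OLM, you can list each Subscription channel and installed version before starting the cluster update. This is a minimal sketch; the column selection is illustrative.

[source,terminal]
----
$ oc get subscriptions.operators.coreos.com -A -o custom-columns=NAMESPACE:.metadata.namespace,NAME:.metadata.name,CHANNEL:.spec.channel
$ oc get clusterserviceversions -A
----

Compare the reported channels and versions against the compatibility information for your target release.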
diff --git a/modules/updating-control-plane-only-layered-products.adoc b/modules/updating-control-plane-only-layered-products.adoc index bac24c5164..0dbc6ea347 100644 --- a/modules/updating-control-plane-only-layered-products.adoc +++ b/modules/updating-control-plane-only-layered-products.adoc @@ -17,7 +17,7 @@ Layered products refer to products that are made of multiple underlying products As you perform a Control Plane Only update for the clusters of layered products and those of Operators that have been installed through OLM, you must complete the following: -. You have updated all Operators previously installed through Operator Lifecycle Manager (OLM) to a version that is compatible with your target release. Updating the Operators ensures they have a valid update path when the default OperatorHub catalogs switch from the current minor version to the next during a cluster update. See "Updating installed Operators" in the "Additional resources" section for more information on how to check compatibility and, if necessary, update the installed Operators. +. You have updated all Operators previously installed through Operator Lifecycle Manager (OLM) to a version that is compatible with your target release. Updating the Operators ensures they have a valid update path when the default software catalogs switch from the current minor version to the next during a cluster update. See "Updating installed Operators" in the "Additional resources" section for more information on how to check compatibility and, if necessary, update the installed Operators. . Confirm the cluster version compatibility between the current and intended Operator versions. You can verify which versions your OLM Operators are compatible with by using the link:https://access.redhat.com/labs/ocpouic/?operator=logging&&ocp_versions=4.10,4.11,4.12[Red{nbsp}Hat {product-title} Operator Update Information Checker]. diff --git a/modules/virt-changing-update-settings.adoc b/modules/virt-changing-update-settings.adoc index 9bbb01d410..a4eeef88c7 100644 --- a/modules/virt-changing-update-settings.adoc +++ b/modules/virt-changing-update-settings.adoc @@ -15,7 +15,7 @@ You can change the update channel and approval strategy for your {VirtProductNam .Procedure -. Click *Operators* -> *Installed Operators*. +. Click *Ecosystem* -> *Installed Operators*. . Select *{VirtProductName}* from the list. diff --git a/modules/virt-creating-fusionaccess-cr.adoc b/modules/virt-creating-fusionaccess-cr.adoc index 59818b2044..f005d06d36 100644 --- a/modules/virt-creating-fusionaccess-cr.adoc +++ b/modules/virt-creating-fusionaccess-cr.adoc @@ -18,7 +18,7 @@ Creating the `FusionAccess` CR triggers the installation of the correct version .Procedure -. In the {product-title} web console, navigate to *Operators* -> *Installed Operators*. +. In the {product-title} web console, navigate to *Ecosystem* -> *Installed Operators*. . Click on the {FusionSAN} Operator you installed. diff --git a/modules/virt-deleting-deployment-custom-resource.adoc b/modules/virt-deleting-deployment-custom-resource.adoc index 1a26f06b32..38843e4150 100644 --- a/modules/virt-deleting-deployment-custom-resource.adoc +++ b/modules/virt-deleting-deployment-custom-resource.adoc @@ -14,7 +14,7 @@ To uninstall {VirtProductName}, you first delete the `HyperConverged` custom res .Procedure -. Navigate to the *Operators* -> *Installed Operators* page. +. Navigate to the *Ecosystem* -> *Installed Operators* page. . Select the {VirtProductName} Operator. 
@@ -22,4 +22,4 @@ To uninstall {VirtProductName}, you first delete the `HyperConverged` custom res . Click the Options menu {kebab} beside `kubevirt-hyperconverged` and select *Delete HyperConverged*. -. Click *Delete* in the confirmation window. \ No newline at end of file +. Click *Delete* in the confirmation window. diff --git a/modules/virt-installing-fusion-access-operator.adoc b/modules/virt-installing-fusion-access-operator.adoc index 94e4279f3c..e0bc653e27 100644 --- a/modules/virt-installing-fusion-access-operator.adoc +++ b/modules/virt-installing-fusion-access-operator.adoc @@ -6,7 +6,7 @@ [id="installing-fusion-access-operator_{context}"] = Installing the {FusionSAN} Operator -Install the {FusionSAN} Operator from the *OperatorHub* in the {product-title} web console. +Install the {FusionSAN} Operator from the software catalog in the {product-title} web console. .Prerequisites @@ -15,7 +15,7 @@ Install the {FusionSAN} Operator from the *OperatorHub* in the {product-title} w .Procedure -. In the {product-title} web console, navigate to *Operators* -> *OperatorHub*. +. In the {product-title} web console, navigate to *Ecosystem* -> *Software Catalog*. . In the *Filter by keyword* field, type `Fusion Access for SAN`. @@ -42,6 +42,6 @@ This installs the Operator. .Verification -. Navigate to *Operators* -> *Installed Operators*. +. Navigate to *Ecosystem* -> *Installed Operators*. . Verify that the {FusionSAN} Operator is displayed. diff --git a/modules/virt-installing-virt-operator.adoc b/modules/virt-installing-virt-operator.adoc index b461a1a5f4..7492774796 100644 --- a/modules/virt-installing-virt-operator.adoc +++ b/modules/virt-installing-virt-operator.adoc @@ -19,7 +19,7 @@ endif::openshift-rosa,openshift-dedicated,openshift-rosa-hcp[] .Procedure -. From the *Administrator* perspective, click *Operators* -> *OperatorHub*. +. From the *Administrator* perspective, click *Ecosystem* -> *Software Catalog*. . In the *Filter by keyword* field, type *Virtualization*. diff --git a/modules/wmco-upgrades-eus-using-web-console.adoc b/modules/wmco-upgrades-eus-using-web-console.adoc index a3283daa88..923e883411 100644 --- a/modules/wmco-upgrades-eus-using-web-console.adoc +++ b/modules/wmco-upgrades-eus-using-web-console.adoc @@ -24,7 +24,7 @@ Delete the Operator only. Do not delete the Windows namespace or any Windows wor ==== + .. Log in to the {product-title} web console. -.. Navigate to *Operators -> OperatorHub*. +.. Navigate to *Ecosystem* -> *Software Catalog*. .. Use the *Filter by keyword* box to search for `Red Hat Windows Machine Config Operator`. .. Click the *Red Hat Windows Machine Config Operator* tile. The Operator tile indicates it is installed. .. In the *Windows Machine Config Operator* descriptor page, click *Uninstall*. diff --git a/modules/zero-trust-manager-install-console.adoc b/modules/zero-trust-manager-install-console.adoc index fcf272abb9..d82754ef46 100644 --- a/modules/zero-trust-manager-install-console.adoc +++ b/modules/zero-trust-manager-install-console.adoc @@ -18,7 +18,7 @@ You can use the web console to install the {zero-trust-full}. . Log in to the {product-title} web console. -. Go to *Operators* -> *OperatorHub*. +. Go to *Ecosystem* -> *Software Catalog*. . Enter *{zero-trust-full}* into the filter box. @@ -44,7 +44,7 @@ If the `zero-trust-workload-identity-manager` namespace does not exist, it is cr .Verification -* Navigate to *Operators* -> *Installed Operators*. +* Navigate to *Ecosystem* -> *Installed Operators*. 
** Verify that *{zero-trust-full}* is listed with a *Status* of *Succeeded* in the `zero-trust-workload-identity-manager` namespace. @@ -60,4 +60,4 @@ $ oc get deployment -l name=zero-trust-workload-identity-manager -n zero-trust-w ---- NAME READY UP-TO-DATE AVAILABLE AGE zero-trust-workload-identity-manager-controller-manager-6c4djb 1/1 1 1 43m ----- \ No newline at end of file +---- diff --git a/modules/zero-trust-manager-uninstall-console.adoc b/modules/zero-trust-manager-uninstall-console.adoc index 18bd8a29f5..abc9e161db 100644 --- a/modules/zero-trust-manager-uninstall-console.adoc +++ b/modules/zero-trust-manager-uninstall-console.adoc @@ -22,7 +22,7 @@ You can uninstall the {zero-trust-full} by using the web console. . Uninstall the {zero-trust-full}. -.. Go to *Operators* -> *Installed Operators*. +.. Go to *Ecosystem* -> *Installed Operators*. .. Click the *Options* menu next to the *{zero-trust-full}* entry, and then click *Uninstall Operator*. diff --git a/modules/ztp-lvms-installing-lvms-web-console.adoc b/modules/ztp-lvms-installing-lvms-web-console.adoc index 75b19cd3e4..8e43d0bb22 100644 --- a/modules/ztp-lvms-installing-lvms-web-console.adoc +++ b/modules/ztp-lvms-installing-lvms-web-console.adoc @@ -15,7 +15,7 @@ You can use the {product-title} web console to install {lvms-first}. .Procedure -. In the {product-title} web console, navigate to *Operators* -> *OperatorHub*. +. In the {product-title} web console, navigate to *Ecosystem* -> *Software Catalog*. . Search for the *{lvms}* from the list of available Operators, and then click *Install*. . Keep the default selection of *Installation mode* (*"All namespaces on the cluster (default)"*) and *Installed Namespace* (*"openshift-operators"*) to ensure that the Operator is installed properly. . Click *Install*. @@ -24,10 +24,10 @@ You can use the {product-title} web console to install {lvms-first}. . To confirm that the installation is successful: -.. Navigate to the *Operators* -> *Installed Operators* page. +.. Navigate to the *Ecosystem* -> *Installed Operators* page. .. Check that the Operator is installed in the `All Namespaces` namespace and its status is `Succeeded`. . If the Operator is not installed successfully: -.. Navigate to the *Operators* -> *Installed Operators* page and inspect the `Status` column for any errors or failures. -.. Navigate to the *Workloads* -> *Pods* page and check the logs in any containers in the `local-storage-operator` pod that are reporting issues. \ No newline at end of file +.. Navigate to the *Ecosystem* -> *Installed Operators* page and inspect the `Status` column for any errors or failures. +.. Navigate to the *Workloads* -> *Pods* page and check the logs in any containers in the `local-storage-operator` pod that are reporting issues. diff --git a/networking/networking_operators/aws_load_balancer_operator/install-aws-load-balancer-operator.adoc b/networking/networking_operators/aws_load_balancer_operator/install-aws-load-balancer-operator.adoc index ade40ec13a..ab343810bb 100644 --- a/networking/networking_operators/aws_load_balancer_operator/install-aws-load-balancer-operator.adoc +++ b/networking/networking_operators/aws_load_balancer_operator/install-aws-load-balancer-operator.adoc @@ -6,10 +6,10 @@ include::_attributes/common-attributes.adoc[] toc::[] -The AWS Load Balancer Operator deploys and manages the AWS Load Balancer Controller. You can install the AWS Load Balancer Operator from the OperatorHub by using {product-title} web console or CLI. 
+The AWS Load Balancer Operator deploys and manages the AWS Load Balancer Controller. You can install the AWS Load Balancer Operator from the software catalog by using the {product-title} web console or CLI. include::modules/installing-aws-load-balancer-operator.adoc[leveloffset=+1] include::modules/installing-aws-load-balancer-operator-cli.adoc[leveloffset=+1] -include::modules/creating-instance-aws-load-balancer-controller.adoc[leveloffset=+1] \ No newline at end of file +include::modules/creating-instance-aws-load-balancer-controller.adoc[leveloffset=+1] diff --git a/networking/networking_operators/aws_load_balancer_operator/understanding-aws-load-balancer-operator.adoc b/networking/networking_operators/aws_load_balancer_operator/understanding-aws-load-balancer-operator.adoc index f09096d2fc..26e0d7444a 100644 --- a/networking/networking_operators/aws_load_balancer_operator/understanding-aws-load-balancer-operator.adoc +++ b/networking/networking_operators/aws_load_balancer_operator/understanding-aws-load-balancer-operator.adoc @@ -6,10 +6,10 @@ include::_attributes/common-attributes.adoc[] toc::[] -The AWS Load Balancer Operator deploys and manages the AWS Load Balancer Controller. You can install the AWS Load Balancer Operator from OperatorHub by using {product-title} web console or CLI. +The AWS Load Balancer Operator deploys and manages the AWS Load Balancer Controller. You can install the AWS Load Balancer Operator from the software catalog by using the {product-title} web console or CLI. include::modules/nw-aws-load-balancer-operator-considerations.adoc[leveloffset=+1] include::modules/nw-aws-load-balancer-operator.adoc[leveloffset=+1] -include::modules/nw-aws-load-balancer-with-outposts.adoc[leveloffset=+1] \ No newline at end of file +include::modules/nw-aws-load-balancer-with-outposts.adoc[leveloffset=+1] diff --git a/observability/cluster_observability_operator/installing-the-cluster-observability-operator.adoc b/observability/cluster_observability_operator/installing-the-cluster-observability-operator.adoc index 1aab682cec..93d89b7edd 100644 --- a/observability/cluster_observability_operator/installing-the-cluster-observability-operator.adoc +++ b/observability/cluster_observability_operator/installing-the-cluster-observability-operator.adoc @@ -6,8 +6,8 @@ include::_attributes/common-attributes.adoc[] toc::[] -As a cluster administrator, you can install or remove the {coo-first} from OperatorHub by using the {product-title} web console. -OperatorHub is a user interface that works in conjunction with Operator Lifecycle Manager (OLM), which installs and manages Operators on a cluster. +As a cluster administrator, you can install or remove the {coo-first} from the software catalog by using the {product-title} web console. +The software catalog is a user interface that works in conjunction with Operator Lifecycle Manager (OLM), which installs and manages Operators on a cluster. 
// Installing the COO using the OCP web console include::modules/monitoring-installing-cluster-observability-operator-using-the-web-console.adoc[leveloffset=+1] diff --git a/observability/distr_tracing/distr-tracing-tempo-installing.adoc b/observability/distr_tracing/distr-tracing-tempo-installing.adoc index 848e9bbaf2..21de535c2d 100644 --- a/observability/distr_tracing/distr-tracing-tempo-installing.adoc +++ b/observability/distr_tracing/distr-tracing-tempo-installing.adoc @@ -148,6 +148,6 @@ include::modules/distr-tracing-tempo-install-tempomonolithic-cli.adoc[leveloffse * xref:../../post_installation_configuration/preparing-for-users.adoc#creating-cluster-admin_post-install-preparing-for-users[Creating a cluster admin] * link:https://operatorhub.io/[OperatorHub.io] * xref:../../web_console/web-console.adoc#web-console[Accessing the web console] -* xref:../../operators/admin/olm-adding-operators-to-cluster.adoc#olm-installing-from-operatorhub-using-web-console_olm-adding-operators-to-a-cluster[Installing from OperatorHub using the web console] +* xref:../../operators/admin/olm-adding-operators-to-cluster.adoc#olm-installing-from-operatorhub-using-web-console_olm-adding-operators-to-a-cluster[Installing from the software catalog using the web console] * xref:../../operators/user/olm-creating-apps-from-installed-operators.adoc#olm-creating-apps-from-installed-operators[Creating applications from installed Operators] * xref:../../cli_reference/openshift_cli/getting-started-cli.adoc#getting-started-cli[Getting started with the OpenShift CLI] diff --git a/observability/logging/logging-6.0/log6x-about.adoc b/observability/logging/logging-6.0/log6x-about.adoc index e3db4768e8..79bc4fb014 100644 --- a/observability/logging/logging-6.0/log6x-about.adoc +++ b/observability/logging/logging-6.0/log6x-about.adoc @@ -50,7 +50,7 @@ Logging includes extensive validation rules and default values to ensure a smoot .Procedure -. Install the `{clo}`, `{loki-op}`, and `{coo-first}` from OperatorHub. +. Install the `{clo}`, `{loki-op}`, and `{coo-first}` from the software catalog. . 
Create a secret to access an existing object storage bucket: + diff --git a/observability/otel/otel-installing.adoc b/observability/otel/otel-installing.adoc index 032a92d849..8e78842f37 100644 --- a/observability/otel/otel-installing.adoc +++ b/observability/otel/otel-installing.adoc @@ -29,6 +29,6 @@ include::modules/otel-creating-required-RBAC-resources-automatically.adoc[levelo * xref:../../post_installation_configuration/preparing-for-users.adoc#creating-cluster-admin_post-install-preparing-for-users[Creating a cluster admin] * link:https://operatorhub.io/[OperatorHub.io] * xref:../../web_console/web-console.adoc#web-console[Accessing the web console] -* xref:../../operators/admin/olm-adding-operators-to-cluster.adoc#olm-installing-from-operatorhub-using-web-console_olm-adding-operators-to-a-cluster[Installing from OperatorHub using the web console] +* xref:../../operators/admin/olm-adding-operators-to-cluster.adoc#olm-installing-from-operatorhub-using-web-console_olm-adding-operators-to-a-cluster[Installing from the software catalog using the web console] * xref:../../operators/user/olm-creating-apps-from-installed-operators.adoc#olm-creating-apps-from-installed-operators[Creating applications from installed Operators] * xref:../../cli_reference/openshift_cli/getting-started-cli.adoc#getting-started-cli[Getting started with the OpenShift CLI] diff --git a/operators/admin/olm-adding-operators-to-cluster.adoc b/operators/admin/olm-adding-operators-to-cluster.adoc index d76f6b5696..2ed6e481bf 100644 --- a/operators/admin/olm-adding-operators-to-cluster.adoc +++ b/operators/admin/olm-adding-operators-to-cluster.adoc @@ -35,7 +35,7 @@ include::modules/olm-installing-operators-from-operatorhub.adoc[leveloffset=+1] [role="_additional-resources"] .Additional resources -* xref:../../operators/understanding/olm-understanding-operatorhub.adoc#olm-understanding-operatorhub[Understanding OperatorHub] +* xref:../../operators/understanding/olm-understanding-operatorhub.adoc#olm-understanding-operatorhub[Understanding the software catalog] // Installing from OperatorHub by using the CLI include::modules/olm-installing-from-operatorhub-using-web-console.adoc[leveloffset=+1] @@ -60,7 +60,7 @@ include::modules/olm-installing-from-operatorhub-using-cli.adoc[leveloffset=+1] include::modules/olm-preparing-multitenant-operators.adoc[leveloffset=+1] .Next steps -* Install the Operator in the tenant Operator namespace. This task is more easily performed by using the OperatorHub in the web console instead of the CLI; for a detailed procedure, "Installing from OperatorHub using the web console". +* Install the Operator in the tenant Operator namespace. This task is more easily performed by using the software catalog in the web console instead of the CLI; for a detailed procedure, see "Installing from the software catalog using the web console". + [NOTE] ==== diff --git a/operators/index.adoc b/operators/index.adoc index 9fc02790c5..9fe97e8e8f 100644 --- a/operators/index.adoc +++ b/operators/index.adoc @@ -41,10 +41,10 @@ ifndef::openshift-dedicated,openshift-rosa,openshift-rosa-hcp[] ** xref:../operators/admin/olm-creating-policy.adoc#olm-creating-policy[Allow non-cluster administrators to install Operators]. endif::openshift-dedicated,openshift-rosa,openshift-rosa-hcp[] ifndef::openshift-dedicated,openshift-rosa,openshift-rosa-hcp[] -** xref:../operators/user/olm-installing-operators-in-namespace.adoc#olm-installing-operators-in-namespace[Install an Operator from OperatorHub]. 
+** xref:../operators/user/olm-installing-operators-in-namespace.adoc#olm-installing-operators-in-namespace[Install an Operator from the software catalog]. endif::openshift-dedicated,openshift-rosa,openshift-rosa-hcp[] ifdef::openshift-dedicated,openshift-rosa,openshift-rosa-hcp[] -** xref:../operators/admin/olm-adding-operators-to-cluster.adoc#olm-installing-operators-from-operatorhub_olm-adding-operators-to-a-cluster[Install an Operator from OperatorHub]. +** xref:../operators/admin/olm-adding-operators-to-cluster.adoc#olm-installing-operators-from-operatorhub_olm-adding-operators-to-a-cluster[Install an Operator from the software catalog]. endif::openshift-dedicated,openshift-rosa,openshift-rosa-hcp[] ** xref:../operators/admin/olm-status.adoc#olm-status[View Operator status]. ** xref:../operators/admin/olm-managing-operatorconditions.adoc#olm-managing-operatorconditions[Manage Operator conditions]. diff --git a/operators/operator-reference.adoc b/operators/operator-reference.adoc index e258c9b49f..8ba11cb072 100644 --- a/operators/operator-reference.adoc +++ b/operators/operator-reference.adoc @@ -12,7 +12,7 @@ Cluster administrators can view cluster Operators in the {product-title} web con [NOTE] ==== -Cluster Operators are not managed by Operator Lifecycle Manager (OLM) and OperatorHub. OLM and OperatorHub are part of the link:https://operatorframework.io/[Operator Framework] used in {product-title} for installing and running optional xref:../architecture/control-plane.adoc#olm-operators_control-plane[add-on Operators]. +Cluster Operators are not managed by Operator Lifecycle Manager (OLM) and the software catalog. OLM and the software catalog are part of the link:https://operatorframework.io/[Operator Framework] used in {product-title} for installing and running optional xref:../architecture/control-plane.adoc#olm-operators_control-plane[add-on Operators]. ==== Some of the following cluster Operators can be disabled prior to installation. For more information see xref:../installing/overview/cluster-capabilities.adoc#cluster-capabilities[cluster capabilities]. 
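As a quick way to see the distinction described in the note above, cluster Operators and OLM-managed Operators are reported by different resources. The following is a minimal sketch, assuming the default `openshift-operators` namespace for OLM-installed Operators.

[source,terminal]
----
$ oc get clusteroperators
$ oc get clusterserviceversions -n openshift-operators
----

The first command lists the CVO-managed cluster Operators; the second lists add-on Operators installed through OLM.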
diff --git a/operators/understanding/olm-understanding-operatorhub.adoc b/operators/understanding/olm-understanding-operatorhub.adoc index ab126661f2..4e2ec9afd7 100644 --- a/operators/understanding/olm-understanding-operatorhub.adoc +++ b/operators/understanding/olm-understanding-operatorhub.adoc @@ -1,6 +1,6 @@ :_mod-docs-content-type: ASSEMBLY [id="olm-understanding-operatorhub"] -= Understanding OperatorHub += Understanding the software catalog include::_attributes/common-attributes.adoc[] :context: olm-understanding-operatorhub diff --git a/operators/understanding/olm/olm-understanding-olm.adoc b/operators/understanding/olm/olm-understanding-olm.adoc index 5f53847256..1cdf8f5957 100644 --- a/operators/understanding/olm/olm-understanding-olm.adoc +++ b/operators/understanding/olm/olm-understanding-olm.adoc @@ -15,7 +15,7 @@ include::modules/olm-catalogsource.adoc[leveloffset=+2] [role="_additional-resources"] .Additional resources -* xref:../../../operators/understanding/olm-understanding-operatorhub.adoc#olm-understanding-operatorhub[Understanding OperatorHub] +* xref:../../../operators/understanding/olm-understanding-operatorhub.adoc#olm-understanding-operatorhub[Understanding the software catalog] * xref:../../../operators/understanding/olm-rh-catalogs.adoc#olm-rh-catalogs[Red Hat-provided Operator catalogs] * xref:../../../operators/admin/olm-managing-custom-catalogs.adoc#olm-creating-catalog-from-index_olm-managing-custom-catalogs[Adding a catalog source to a cluster] * xref:../../../operators/understanding/olm/olm-understanding-dependency-resolution.adoc#olm-dependency-catalog-priority_olm-understanding-dependency-resolution[Catalog priority] diff --git a/operators/user/olm-installing-operators-in-namespace.adoc b/operators/user/olm-installing-operators-in-namespace.adoc index 5736c7135d..e4c9e6673a 100644 --- a/operators/user/olm-installing-operators-in-namespace.adoc +++ b/operators/user/olm-installing-operators-in-namespace.adoc @@ -20,7 +20,7 @@ If you have the pull secret, add the `redhat-operators` catalog to the `Operator endif::[] include::modules/olm-installing-operators-from-operatorhub.adoc[leveloffset=+1] -* xref:../../operators/understanding/olm-understanding-operatorhub.adoc#olm-understanding-operatorhub[Understanding OperatorHub] +* xref:../../operators/understanding/olm-understanding-operatorhub.adoc#olm-understanding-operatorhub[Understanding the software catalog] include::modules/olm-installing-from-operatorhub-using-web-console.adoc[leveloffset=+1] ifdef::openshift-enterprise,openshift-webscale,openshift-origin[] diff --git a/osd_cluster_admin/dedicated-admin-role.adoc b/osd_cluster_admin/dedicated-admin-role.adoc index a26f022070..26c7fd01ba 100644 --- a/osd_cluster_admin/dedicated-admin-role.adoc +++ b/osd_cluster_admin/dedicated-admin-role.adoc @@ -33,10 +33,10 @@ include::modules/dedicated-managing-service-accounts.adoc[leveloffset=+1] include::modules/dedicated-managing-quotas-and-limit-ranges.adoc[leveloffset=+1] [id="osd-installing-operators-from-operatorhub_{context}"] -== Installing Operators from the OperatorHub +== Installing Operators from the software catalog {product-title} administrators can install Operators from a curated list -provided by the OperatorHub. This makes the Operator available to all developers +provided by the software catalog. This makes the Operator available to all developers on your cluster to create Custom Resources and applications using that Operator. 
[NOTE] diff --git a/post_installation_configuration/configuring-multi-arch-compute-machines/multiarch-tuning-operator.adoc b/post_installation_configuration/configuring-multi-arch-compute-machines/multiarch-tuning-operator.adoc index 41dbcc8e59..03a7a14698 100644 --- a/post_installation_configuration/configuring-multi-arch-compute-machines/multiarch-tuning-operator.adoc +++ b/post_installation_configuration/configuring-multi-arch-compute-machines/multiarch-tuning-operator.adoc @@ -46,7 +46,7 @@ include::modules/multi-arch-installing-using-cli.adoc[leveloffset=+1] [role="_additional-resources"] .Additional resources -* xref:../../operators/user/olm-installing-operators-in-namespace.adoc#olm-installing-operator-from-operatorhub-using-cli_olm-installing-operators-in-namespace[Installing from OperatorHub using the CLI] +* xref:../../operators/user/olm-installing-operators-in-namespace.adoc#olm-installing-operator-from-operatorhub-using-cli_olm-installing-operators-in-namespace[Installing from the software catalog using the CLI] include::modules/multi-arch-installing-using-web-console.adoc[leveloffset=+1] diff --git a/post_installation_configuration/index.adoc b/post_installation_configuration/index.adoc index ff4aa0b293..b09aeb1577 100644 --- a/post_installation_configuration/index.adoc +++ b/post_installation_configuration/index.adoc @@ -53,6 +53,6 @@ The following lists details these configurations: ** Specify an identity provider. ** Use role-based access control to define and grant permissions to users. -** Install an Operator from OperatorHub. +** Install an Operator from the software catalog. * xref:../post_installation_configuration/configuring-alert-notifications.adoc#configuring-alert-notifications[Configuring alert notifications]: By default, firing alerts are displayed on the Alerting UI of the web console. You can also configure {product-title} to send alert notifications to external systems. diff --git a/post_installation_configuration/preparing-for-users.adoc b/post_installation_configuration/preparing-for-users.adoc index 9dc5b5d88d..632650dcdd 100644 --- a/post_installation_configuration/preparing-for-users.adoc +++ b/post_installation_configuration/preparing-for-users.adoc @@ -123,9 +123,9 @@ include::modules/authentication-kubeadmin.adoc[leveloffset=+1] include::modules/authentication-remove-kubeadmin.adoc[leveloffset=+2] [id="post-install-mirrored-catalogs"] -== Populating OperatorHub from mirrored Operator catalogs +== Populating the software catalog from mirrored Operator catalogs -If you mirrored Operator catalogs for use with disconnected clusters, you can populate OperatorHub with the Operators from your mirrored catalogs. You can use the generated manifests from the mirroring process to create the required `ImageContentSourcePolicy` and `CatalogSource` objects. +If you mirrored Operator catalogs for use with disconnected clusters, you can populate the software catalog with the Operators from your mirrored catalogs. You can use the generated manifests from the mirroring process to create the required `ImageContentSourcePolicy` and `CatalogSource` objects. 
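For orientation, the following is a minimal sketch of a `CatalogSource` that points at a mirrored index image. The registry host, repository, and tag are placeholders; use the actual values from the manifests generated by your mirroring process.

[source,yaml]
----
apiVersion: operators.coreos.com/v1alpha1
kind: CatalogSource
metadata:
  name: mirrored-redhat-operators # example name
  namespace: openshift-marketplace # default namespace for catalog sources
spec:
  sourceType: grpc
  image: <mirror_registry>/redhat/redhat-operator-index:<tag> # placeholder mirror image
  displayName: Mirrored Red Hat Operators
----

After you apply the manifest with `oc apply -f <file_name>.yaml`, the catalog appears as a source in the software catalog.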
[id="prerequisites_post-install-mirrored-catalogs"] === Prerequisites diff --git a/rosa_hcp/rosa-hcp-egress-zero-install.adoc b/rosa_hcp/rosa-hcp-egress-zero-install.adoc index 3b395aa5d5..d43ab2071f 100644 --- a/rosa_hcp/rosa-hcp-egress-zero-install.adoc +++ b/rosa_hcp/rosa-hcp-egress-zero-install.adoc @@ -17,7 +17,7 @@ See xref:../upgrading/rosa-hcp-upgrading.adoc#rosa-hcp-upgrading[Upgrading {prod [NOTE] ==== -Clusters created in restricted network environments may be unable to use certain {product-title} features including Red Hat Insights and Telemetry. These clusters may also experience potential failures for workloads that require public access to registries such as `quay.io`. When using clusters installed with {egress-zero}, you can also install Red Hat-owned Operators from OperatorHub. For a complete list of Red Hat-owned Operators, see the link:https://catalog.redhat.com/search?searchType=software&target_platforms=Red%20Hat%20OpenShift&deployed_as=Operator&p=1&partnerName=Red%20Hat%2C%20Inc.%7CRed%20Hat[Red{nbsp}Hat Ecosystem Catalog]. Only the default Operator channel is mirrored for any Operator that is installed with {egress-zero}. +Clusters created in restricted network environments may be unable to use certain {product-title} features including Red Hat Insights and Telemetry. These clusters may also experience potential failures for workloads that require public access to registries such as `quay.io`. When using clusters installed with {egress-zero}, you can also install Red Hat-owned Operators from the software catalog. For a complete list of Red Hat-owned Operators, see the link:https://catalog.redhat.com/search?searchType=software&target_platforms=Red%20Hat%20OpenShift&deployed_as=Operator&p=1&partnerName=Red%20Hat%2C%20Inc.%7CRed%20Hat[Red{nbsp}Hat Ecosystem Catalog]. Only the default Operator channel is mirrored for any Operator that is installed with {egress-zero}. ==== [discrete] @@ -104,4 +104,4 @@ include::modules/rosa-hcp-creating-account-wide-sts-roles-and-policies.adoc[leve include::modules/rosa-sts-byo-oidc.adoc[leveloffset=+1] include::modules/rosa-operator-config.adoc[leveloffset=+1] -include::modules/rosa-hcp-sts-creating-a-cluster-egress-lockdown-cli.adoc[leveloffset=+1] \ No newline at end of file +include::modules/rosa-hcp-sts-creating-a-cluster-egress-lockdown-cli.adoc[leveloffset=+1] diff --git a/security/compliance_operator/compliance-operator-release-notes.adoc b/security/compliance_operator/compliance-operator-release-notes.adoc index d0c9579f85..3730163b2b 100644 --- a/security/compliance_operator/compliance-operator-release-notes.adoc +++ b/security/compliance_operator/compliance-operator-release-notes.adoc @@ -311,7 +311,7 @@ The following advisory is available for the OpenShift Compliance Operator 1.1.0: * A start and end timestamp is now available in the `ComplianceScan` custom resource definition (CRD) status. -* The Compliance Operator can now be deployed on {hcp} using the OperatorHub by creating a `Subscription` file. For more information, see xref:../../security/compliance_operator/co-management/compliance-operator-installation.adoc#installing-compliance-operator-hcp_compliance-operator-installation[Installing the Compliance Operator on {hcp}]. +* The Compliance Operator can now be deployed on {hcp} using the software catalog by creating a `Subscription` file. 
For more information, see xref:../../security/compliance_operator/co-management/compliance-operator-installation.adoc#installing-compliance-operator-hcp_compliance-operator-installation[Installing the Compliance Operator on {hcp}]. [id="compliance-operator-1-1-0-bug-fixes"] === Bug fixes diff --git a/serverless/install/preparing-serverless-install.adoc b/serverless/install/preparing-serverless-install.adoc index 4792bb23d2..ca28eb86a5 100644 --- a/serverless/install/preparing-serverless-install.adoc +++ b/serverless/install/preparing-serverless-install.adoc @@ -61,6 +61,6 @@ endif::[] == Additional resources ifdef::openshift-enterprise[] * xref:../../disconnected/using-olm.adoc#olm-restricted-networks[Using Operator Lifecycle Manager in disconnected environments] -* xref:../../operators/understanding/olm-understanding-operatorhub.adoc#olm-operatorhub-overview[Understanding OperatorHub] +* xref:../../operators/understanding/olm-understanding-operatorhub.adoc#olm-operatorhub-overview[Understanding the software catalog] * xref:../../installing/overview/cluster-capabilities.adoc#cluster-capabilities[Cluster capabilities] endif::[] diff --git a/service_mesh/v1x/installing-ossm.adoc b/service_mesh/v1x/installing-ossm.adoc index 9ea31c5243..9b8eeb8541 100644 --- a/service_mesh/v1x/installing-ossm.adoc +++ b/service_mesh/v1x/installing-ossm.adoc @@ -29,7 +29,7 @@ The {SMProductShortName} documentation uses `istio-system` as the example projec * Follow the xref:../../service_mesh/v1x/preparing-ossm-installation.adoc#preparing-ossm-installation-v1x[Preparing to install {SMProductName}] process. * An account with the `cluster-admin` role. -The {SMProductShortName} installation process uses the link:https://operatorhub.io/[OperatorHub] to install the `ServiceMeshControlPlane` custom resource definition within the `openshift-operators` project. The {SMProductName} defines and monitors the `ServiceMeshControlPlane` related to the deployment, update, and deletion of the control plane. +The {SMProductShortName} installation process uses the software catalog to install the `ServiceMeshControlPlane` custom resource definition within the `openshift-operators` project. The {SMProductName} defines and monitors the `ServiceMeshControlPlane` related to the deployment, update, and deletion of the control plane. Starting with {SMProductName} {SMProductVersion1x}, you must install the OpenShift Elasticsearch Operator, the Jaeger Operator, and the Kiali Operator before the {SMProductName} Operator can install the control plane. diff --git a/snippets/olmv1-cli-only.adoc b/snippets/olmv1-cli-only.adoc index 69a1d81681..33206b487c 100644 --- a/snippets/olmv1-cli-only.adoc +++ b/snippets/olmv1-cli-only.adoc @@ -7,5 +7,5 @@ [NOTE] ==== -For {product-title} {product-version}, documented procedures for {olmv1} are CLI-based only. Alternatively, administrators can create and view related objects in the web console by using normal methods, such as the *Import YAML* and *Search* pages. However, the existing *OperatorHub* and *Installed Operators* pages do not yet display {olmv1} components. +For {product-title} {product-version}, documented procedures for {olmv1} are CLI-based only. Alternatively, administrators can create and view related objects in the web console by using normal methods, such as the *Import YAML* and *Search* pages. However, the existing *Software Catalog* and *Installed Operators* pages do not yet display {olmv1} components. 
====
diff --git a/support/troubleshooting/troubleshooting-operator-issues.adoc b/support/troubleshooting/troubleshooting-operator-issues.adoc
index 0e969a9127..f96b67da38 100644
--- a/support/troubleshooting/troubleshooting-operator-issues.adoc
+++ b/support/troubleshooting/troubleshooting-operator-issues.adoc
@@ -12,7 +12,7 @@ Operators are a method of packaging, deploying, and managing an {product-title}
{product-title} {product-version} includes a default set of Operators that are required for proper functioning of the cluster. These default Operators are managed by the Cluster Version Operator (CVO).
-As a cluster administrator, you can install application Operators from the OperatorHub using the {product-title} web console or the CLI. You can then subscribe the Operator to one or more namespaces to make it available for developers on your cluster. Application Operators are managed by Operator Lifecycle Manager (OLM).
+As a cluster administrator, you can install application Operators from the software catalog using the {product-title} web console or the CLI. You can then subscribe the Operator to one or more namespaces to make it available for developers on your cluster. Application Operators are managed by Operator Lifecycle Manager (OLM).
If you experience Operator issues, verify Operator subscription status. Check Operator pod health across the cluster and gather Operator logs for diagnosis.
diff --git a/updating/updating_a_cluster/updating-cluster-cli.adoc b/updating/updating_a_cluster/updating-cluster-cli.adoc
index 92110ebb80..0269dc407a 100644
--- a/updating/updating_a_cluster/updating-cluster-cli.adoc
+++ b/updating/updating_a_cluster/updating-cluster-cli.adoc
@@ -26,7 +26,7 @@ See xref:../../authentication/using-rbac.adoc#using-rbac[Using RBAC to define an
* Have a recent xref:../../backup_and_restore/control_plane_backup_and_restore/backing-up-etcd.adoc#backup-etcd[etcd backup] in case your update fails and you must restore your cluster to a previous state.
* Have a recent xref:../../backup_and_restore/application_backup_and_restore/installing/oadp-backup-restore-csi-snapshots.adoc[Container Storage Interface (CSI) volume snapshot] in case you need to restore persistent volumes due to a pod failure.
* Your {op-system-base}7 workers are replaced with {op-system-base}8 or {op-system} workers. Red{nbsp}Hat does not support in-place {op-system-base}7 to {op-system-base}8 updates for {op-system-base} workers; those hosts must be replaced with a clean operating system install.
-* You have updated all Operators previously installed through Operator Lifecycle Manager (OLM) to a version that is compatible with your target release. Updating the Operators ensures they have a valid update path when the default OperatorHub catalogs switch from the current minor version to the next during a cluster update. See xref:../../operators/admin/olm-upgrading-operators.adoc#olm-upgrading-operators[Updating installed Operators] for more information on how to check compatibility and, if necessary, update the installed Operators.
+* You have updated all Operators previously installed through Operator Lifecycle Manager (OLM) to a version that is compatible with your target release. Updating the Operators ensures they have a valid update path when the default software catalogs switch from the current minor version to the next during a cluster update. See xref:../../operators/admin/olm-upgrading-operators.adoc#olm-upgrading-operators[Updating installed Operators] for more information on how to check compatibility and, if necessary, update the installed Operators.
* Ensure that all machine config pools (MCPs) are running and not paused. Nodes associated with a paused MCP are skipped during the update process. You can pause the MCPs if you are performing a canary rollout update strategy.
* If your cluster uses manually maintained credentials, update the cloud provider resources for the new release. For more information, including how to determine if this is a requirement for your cluster, see xref:../../updating/preparing_for_updates/preparing-manual-creds-update.adoc#preparing-manual-creds-update[Preparing to update a cluster with manually maintained credentials].
* Ensure that you address all `Upgradeable=False` conditions so the cluster allows an update to the next minor version. An alert displays at the top of the *Cluster Settings* page when you have one or more cluster Operators that cannot be updated. You can still update to the next available patch update for the minor release you are currently on.
diff --git a/virt/backup_restore/virt-backup-restore-overview.adoc b/virt/backup_restore/virt-backup-restore-overview.adoc
index 8e0f0c6496..25e09addb4 100644
--- a/virt/backup_restore/virt-backup-restore-overview.adoc
+++ b/virt/backup_restore/virt-backup-restore-overview.adoc
@@ -46,7 +46,7 @@ For more information, see xref:../../backup_and_restore/application_backup_and_r
endif::openshift-rosa,openshift-dedicated,openshift-rosa-hcp[]
====
-To install the {oadp-short} Operator in a restricted network environment, you must first disable the default OperatorHub sources and mirror the Operator catalog.
+To install the {oadp-short} Operator in a restricted network environment, you must first disable the default software catalog sources and mirror the Operator catalog.
ifndef::openshift-rosa,openshift-dedicated,openshift-rosa-hcp[]
See xref:../../disconnected/using-olm.adoc#olm-restricted-networks[Using Operator Lifecycle Manager in disconnected environments] for details.
diff --git a/virt/install/installing-virt.adoc b/virt/install/installing-virt.adoc
index 6b751dd866..87adef048e 100644
--- a/virt/install/installing-virt.adoc
+++ b/virt/install/installing-virt.adoc
@@ -13,7 +13,7 @@ ifndef::openshift-rosa,openshift-dedicated,openshift-rosa-hcp[]
====
If you install {VirtProductName} in a restricted environment with no internet connectivity, you must xref:../../disconnected/using-olm.adoc#olm-restricted-networks[configure Operator Lifecycle Manager for disconnected environments].
-If you have limited internet connectivity, you can xref:../../operators/admin/olm-configuring-proxy-support.adoc#olm-configuring-proxy-support[configure proxy support in OLM] to access the OperatorHub.
+If you have limited internet connectivity, you can xref:../../operators/admin/olm-configuring-proxy-support.adoc#olm-configuring-proxy-support[configure proxy support in OLM] to access the software catalog.
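Several of the hunks above concern preparing OLM-managed Operators for a cluster update and installing Operators in restricted or proxied environments. The following is a short sketch of the related checks, assuming `cluster-admin` access; the `oc patch` command reflects the documented way to disable the default catalog sources before mirroring, while the listing commands are simply one way to review what is installed:

[source,terminal]
----
# Review installed Operators, their channels, and current versions before a cluster update
$ oc get subscriptions.operators.coreos.com -A
$ oc get clusterserviceversions -A

# Disconnected clusters: disable the default catalog sources before mirroring the catalog
$ oc patch OperatorHub cluster --type json \
    -p '[{"op": "add", "path": "/spec/disableAllDefaultSources", "value": true}]'
----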
====
endif::openshift-rosa,openshift-dedicated,openshift-rosa-hcp[]
diff --git a/virt/managing_vms/advanced_vm_management/virt-high-availability-for-vms.adoc b/virt/managing_vms/advanced_vm_management/virt-high-availability-for-vms.adoc
index e12065df15..c47a99cb70 100644
--- a/virt/managing_vms/advanced_vm_management/virt-high-availability-for-vms.adoc
+++ b/virt/managing_vms/advanced_vm_management/virt-high-availability-for-vms.adoc
@@ -22,6 +22,6 @@ ifdef::openshift-rosa,openshift-dedicated,openshift-rosa-hcp[]
You can enable high availability for virtual machines (VMs) by configuring remediating nodes.
endif::openshift-rosa,openshift-dedicated,openshift-rosa-hcp[]
-You can configure remediating nodes by installing the Self Node Remediation Operator or the Fence Agents Remediation Operator from the OperatorHub and enabling machine health checks or node remediation checks.
+You can configure remediating nodes by installing the Self Node Remediation Operator or the Fence Agents Remediation Operator from the software catalog and enabling machine health checks or node remediation checks.
For more information on remediation, fencing, and maintaining nodes, see the link:https://docs.redhat.com/en/documentation/workload_availability_for_red_hat_openshift/24.3[Workload Availability for Red Hat OpenShift] documentation.
diff --git a/virt/nodes/virt-node-maintenance.adoc b/virt/nodes/virt-node-maintenance.adoc
index 9bdce4d490..89a3d00a6c 100644
--- a/virt/nodes/virt-node-maintenance.adoc
+++ b/virt/nodes/virt-node-maintenance.adoc
@@ -10,7 +10,7 @@ Nodes can be placed into maintenance mode by using the `oc adm` utility or `Node
[NOTE]
====
-The `node-maintenance-operator` (NMO) is no longer shipped with {VirtProductName}. It is deployed as a standalone Operator from the *OperatorHub* in the {product-title} web console or by using the OpenShift CLI (`oc`).
+The `node-maintenance-operator` (NMO) is no longer shipped with {VirtProductName}. It is deployed as a standalone Operator from the software catalog in the {product-title} web console or by using the OpenShift CLI (`oc`).
For more information on remediation, fencing, and maintaining nodes, see the link:https://access.redhat.com/documentation/en-us/workload_availability_for_red_hat_openshift/23.2/html-single/remediation_fencing_and_maintenance/index#about-remediation-fencing-maintenance[Workload Availability for Red Hat OpenShift] documentation.
====
diff --git a/virt/post_installation_configuration/virt-post-install-network-config.adoc b/virt/post_installation_configuration/virt-post-install-network-config.adoc
index dcf0fdc171..d30cc2673e 100644
--- a/virt/post_installation_configuration/virt-post-install-network-config.adoc
+++ b/virt/post_installation_configuration/virt-post-install-network-config.adoc
@@ -24,7 +24,7 @@ You can install the xref:../../networking/hardware_networks/about-sriov.adoc#abo
endif::openshift-rosa,openshift-dedicated,openshift-rosa-hcp[]
ifndef::openshift-rosa,openshift-dedicated,openshift-rosa-hcp[]
-You can add the xref:../../networking/networking_operators/metallb-operator/about-metallb.adoc#about-metallb[About MetalLB and the MetalLB Operator] to manage the lifecycle for an instance of MetalLB on your cluster. For installation instructions, see xref:../../networking/networking_operators/metallb-operator/metallb-operator-install.adoc#installing-the-metallb-operator-using-web-console_metallb-operator-install[Installing the MetalLB Operator from the OperatorHub using the web console].
+You can add the xref:../../networking/networking_operators/metallb-operator/about-metallb.adoc#about-metallb[About MetalLB and the MetalLB Operator] to manage the lifecycle for an instance of MetalLB on your cluster. For installation instructions, see xref:../../networking/networking_operators/metallb-operator/metallb-operator-install.adoc#installing-the-metallb-operator-using-web-console_metallb-operator-install[Installing the MetalLB Operator from the software catalog using the web console].
endif::openshift-rosa,openshift-dedicated,openshift-rosa-hcp[]
[id="configuring-linux-bridge-network"]
diff --git a/web_console/capabilities_products-web-console.adoc b/web_console/capabilities_products-web-console.adoc
index 059fab4c13..b8dbab5c65 100644
--- a/web_console/capabilities_products-web-console.adoc
+++ b/web_console/capabilities_products-web-console.adoc
@@ -15,7 +15,7 @@ include::modules/optional-capabilities-operators.adoc[leveloffset=+1]
[role="_additional-resources"]
.Additional resources
-* xref:../operators/understanding/olm-understanding-operatorhub.adoc#olm-understanding-operatorhub[Understanding OperatorHub]
+* xref:../operators/understanding/olm-understanding-operatorhub.adoc#olm-understanding-operatorhub[Understanding the software catalog]
* xref:../web_console/web_terminal/installing-web-terminal.adoc#installing-web-terminal[Installing the web terminal]
//OpenShift LightSpeed
@@ -53,4 +53,4 @@ include::modules/rhdh-install-web-console.adoc[leveloffset=+2]
//Concept module explaining dance and why its useful.
//[role="_additional-resources"]
//.Additional resources
-//Link out to dance docs when it comes to it.
\ No newline at end of file
+//Link out to dance docs when it comes to it.
diff --git a/web_console/web_terminal/installing-web-terminal.adoc b/web_console/web_terminal/installing-web-terminal.adoc
index 41299b731b..1205017f4e 100644
--- a/web_console/web_terminal/installing-web-terminal.adoc
+++ b/web_console/web_terminal/installing-web-terminal.adoc
@@ -7,7 +7,7 @@ include::_attributes/attributes-openshift-dedicated.adoc[]
toc::[]
-You can install the web terminal by using the {web-terminal-op} listed in the {product-title} OperatorHub. When you install the {web-terminal-op}, the custom resource definitions (CRDs) that are required for the command line configuration, such as the `DevWorkspace` CRD, are automatically installed. The web console creates the required resources when you open the web terminal.
+You can install the web terminal by using the {web-terminal-op} listed in the {product-title} software catalog. When you install the {web-terminal-op}, the custom resource definitions (CRDs) that are required for the command line configuration, such as the `DevWorkspace` CRD, are automatically installed. The web console creates the required resources when you open the web terminal.
[id="prerequisites_installing-web-terminal"]
@@ -20,7 +20,7 @@ You can install the web terminal by using the {web-terminal-op} listed in the {p
[id="installing-web-terminal-procedure"]
== Procedure
-. In the *Administrator* perspective of the web console, navigate to *Operators -> OperatorHub*.
+. In the *Administrator* perspective of the web console, navigate to *Ecosystem* -> *Software Catalog*.
. Use the *Filter by keyword* box to search for the {web-terminal-op} in the catalog, and then click the *Web Terminal* tile.
. Read the brief description about the Operator on the *Web Terminal* page, and then click *Install*.
. On the *Install Operator* page, retain the default values for all fields.
@@ -38,4 +38,4 @@ You can install the web terminal by using the {web-terminal-op} listed in the {p
The {web-terminal-op} installs the DevWorkspace Operator as a dependency.
====
-. After the Operator is installed, refresh your page to see the command-line terminal icon (image:odc-wto-icon.png[title="web terminal icon"]) in the masthead of the console.
\ No newline at end of file
+. After the Operator is installed, refresh your page to see the command-line terminal icon (image:odc-wto-icon.png[title="web terminal icon"]) in the masthead of the console.
diff --git a/welcome/oke_about.adoc b/welcome/oke_about.adoc
index cf9a5193a9..3220f4e2d1 100644
--- a/welcome/oke_about.adoc
+++ b/welcome/oke_about.adoc
@@ -283,7 +283,7 @@ s| Feature s| {oke} s| {product-title} s| Operator name
| CSI Plugin ISV Compatibility | Included | Included | N/A
| RHT and {ibm-name} middleware à la carte purchases (not included in {product-title} or {oke}) | Included | Included | N/A
| ISV or Partner Operator and Container Compatibility (not included in {product-title} or {oke}) | Included | Included | N/A
-| Embedded OperatorHub | Included | Included | N/A
+| Embedded software catalog | Included | Included | N/A
| Embedded Marketplace | Included | Included | N/A
| Quay Compatibility (not included) | Included | Included | N/A
| OpenShift API for Data Protection (OADP) | Included | Included | OADP Operator
diff --git a/whats_new/new-features.adoc b/whats_new/new-features.adoc
index 71cac7e3e4..8d36dbebe1 100644
--- a/whats_new/new-features.adoc
+++ b/whats_new/new-features.adoc
@@ -46,7 +46,7 @@ the installation program controls all areas of the installation process.
Installer-provisioned infrastructure also provides an opinionated best practices
deployment of OpenShift v4 for AWS instances only. This provides a slimmer
default installation, with incremental feature buy-in through
-OperatorHub.
+the software catalog.
You can also install with a user-provided infrastructure on AWS, bare metal,
or vSphere hosts. If you use the installer-provisioned
@@ -64,16 +64,16 @@ xref:../updating/updating_a_cluster/updating-cluster-web-console.adoc#updating-c
for more information.
[id="ocp-operator-hub"]
-=== OperatorHub
+=== Software catalog
-OperatorHub is available to administrators and helps with easy discovery and
+The software catalog is available to administrators and helps with easy discovery and
installation of all optional components and applications. It includes offerings
from Red Hat products, Red Hat partners, and the community.
-.Features provided with base installation and OperatorHub
+.Features provided with base installation and the software catalog
[cols="3",options="header"]
|===
-|Feature |New installer |OperatorHub
+|Feature |New installer |Software catalog
|Console and authentication
|* [x]
@@ -114,7 +114,7 @@ from Red Hat products, Red Hat partners, and the community.
|===
See
-xref:../operators/understanding/olm-understanding-operatorhub.adoc#olm-understanding-operatorhub[Understanding the OperatorHub] for more information.
+xref:../operators/understanding/olm-understanding-operatorhub.adoc#olm-understanding-operatorhub[Understanding the software catalog] for more information.
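After the console-based {web-terminal-op} installation steps above, a possible CLI spot-check, assuming the Operator landed in the default `openshift-operators` namespace (adjust the namespace if a different one was selected during installation):

[source,terminal]
----
# Confirm that the Web Terminal Operator and its DevWorkspace dependency report Succeeded
$ oc get csv -n openshift-operators | grep -iE 'web-terminal|devworkspace'
----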
[id="ocp-storage"] == Storage diff --git a/windows_containers/enabling-windows-container-workloads.adoc b/windows_containers/enabling-windows-container-workloads.adoc index 85e39fbf2a..0d2259dfc3 100644 --- a/windows_containers/enabling-windows-container-workloads.adoc +++ b/windows_containers/enabling-windows-container-workloads.adoc @@ -6,7 +6,7 @@ include::_attributes/common-attributes.adoc[] toc::[] -Before adding Windows workloads to your cluster, you must install the Windows Machine Config Operator (WMCO), which is available in the {product-title} OperatorHub. The WMCO orchestrates the process of deploying and managing Windows workloads on a cluster. +Before adding Windows workloads to your cluster, you must install the Windows Machine Config Operator (WMCO), which is available in the {product-title} software catalog. The WMCO orchestrates the process of deploying and managing Windows workloads on a cluster. [NOTE] ====