From 90bb62c7513f385cdeebb7e1ffe6fc7f2ca74973 Mon Sep 17 00:00:00 2001
From: Alex Dellapenta
Date: Fri, 3 Mar 2023 14:19:08 -0700
Subject: [PATCH] Add OLM multitenancy doc

---
 _topic_maps/_topic_map.yml                    |  3 +
 modules/olm-colocation-namespaces.adoc        | 30 +++++++++
 modules/olm-default-install-behavior.adoc     | 20 ++++++
 modules/olm-installing-global-namespaces.adoc | 57 ++++++++++++++++
 modules/olm-multitenancy-solution.adoc        | 38 +++++++++++
 modules/olm-operatorgroups-limitations.adoc   | 22 +++++--
 .../olm-preparing-multitenant-operators.adoc  | 66 +++++++++++++++++++
 modules/olm-terms.adoc                        |  9 ++-
 .../olm-adding-operators-to-cluster.adoc      | 31 ++++++++-
 operators/understanding/olm-multitenancy.adoc | 31 +++++++++
 ...m-understanding-dependency-resolution.adoc |  6 ++
 .../olm/olm-understanding-olm.adoc            |  6 +-
 .../olm/olm-understanding-operatorgroups.adoc |  6 ++
 13 files changed, 314 insertions(+), 11 deletions(-)
 create mode 100644 modules/olm-colocation-namespaces.adoc
 create mode 100644 modules/olm-default-install-behavior.adoc
 create mode 100644 modules/olm-installing-global-namespaces.adoc
 create mode 100644 modules/olm-multitenancy-solution.adoc
 create mode 100644 modules/olm-preparing-multitenant-operators.adoc
 create mode 100644 operators/understanding/olm-multitenancy.adoc

diff --git a/_topic_maps/_topic_map.yml b/_topic_maps/_topic_map.yml
index c776342780..edd2e62b26 100644
--- a/_topic_maps/_topic_map.yml
+++ b/_topic_maps/_topic_map.yml
@@ -1619,6 +1619,9 @@ Topics:
 - Name: Red Hat-provided Operator catalogs
   Distros: openshift-enterprise
   File: olm-rh-catalogs
+- Name: Operators in multitenant clusters
+  Distros: openshift-enterprise,openshift-origin
+  File: olm-multitenancy
 - Name: CRDs
   Dir: crds
   Topics:
diff --git a/modules/olm-colocation-namespaces.adoc b/modules/olm-colocation-namespaces.adoc
new file mode 100644
index 0000000000..91bf066d48
--- /dev/null
+++ b/modules/olm-colocation-namespaces.adoc
@@ -0,0 +1,30 @@
+// Module included in the following assemblies:
+//
+// * operators/understanding/olm/olm-understanding-dependency-resolution.adoc
+// * operators/understanding/olm-multitenancy.adoc
+
+:_content-type: CONCEPT
+[id="olm-colocation-namespaces_{context}"]
+= Colocation of Operators in a namespace
+
+Operator Lifecycle Manager (OLM) treats OLM-managed Operators that are installed in the same namespace, meaning their `Subscription` resources are colocated in the same namespace, as related Operators. Even if they are not actually related, OLM considers their states, such as their version and update policy, when any one of them is updated.
+
+This default behavior manifests in two ways:
+
+* `InstallPlan` resources of pending updates include `ClusterServiceVersion` (CSV) resources of all other Operators that are in the same namespace.
+* All Operators in the same namespace share the same update policy. For example, if one Operator is set to manual updates, all other Operators' update policies are also set to manual.
+
+These scenarios can lead to the following issues:
+
+* It becomes hard to reason about install plans for Operator updates, because there are many more resources defined in them than just the updated Operator.
+* It becomes impossible to have some Operators in a namespace update automatically while others are updated manually, which is a common desire for cluster administrators. See the example after this list.
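+
+For example, the following `Subscription` resources are colocated in the `openshift-operators` namespace and illustrate the conflict. This is a minimal sketch: the Operator names, channels, and catalog source are hypothetical placeholders, not Operators documented here.
+
+[source,yaml]
+----
+apiVersion: operators.coreos.com/v1alpha1
+kind: Subscription
+metadata:
+  name: operator-a
+  namespace: openshift-operators
+spec:
+  channel: stable
+  name: operator-a
+  source: redhat-operators
+  sourceNamespace: openshift-marketplace
+  installPlanApproval: Automatic <1>
+---
+apiVersion: operators.coreos.com/v1alpha1
+kind: Subscription
+metadata:
+  name: operator-b
+  namespace: openshift-operators
+spec:
+  channel: stable
+  name: operator-b
+  source: redhat-operators
+  sourceNamespace: openshift-marketplace
+  installPlanApproval: Manual <2>
+----
+<1> A hypothetical Operator that is intended to update automatically.
+<2> A hypothetical Operator pinned to manual update approval. Because both Subscriptions are colocated in `openshift-operators`, the manual setting effectively holds updates for both Operators, and an `InstallPlan` generated for either Operator can include the CSVs of both.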
+
+These issues usually surface because, when installing Operators with the {product-title} web console, the default behavior installs Operators that support the *All namespaces* install mode into the default `openshift-operators` global namespace.
+
+As a cluster administrator, you can bypass this default behavior manually by using the following workflow:
+
+. Create a namespace for the installation of the Operator.
+. Create a custom _global Operator group_, which is an Operator group that watches all namespaces. Associating this Operator group with the namespace that you just created makes the installation namespace a global namespace, which makes Operators installed there available in all namespaces.
+. Install the desired Operator in the installation namespace.
+
+If the Operator has dependencies, the dependencies are automatically installed in the pre-created namespace. As a result, it is then valid for the dependency Operators to have the same update policy and shared install plans. For a detailed procedure, see "Installing global Operators in custom namespaces".
\ No newline at end of file
diff --git a/modules/olm-default-install-behavior.adoc b/modules/olm-default-install-behavior.adoc
new file mode 100644
index 0000000000..387570c751
--- /dev/null
+++ b/modules/olm-default-install-behavior.adoc
@@ -0,0 +1,20 @@
+// Module included in the following assemblies:
+//
+// * operators/understanding/olm-multitenancy.adoc
+
+:_content-type: CONCEPT
+[id="olm-default-install-modes-behavior_{context}"]
+= Default Operator install modes and behavior
+
+When installing Operators with the web console as an administrator, you typically have two choices for the install mode, depending on the Operator's capabilities:
+
+Single namespace:: Installs the Operator in the chosen single namespace, and makes all permissions that the Operator requests available in that namespace.
+
+All namespaces:: Installs the Operator in the default `openshift-operators` namespace to watch and be made available to all namespaces in the cluster. Makes all permissions that the Operator requests available in all namespaces. In some cases, an Operator author can define metadata to give the user a second option for that Operator's suggested namespace.
+
+This choice also means that users in the affected namespaces get access to the Operator's APIs, which can leverage the custom resources (CRs) they own, depending on their role in the namespace:
+
+* The `namespace-admin` and `namespace-edit` roles can read and write to the Operator APIs, meaning they can use them.
+* The `namespace-view` role can read CR objects of that Operator.
+
+For *Single namespace* mode, because the Operator itself installs in the chosen namespace, its pod and service account are also located there. For *All namespaces* mode, the Operator's privileges are all automatically elevated to cluster roles, meaning the Operator has those permissions in all namespaces.
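+
+The install modes that an Operator supports are declared in its cluster service version (CSV) and determine which of these choices the web console offers. The following excerpt is a sketch of the `spec.installModes` stanza of a CSV; the supported values vary by Operator:
+
+[source,yaml]
+----
+spec:
+  installModes:
+  - type: OwnNamespace
+    supported: true
+  - type: SingleNamespace
+    supported: true
+  - type: MultiNamespace
+    supported: false
+  - type: AllNamespaces
+    supported: true
+----
+
+You can review these values before choosing an install mode, for example by inspecting the Operator's package with `oc get packagemanifests <operator_name> -n openshift-marketplace -o yaml` and checking the reported install modes.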
\ No newline at end of file
diff --git a/modules/olm-installing-global-namespaces.adoc b/modules/olm-installing-global-namespaces.adoc
new file mode 100644
index 0000000000..ba08d65460
--- /dev/null
+++ b/modules/olm-installing-global-namespaces.adoc
@@ -0,0 +1,57 @@
+// Module included in the following assemblies:
+//
+// * operators/admin/olm-adding-operators-to-cluster.adoc
+
+:_content-type: PROCEDURE
+[id="olm-installing-global-namespaces_{context}"]
+= Installing global Operators in custom namespaces
+
+When installing Operators with the {product-title} web console, the default behavior installs Operators that support the *All namespaces* install mode into the default `openshift-operators` global namespace. This can cause issues related to shared install plans and update policies between all Operators in the namespace. For more details on these limitations, see "Colocation of Operators in a namespace".
+
+As a cluster administrator, you can bypass this default behavior manually by creating a custom global namespace and using that namespace to install your individual or scoped set of Operators and their dependencies.
+
+.Procedure
+
+. Before installing the Operator, create a namespace for the installation of your desired Operator. This installation namespace becomes the custom global namespace:
+
+.. Define a `Namespace` resource and save the YAML file, for example, `global-operators.yaml`:
++
+[source,yaml]
+----
+apiVersion: v1
+kind: Namespace
+metadata:
+  name: global-operators
+----
+
+.. Create the namespace by running the following command:
++
+[source,terminal]
+----
+$ oc create -f global-operators.yaml
+----
+
+. Create a custom _global Operator group_, which is an Operator group that watches all namespaces:
+
+.. Define an `OperatorGroup` resource and save the YAML file, for example, `global-operatorgroup.yaml`. Omit both the `spec.selector` and `spec.targetNamespaces` fields to make it a _global Operator group_, which selects all namespaces:
++
+[source,yaml]
+----
+apiVersion: operators.coreos.com/v1
+kind: OperatorGroup
+metadata:
+  name: global-operatorgroup
+  namespace: global-operators
+----
++
+[NOTE]
+====
+The `status.namespaces` of a created global Operator group contains the empty string (`""`), which signals to a consuming Operator that it should watch all namespaces.
+====
+
+.. Create the Operator group by running the following command:
++
+[source,terminal]
+----
+$ oc create -f global-operatorgroup.yaml
+----
\ No newline at end of file
diff --git a/modules/olm-multitenancy-solution.adoc b/modules/olm-multitenancy-solution.adoc
new file mode 100644
index 0000000000..623a490d0a
--- /dev/null
+++ b/modules/olm-multitenancy-solution.adoc
@@ -0,0 +1,38 @@
+// Module included in the following assemblies:
+//
+// * operators/understanding/olm-multitenancy.adoc
+
+:_content-type: CONCEPT
+[id="olm-multitenancy-solution_{context}"]
+= Recommended solution for multitenant clusters
+
+While a *Multinamespace* install mode does exist, it is supported by very few Operators. As a middle ground solution between the standard *All namespaces* and *Single namespace* install modes, you can install multiple instances of the same Operator, one for each tenant, by using the following workflow:
+
+. Create a namespace for the tenant Operator that is separate from the tenant's namespace.
+. Create an Operator group for the tenant Operator scoped only to the tenant's namespace.
+. Install the Operator in the tenant Operator namespace, as shown in the example after this list.
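+
+For example, installing the tenant Operator from the CLI amounts to creating a `Subscription` resource in the tenant Operator namespace. The following sketch reuses the example `team1-operator` namespace from the detailed procedure referenced in this section; the Operator name, channel, and catalog source are hypothetical placeholders:
+
+[source,yaml]
+----
+apiVersion: operators.coreos.com/v1alpha1
+kind: Subscription
+metadata:
+  name: example-operator <1>
+  namespace: team1-operator <2>
+spec:
+  channel: stable
+  name: example-operator
+  source: redhat-operators
+  sourceNamespace: openshift-marketplace
+----
+<1> A hypothetical Operator requested by the tenant.
+<2> The tenant Operator namespace, not the tenant's own namespace. Because the Operator group in this namespace targets only the tenant's namespace, the installed Operator watches only that namespace.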
+
+As a result, the Operator resides in the tenant Operator namespace and watches the tenant namespace, but neither the Operator's pod nor its service account is visible or usable by the tenant.
+
+This solution provides better tenant separation and adherence to the principle of least privilege, at the cost of additional resource usage and the orchestration required to ensure that the constraints are met. For a detailed procedure, see "Preparing for multiple instances of an Operator for multitenant clusters".
+
+.Limitations and considerations
+
+This solution only works when the following constraints are met:
+
+* All instances of the same Operator must be the same version.
+* The Operator cannot have dependencies on other Operators.
+* The Operator cannot ship a CRD conversion webhook.
+
+[IMPORTANT]
+====
+You cannot use different versions of the same Operator on the same cluster. Eventually, the installation of another instance of the Operator would be blocked when it meets the following conditions:
+
+* The instance is not the newest version of the Operator.
+* The instance ships an older revision of the CRDs that lacks information or versions that the newer revisions already in use on the cluster contain.
+====
+
+[WARNING]
+====
+As an administrator, use caution when allowing non-cluster administrators to install Operators self-sufficiently, as explained in "Allowing non-cluster administrators to install Operators". These tenants should only have access to a curated catalog of Operators that are known to not have dependencies. These tenants must also be forced to use the same version line of an Operator, to ensure the CRDs do not change. This requires the use of namespace-scoped catalogs and likely disabling the global default catalogs.
+====
\ No newline at end of file
diff --git a/modules/olm-operatorgroups-limitations.adoc b/modules/olm-operatorgroups-limitations.adoc
index dc2d289577..b576bda73d 100644
--- a/modules/olm-operatorgroups-limitations.adoc
+++ b/modules/olm-operatorgroups-limitations.adoc
@@ -1,12 +1,20 @@
+// Module included in the following assemblies:
+//
+// * operators/understanding/olm/olm-understanding-operatorgroups.adoc
+
+:_content-type: CONCEPT
 [id="olm-operatorgroups-limitations"]
-= Limitations for multi-tenant Operator management
+= Limitations for multitenant Operator management
 
-{product-title} provides limited support for simultaneously installing different variations of an Operator on a cluster. Operators are control plane extensions. All tenants, or namespaces, share the same control plane of a cluster. Therefore, tenants in a multi-tenant environment also have to share Operators.
+{product-title} provides limited support for simultaneously installing different versions of an Operator on the same cluster. Operator Lifecycle Manager (OLM) installs Operators multiple times in different namespaces. One constraint of this is that the Operator's API versions must be the same.
 
-The Operator Lifecycle Manager (OLM) installs Operators multiple times in different namespaces. One constraint of this is that the Operator’s API versions must be the same.
+Operators are control plane extensions due to their usage of `CustomResourceDefinition` objects (CRDs), which are global resources in Kubernetes. Different major versions of an Operator often have incompatible CRDs. This makes it impossible to install them simultaneously in different namespaces on a cluster.
 
-Different major versions of an Operator often have incompatible custom resource definitions (CRDs). This makes it difficult to quickly verify OLMs.
+All tenants, or namespaces, share the same control plane of a cluster. Therefore, tenants in a multitenant cluster also share global CRDs, which limits the scenarios in which different instances of the same Operator can be used in parallel on the same cluster.
 
-[role="_additional-resources"]
-== Additional resources
-* xref:../../../operators/admin/olm-creating-policy.adoc#olm-creating-policy[Allowing non-cluster administrators to install Operators]
+The supported scenarios include the following:
+
+* Operators of different versions that ship the exact same CRD definition (in the case of versioned CRDs, the exact same set of versions)
+* Operators of different versions that do not ship a CRD, and instead have their CRD available in a separate bundle on the OperatorHub
+
+All other scenarios are not supported, because the integrity of the cluster data cannot be guaranteed if there are multiple competing or overlapping CRDs from different Operator versions to be reconciled on the same cluster.
\ No newline at end of file
diff --git a/modules/olm-preparing-multitenant-operators.adoc b/modules/olm-preparing-multitenant-operators.adoc
new file mode 100644
index 0000000000..4b6733bcd7
--- /dev/null
+++ b/modules/olm-preparing-multitenant-operators.adoc
@@ -0,0 +1,66 @@
+// Module included in the following assemblies:
+//
+// * operators/admin/olm-adding-operators-to-cluster.adoc
+
+:_content-type: PROCEDURE
+[id="olm-preparing-operators-multitenant_{context}"]
+= Preparing for multiple instances of an Operator for multitenant clusters
+
+As a cluster administrator, you can add multiple instances of an Operator for use in multitenant clusters. This is an alternative solution to either using the standard *All namespaces* install mode, which can be considered to violate the principle of least privilege, or the *Multinamespace* mode, which is not widely adopted. For more information, see "Operators in multitenant clusters".
+
+In the following procedure, the _tenant_ is a user or group of users that share common access and privileges for a set of deployed workloads. The _tenant Operator_ is the instance of an Operator that is intended for use by only that tenant.
+
+.Prerequisites
+
+* All instances of the Operator you want to install must be the same version across a given cluster.
++
+[IMPORTANT]
+====
+For more information on this and other limitations, see "Operators in multitenant clusters".
+====
+
+.Procedure
+
+. Before installing the Operator, create a namespace for the tenant Operator that is separate from the tenant's namespace. For example, if the tenant's namespace is `team1`, you might create a `team1-operator` namespace:
+
+.. Define a `Namespace` resource and save the YAML file, for example, `team1-operator.yaml`:
++
+[source,yaml]
+----
+apiVersion: v1
+kind: Namespace
+metadata:
+  name: team1-operator
+----
+
+.. Create the namespace by running the following command:
++
+[source,terminal]
+----
+$ oc create -f team1-operator.yaml
+----
+
+. Create an Operator group for the tenant Operator scoped to the tenant's namespace, with only that one namespace entry in the `spec.targetNamespaces` list:
+
+.. Define an `OperatorGroup` resource and save the YAML file, for example, `team1-operatorgroup.yaml`:
++
+[source,yaml]
+----
+apiVersion: operators.coreos.com/v1
+kind: OperatorGroup
+metadata:
+  name: team1-operatorgroup
+  namespace: team1-operator
+spec:
+  targetNamespaces:
+  - team1 <1>
+----
+<1> Define only the tenant's namespace in the `spec.targetNamespaces` list.
+
+.. Create the Operator group by running the following command:
++
+[source,terminal]
+----
+$ oc create -f team1-operatorgroup.yaml
+----
+
diff --git a/modules/olm-terms.adoc b/modules/olm-terms.adoc
index 67d310c921..50f98d0c55 100644
--- a/modules/olm-terms.adoc
+++ b/modules/olm-terms.adoc
@@ -2,6 +2,7 @@
 //
 // * operators/understanding/olm/olm-common-terms.adoc
 
+:_content-type: CONCEPT
 [id="olm-common-terms-glossary_{context}"]
 = Common Operator Framework terms
 
@@ -15,7 +16,7 @@ In the bundle format, a _bundle image_ is a container image that is built from O
 [id="olm-common-terms-catalogsource_{context}"]
 == Catalog source
 
-A _catalog source_ is a repository of CSVs, CRDs, and packages that define an application.
+A _catalog source_ represents a store of metadata that OLM can query to discover and install Operators and their dependencies.
 
 [id="olm-common-terms-channel_{context}"]
 == Channel
@@ -50,6 +51,12 @@ In the bundle format, an _index image_ refers to an image of a database (a datab
 == Install plan
 
 An _install plan_ is a calculated list of resources to be created to automatically install or upgrade a CSV.
 
+[id="olm-common-terms-multitenancy_{context}"]
+== Multitenancy
+A _tenant_ in {product-title} is a user or group of users that share common access and privileges for a set of deployed workloads, typically represented by a namespace or project. You can use tenants to provide a level of isolation between different groups or teams.
+
+When a cluster is shared by multiple users or groups, it is considered a _multitenant_ cluster.
+
 [id="olm-common-terms-operatorgroup_{context}"]
 == Operator group
 
diff --git a/operators/admin/olm-adding-operators-to-cluster.adoc b/operators/admin/olm-adding-operators-to-cluster.adoc
index 4ad7f1f2fd..c3ebb2f426 100644
--- a/operators/admin/olm-adding-operators-to-cluster.adoc
+++ b/operators/admin/olm-adding-operators-to-cluster.adoc
@@ -33,12 +33,41 @@ include::modules/olm-installing-from-operatorhub-using-cli.adoc[leveloffset=+1]
 * xref:../../operators/understanding/olm/olm-understanding-operatorgroups.adoc#olm-operatorgroups-about_olm-understanding-operatorgroups[About Operator groups]
 
 include::modules/olm-installing-specific-version-cli.adoc[leveloffset=+1]
-
 [role="_additional-resources"]
 .Additional resources
 
 * xref:../../operators/admin/olm-upgrading-operators.adoc#olm-approving-pending-upgrade_olm-upgrading-operators[Manually approving a pending Operator update]
 
+include::modules/olm-preparing-multitenant-operators.adoc[leveloffset=+1]
+.Next steps
+
+* Install the Operator in the tenant Operator namespace. This task is more easily performed by using the OperatorHub in the web console instead of the CLI; for a detailed procedure, see xref:../../operators/admin/olm-adding-operators-to-cluster.adoc#olm-installing-from-operatorhub-using-web-console_olm-adding-operators-to-a-cluster[Installing from OperatorHub using the web console].
++
+[NOTE]
+====
+After completing the Operator installation, the Operator resides in the tenant Operator namespace and watches the tenant namespace, but neither the Operator's pod nor its service account is visible or usable by the tenant.
+==== + +[role="_additional-resources"] +.Additional resources + +* xref:../../operators/understanding/olm-multitenancy.adoc#olm-multitenancy[Operators in multitenant clusters] + +include::modules/olm-installing-global-namespaces.adoc[leveloffset=+1] +.Next steps + +* Install the desired Operator in the installation namespace. This task is more easily performed by using the OperatorHub in the web console instead of the CLI; for a detailed procedure, see xref:../../operators/admin/olm-adding-operators-to-cluster.adoc#olm-installing-from-operatorhub-using-web-console_olm-adding-operators-to-a-cluster[Installing from OperatorHub using the web console]. ++ +[NOTE] +==== +When you initiate the Operator installation, if the Operator has dependencies, the dependencies are also automatically installed in the custom global namespace. As a result, it is then valid for the dependency Operators to have the same update policy and shared install plans. +==== + +[role="_additional-resources"] +.Additional resources + +* xref:../../operators/understanding/olm/olm-understanding-dependency-resolution.adoc#olm-colocation-namespaces_olm-understanding-dependency-resolution[Colocation of Operators in a namespace] + include::modules/olm-pod-placement.adoc[leveloffset=+1] [role="_additional-resources"] diff --git a/operators/understanding/olm-multitenancy.adoc b/operators/understanding/olm-multitenancy.adoc new file mode 100644 index 0000000000..595c066b70 --- /dev/null +++ b/operators/understanding/olm-multitenancy.adoc @@ -0,0 +1,31 @@ +:_content-type: ASSEMBLY +[id="olm-multitenancy"] += Operators in multitenant clusters +include::_attributes/common-attributes.adoc[] +:context: olm-multitenancy + +toc::[] + +The default behavior for Operator Lifecycle Manager (OLM) aims to provide simplicity during Operator installation. However, this behavior can lack flexibility, especially in multitenant clusters. In order for multiple tenants on a {product-title} cluster to use an Operator, the default behavior of OLM requires that administrators install the Operator in *All namespaces* mode, which can be considered to violate the principle of least privilege. + +Consider the following scenarios to determine which Operator installation workflow works best for your environment and requirements. 
+
+[role="_additional-resources"]
+.Additional resources
+* xref:../../operators/understanding/olm-common-terms.adoc#olm-common-terms-multitenancy_olm-common-terms[Common terms: Multitenant]
+* xref:../../operators/understanding/olm/olm-understanding-dependency-resolution.adoc#olm-colocation-namespaces_olm-understanding-dependency-resolution[Colocation of Operators in a namespace]
+* xref:../../operators/understanding/olm/olm-understanding-operatorgroups.adoc#olm-operatorgroups-limitations[Limitations for multitenant Operator management]
+
+include::modules/olm-default-install-behavior.adoc[leveloffset=+1]
+[role="_additional-resources"]
+.Additional resources
+* xref:../../operators/admin/olm-adding-operators-to-cluster.adoc#olm-adding-operators-to-a-cluster[Adding Operators to a cluster]
+* xref:../../operators/understanding/olm/olm-understanding-operatorgroups.adoc#olm-operatorgroups-membership_olm-understanding-operatorgroups[Install modes types]
+* xref:../../operators/operator_sdk/osdk-generating-csvs.adoc#osdk-suggested-namespace_osdk-generating-csvs[Setting a suggested namespace]
+
+include::modules/olm-multitenancy-solution.adoc[leveloffset=+1]
+[role="_additional-resources"]
+.Additional resources
+* xref:../../operators/admin/olm-adding-operators-to-cluster.adoc#olm-preparing-operators-multitenant_olm-adding-operators-to-a-cluster[Preparing for multiple instances of an Operator for multitenant clusters]
+* xref:../../operators/admin/olm-creating-policy.adoc#olm-creating-policy[Allowing non-cluster administrators to install Operators]
+* xref:../../operators/admin/olm-managing-custom-catalogs.adoc#olm-restricted-networks-operatorhub_olm-managing-custom-catalogs[Disabling the default OperatorHub catalog sources]
\ No newline at end of file
diff --git a/operators/understanding/olm/olm-understanding-dependency-resolution.adoc b/operators/understanding/olm/olm-understanding-dependency-resolution.adoc
index 5510fd7f37..340175e192 100644
--- a/operators/understanding/olm/olm-understanding-dependency-resolution.adoc
+++ b/operators/understanding/olm/olm-understanding-dependency-resolution.adoc
@@ -33,3 +33,9 @@ include::modules/olm-dependencies-best-practices.adoc[leveloffset=+1]
 include::modules/olm-dependencies-caveats.adoc[leveloffset=+1]
 
 include::modules/olm-dependency-resolution-examples.adoc[leveloffset=+1]
+include::modules/olm-colocation-namespaces.adoc[leveloffset=+1]
+[role="_additional-resources"]
+.Additional resources
+
+* xref:../../../operators/admin/olm-adding-operators-to-cluster.adoc#olm-installing-global-namespaces_olm-adding-operators-to-a-cluster[Installing global Operators in custom namespaces]
+* xref:../../../operators/understanding/olm-multitenancy.adoc#olm-multitenancy[Operators in multitenant clusters]
diff --git a/operators/understanding/olm/olm-understanding-olm.adoc b/operators/understanding/olm/olm-understanding-olm.adoc
index 9ebd7338d7..2930be6212 100644
--- a/operators/understanding/olm/olm-understanding-olm.adoc
+++ b/operators/understanding/olm/olm-understanding-olm.adoc
@@ -30,6 +30,7 @@ include::modules/olm-subscription.adoc[leveloffset=+2]
 [role="_additional-resources"]
 .Additional resources
 
+* xref:../../../operators/understanding/olm/olm-understanding-dependency-resolution.adoc#olm-colocation-namespaces_olm-understanding-dependency-resolution[Colocation of Operators in a namespace]
 * xref:../../../operators/admin/olm-status.adoc#olm-status-viewing-cli_olm-status[Viewing Operator subscription status by using the CLI]
 
 include::modules/olm-installplan.adoc[leveloffset=+2]
@@ -37,16 +38,17 @@ include::modules/olm-installplan.adoc[leveloffset=+2]
 
 [role="_additional-resources"]
 .Additional resources
 
+* xref:../../../operators/understanding/olm/olm-understanding-dependency-resolution.adoc#olm-colocation-namespaces_olm-understanding-dependency-resolution[Colocation of Operators in a namespace]
 * xref:../../../operators/admin/olm-creating-policy.adoc#olm-creating-policy[Allowing non-cluster administrators to install Operators]
 
 include::modules/olm-operatorgroups-about.adoc[leveloffset=+2]
 
 .Additional resources
 
-* xref:../../../operators/understanding/olm/olm-understanding-operatorgroups.adoc#olm-understanding-operatorgroups[Operator groups].
+* xref:../../../operators/understanding/olm/olm-understanding-operatorgroups.adoc#olm-understanding-operatorgroups[Operator groups]
 
 include::modules/olm-operatorconditions-about.adoc[leveloffset=+2]
 
 [role="_additional-resources"]
 .Additional resources
 
-* xref:../../../operators/understanding/olm/olm-operatorconditions.adoc#olm-operatorconditions[Operator conditions].
+* xref:../../../operators/understanding/olm/olm-operatorconditions.adoc#olm-operatorconditions[Operator conditions]
diff --git a/operators/understanding/olm/olm-understanding-operatorgroups.adoc b/operators/understanding/olm/olm-understanding-operatorgroups.adoc
index c048d9417e..623d03264b 100644
--- a/operators/understanding/olm/olm-understanding-operatorgroups.adoc
+++ b/operators/understanding/olm/olm-understanding-operatorgroups.adoc
@@ -18,4 +18,10 @@ include::modules/olm-operatorgroups-copied-csvs.adoc[leveloffset=+1]
 include::modules/olm-operatorgroups-static.adoc[leveloffset=+1]
 include::modules/olm-operatorgroups-intersections.adoc[leveloffset=+1]
 include::modules/olm-operatorgroups-limitations.adoc[leveloffset=+1]
+[role="_additional-resources"]
+.Additional resources
+
+* xref:../../../operators/understanding/olm-multitenancy.adoc#olm-multitenancy[Operators in multitenant clusters]
+* xref:../../../operators/admin/olm-creating-policy.adoc#olm-creating-policy[Allowing non-cluster administrators to install Operators]
+
 include::modules/olm-operatorgroups-troubleshooting.adoc[leveloffset=+1]