From 735e13bc263d8d7c97c2696ec54857ef08905ceb Mon Sep 17 00:00:00 2001 From: Michael Ryan Peter Date: Thu, 17 Apr 2025 15:59:01 -0400 Subject: [PATCH] [enterprise-4.19] OSDOCS#14398: Remove docs for the Operator SDK --- _attributes/common-attributes.adoc | 4 - _topic_maps/_topic_map.yml | 82 ---- _topic_maps/_topic_map_osd.yml | 85 ---- _topic_maps/_topic_map_rosa.yml | 85 ---- _topic_maps/_topic_map_rosa_hcp.yml | 100 ---- .../osdk-updating-v1101-to-v1160.adoc | 195 -------- .../osdk-updating-v125-to-v128.adoc | 130 ----- .../osdk-upgrading-v180-to-v1101.adoc | 39 -- architecture/control-plane.adoc | 6 +- cli_reference/index.adoc | 5 +- cli_reference/opm/cli-opm-install.adoc | 5 +- cli_reference/osdk/_attributes | 1 - cli_reference/osdk/cli-osdk-install.adoc | 25 - cli_reference/osdk/cli-osdk-ref.adoc | 56 --- cli_reference/osdk/images | 1 - cli_reference/osdk/modules | 1 - cli_reference/osdk/snippets | 1 - ...talling-mirroring-installation-images.adoc | 8 - disconnected/using-olm.adoc | 3 +- extensions/catalogs/managing-catalogs.adoc | 2 - getting_started/openshift-overview.adoc | 3 - modules/arch-olm-operators.adoc | 2 - ...uilding-memcached-operator-using-osdk.adoc | 443 ------------------ modules/creating-new-osdk-v0-1-0-project.adoc | 45 -- modules/migrating-custom-types-pkg-apis.adoc | 59 --- modules/migrating-reconcile-code.adoc | 313 ------------- modules/olm-about-catalogs.adoc | 2 - .../olm-enabling-operator-for-multi-arch.adoc | 63 --- ...-enabling-operator-restricted-network.adoc | 201 -------- modules/olm-operator-framework.adoc | 3 - modules/olm-operator-maturity-model.adoc | 3 - modules/olm-operatorhub-overview.adoc | 2 - modules/osdk-about-openapi-validation.adoc | 21 - modules/osdk-about-pkg-format-migration.adoc | 62 --- modules/osdk-ansible-cr-status-about.adoc | 36 -- modules/osdk-ansible-cr-status-manual.adoc | 57 --- modules/osdk-ansible-create-api.adoc | 34 -- .../osdk-ansible-custom-resource-files.adoc | 58 --- 
modules/osdk-ansible-extra-variables.adoc | 48 -- .../osdk-ansible-inside-operator-local.adoc | 124 ----- ...ible-inside-operator-logs-full-result.adoc | 22 - ...-ansible-inside-operator-logs-verbose.adoc | 25 - ...sdk-ansible-inside-operator-logs-view.adoc | 43 -- .../osdk-ansible-inside-operator-logs.adoc | 8 - modules/osdk-ansible-k8s-install.adoc | 43 -- modules/osdk-ansible-k8s-local.adoc | 122 ----- modules/osdk-ansible-metrics.adoc | 240 ---------- modules/osdk-ansible-modify-manager.adoc | 85 ---- modules/osdk-ansible-project-layout.adoc | 60 --- modules/osdk-ansible-runner-directory.adoc | 13 - modules/osdk-ansible-watches-file.adoc | 120 ----- modules/osdk-apiservices.adoc | 109 ----- modules/osdk-building-helm-operator.adoc | 350 -------------- modules/osdk-bundle-operator.adoc | 92 ---- modules/osdk-bundle-upgrade-olm.adoc | 82 ---- modules/osdk-bundle-validate-about.adoc | 65 --- modules/osdk-bundle-validate-run.adoc | 62 --- modules/osdk-bundle-validate-tests.adoc | 37 -- modules/osdk-cli-ref-bundle.adoc | 33 -- modules/osdk-cli-ref-cleanup.adoc | 29 -- modules/osdk-cli-ref-completion.adoc | 45 -- modules/osdk-cli-ref-create.adoc | 24 - modules/osdk-cli-ref-generate-bundle.adoc | 66 --- modules/osdk-cli-ref-generate-kustomize.adoc | 42 -- modules/osdk-cli-ref-generate.adoc | 9 - modules/osdk-cli-ref-init.adoc | 35 -- modules/osdk-cli-ref-run-bundle-upgrade.adoc | 36 -- modules/osdk-cli-ref-run-bundle.adoc | 42 -- modules/osdk-cli-ref-run.adoc | 9 - modules/osdk-cli-ref-scorecard.adoc | 53 --- modules/osdk-common-prereqs.adoc | 58 --- modules/osdk-control-compat.adoc | 90 ---- modules/osdk-crd-templates.adoc | 20 - modules/osdk-crds.adoc | 9 - modules/osdk-create-cr.adoc | 244 ---------- modules/osdk-create-project.adoc | 119 ----- modules/osdk-csv-annotations-dep.adoc | 49 -- modules/osdk-csv-annotations-infra.adoc | 81 ---- modules/osdk-csv-annotations-other.adoc | 53 --- modules/osdk-csv-bundle-files.adoc | 21 - 
.../osdk-csv-composition-configuration.adoc | 22 - modules/osdk-csv-manual-annotations.adoc | 8 - modules/osdk-csv-ver.adoc | 10 - modules/osdk-deploy-olm.adoc | 70 --- ...operator-workloads-run-restricted-psa.adoc | 65 --- modules/osdk-generating-a-csv.adoc | 23 - modules/osdk-golang-controller-configs.adoc | 29 -- .../osdk-golang-controller-rbac-markers.adoc | 23 - ...osdk-golang-controller-reconcile-loop.adoc | 54 --- modules/osdk-golang-controller-resources.adoc | 30 -- .../osdk-golang-create-api-controller.adoc | 42 -- modules/osdk-golang-define-api.adoc | 43 -- modules/osdk-golang-generate-crd.adoc | 20 - modules/osdk-golang-implement-controller.adoc | 227 --------- modules/osdk-golang-manager.adoc | 35 -- modules/osdk-golang-multi-group-apis.adoc | 30 -- modules/osdk-golang-project-layout.adoc | 36 -- modules/osdk-ha-sno-api-examples.adoc | 38 -- modules/osdk-ha-sno-api.adoc | 19 - modules/osdk-helm-charts.adoc | 38 -- modules/osdk-helm-existing-chart.adoc | 65 --- modules/osdk-helm-logic.adoc | 26 - modules/osdk-helm-modify-cr.adoc | 45 -- modules/osdk-helm-project-layout.adoc | 33 -- modules/osdk-helm-sample-chart.adoc | 12 - modules/osdk-hiding-internal-objects.adoc | 30 -- modules/osdk-how-csv-gen-works.adoc | 14 - modules/osdk-init-resource.adoc | 76 --- modules/osdk-installing-cli-linux-macos.adoc | 72 --- modules/osdk-installing-cli-macos.adoc | 85 ---- modules/osdk-leader-election-types.adoc | 59 --- modules/osdk-manager-file.adoc | 26 - ...-operators-with-escalated-permissions.adoc | 61 --- modules/osdk-manually-defined-csv-fields.adoc | 83 ---- modules/osdk-migrating-pkgman.adoc | 61 --- modules/osdk-monitoring-custom-metrics.adoc | 168 ------- ...onitoring-prometheus-operator-support.adoc | 10 - modules/osdk-multi-arch-building-images.adoc | 107 ----- modules/osdk-multi-arch-node-affinity.adoc | 13 - modules/osdk-multi-arch-node-preference.adoc | 63 --- modules/osdk-multi-arch-node-reqs.adoc | 130 ----- modules/osdk-multi-arch-validate.adoc | 43 
-- modules/osdk-operatorconditions.adoc | 79 ---- modules/osdk-owned-crds.adoc | 117 ----- modules/osdk-project-file.adoc | 92 ---- modules/osdk-pruning-utility-about.adoc | 23 - modules/osdk-pruning-utility-config.adoc | 86 ---- modules/osdk-publish-catalog.adoc | 192 -------- modules/osdk-quickstart.adoc | 226 --------- modules/osdk-required-crds.adoc | 49 -- modules/osdk-run-deployment.adoc | 84 ---- modules/osdk-run-locally.adoc | 77 --- modules/osdk-run-operator.adoc | 66 --- modules/osdk-run-proxy.adoc | 150 ------ modules/osdk-scorecard-about.adoc | 34 -- modules/osdk-scorecard-config.adoc | 61 --- modules/osdk-scorecard-custom-tests.adoc | 155 ------ modules/osdk-scorecard-output.adoc | 74 --- modules/osdk-scorecard-parallel.adoc | 46 -- modules/osdk-scorecard-run.adoc | 31 -- modules/osdk-scorecard-select-tests.adoc | 40 -- modules/osdk-scorecard-tests.adoc | 47 -- ...sdk-suggested-namespace-node-selector.adoc | 47 -- modules/osdk-suggested-namespace.adoc | 30 -- modules/osdk-updating-128-to-131.adoc | 203 -------- modules/osdk-updating-131-to-1361.adoc | 260 ---------- modules/osdk-updating-1361-to-138.adoc | 419 ----------------- modules/osdk-workflow.adoc | 19 - .../monitoring-stack-architecture.adoc | 7 - .../monitoring/monitoring-overview.adoc | 1 - .../admin/olm-configuring-proxy-support.adoc | 3 - .../admin/olm-managing-custom-catalogs.adoc | 4 +- operators/index.adoc | 11 +- operators/operator-reference.adoc | 1 - operators/operator_sdk/ansible/_attributes | 1 - operators/operator_sdk/ansible/images | 1 - operators/operator_sdk/ansible/modules | 1 - .../ansible/osdk-ansible-cr-status.adoc | 12 - .../ansible/osdk-ansible-inside-operator.adoc | 19 - .../ansible/osdk-ansible-k8s-collection.adoc | 23 - .../ansible/osdk-ansible-project-layout.adoc | 13 - .../ansible/osdk-ansible-quickstart.adoc | 29 -- .../ansible/osdk-ansible-support.adoc | 14 - .../ansible/osdk-ansible-tutorial.adoc | 93 ---- .../osdk-ansible-updating-projects.adoc | 22 - 
operators/operator_sdk/ansible/snippets | 1 - operators/operator_sdk/golang/_attributes | 1 - operators/operator_sdk/golang/images | 1 - operators/operator_sdk/golang/modules | 1 - .../golang/osdk-golang-project-layout.adoc | 13 - .../golang/osdk-golang-quickstart.adoc | 27 -- .../golang/osdk-golang-tutorial.adoc | 103 ---- .../golang/osdk-golang-updating-projects.adoc | 22 - operators/operator_sdk/golang/snippets | 1 - operators/operator_sdk/helm/_attributes | 1 - operators/operator_sdk/helm/images | 1 - operators/operator_sdk/helm/modules | 1 - .../helm/osdk-helm-project-layout.adoc | 13 - .../helm/osdk-helm-quickstart.adoc | 29 -- .../operator_sdk/helm/osdk-helm-support.adoc | 11 - .../operator_sdk/helm/osdk-helm-tutorial.adoc | 96 ---- .../helm/osdk-helm-updating-projects.adoc | 22 - operators/operator_sdk/helm/snippets | 1 - operators/operator_sdk/osdk-about.adoc | 64 --- .../operator_sdk/osdk-bundle-validate.adoc | 30 -- operators/operator_sdk/osdk-cli-ref.adoc | 70 --- .../operator_sdk/osdk-complying-with-psa.adoc | 48 -- .../operator_sdk/osdk-generating-csvs.adoc | 99 ---- operators/operator_sdk/osdk-ha-sno.adoc | 22 - .../operator_sdk/osdk-installing-cli.adoc | 28 -- .../operator_sdk/osdk-leader-election.adoc | 21 - .../osdk-migrating-to-v0-1-0.adoc | 23 - .../osdk-monitoring-prometheus.adoc | 43 -- .../operator_sdk/osdk-multi-arch-support.adoc | 48 -- .../operator_sdk/osdk-pkgman-to-bundle.adoc | 20 - .../operator_sdk/osdk-pruning-utility.adoc | 14 - operators/operator_sdk/osdk-scorecard.adoc | 23 - .../osdk-working-bundle-images.adoc | 52 -- operators/understanding/olm-common-terms.adoc | 2 +- operators/understanding/olm-multitenancy.adoc | 3 +- .../olm-understanding-operatorhub.adoc | 2 - .../olm/olm-operatorconditions.adoc | 1 - ...m-understanding-dependency-resolution.adoc | 8 +- operators/understanding/olm/olm-webhooks.adoc | 2 - rosa_architecture/index.adoc | 3 - snippets/osdk-deprecation.adoc | 54 --- 206 files changed, 12 insertions(+), 11277 
deletions(-) delete mode 100644 _unused_topics/osdk-updating-v1101-to-v1160.adoc delete mode 100644 _unused_topics/osdk-updating-v125-to-v128.adoc delete mode 100644 _unused_topics/osdk-upgrading-v180-to-v1101.adoc delete mode 120000 cli_reference/osdk/_attributes delete mode 100644 cli_reference/osdk/cli-osdk-install.adoc delete mode 100644 cli_reference/osdk/cli-osdk-ref.adoc delete mode 120000 cli_reference/osdk/images delete mode 120000 cli_reference/osdk/modules delete mode 120000 cli_reference/osdk/snippets delete mode 100644 modules/building-memcached-operator-using-osdk.adoc delete mode 100644 modules/creating-new-osdk-v0-1-0-project.adoc delete mode 100644 modules/migrating-custom-types-pkg-apis.adoc delete mode 100644 modules/migrating-reconcile-code.adoc delete mode 100644 modules/olm-enabling-operator-for-multi-arch.adoc delete mode 100644 modules/olm-enabling-operator-restricted-network.adoc delete mode 100644 modules/osdk-about-openapi-validation.adoc delete mode 100644 modules/osdk-about-pkg-format-migration.adoc delete mode 100644 modules/osdk-ansible-cr-status-about.adoc delete mode 100644 modules/osdk-ansible-cr-status-manual.adoc delete mode 100644 modules/osdk-ansible-create-api.adoc delete mode 100644 modules/osdk-ansible-custom-resource-files.adoc delete mode 100644 modules/osdk-ansible-extra-variables.adoc delete mode 100644 modules/osdk-ansible-inside-operator-local.adoc delete mode 100644 modules/osdk-ansible-inside-operator-logs-full-result.adoc delete mode 100644 modules/osdk-ansible-inside-operator-logs-verbose.adoc delete mode 100644 modules/osdk-ansible-inside-operator-logs-view.adoc delete mode 100644 modules/osdk-ansible-inside-operator-logs.adoc delete mode 100644 modules/osdk-ansible-k8s-install.adoc delete mode 100644 modules/osdk-ansible-k8s-local.adoc delete mode 100644 modules/osdk-ansible-metrics.adoc delete mode 100644 modules/osdk-ansible-modify-manager.adoc delete mode 100644 modules/osdk-ansible-project-layout.adoc delete 
mode 100644 modules/osdk-ansible-runner-directory.adoc delete mode 100644 modules/osdk-ansible-watches-file.adoc delete mode 100644 modules/osdk-apiservices.adoc delete mode 100644 modules/osdk-building-helm-operator.adoc delete mode 100644 modules/osdk-bundle-operator.adoc delete mode 100644 modules/osdk-bundle-upgrade-olm.adoc delete mode 100644 modules/osdk-bundle-validate-about.adoc delete mode 100644 modules/osdk-bundle-validate-run.adoc delete mode 100644 modules/osdk-bundle-validate-tests.adoc delete mode 100644 modules/osdk-cli-ref-bundle.adoc delete mode 100644 modules/osdk-cli-ref-cleanup.adoc delete mode 100644 modules/osdk-cli-ref-completion.adoc delete mode 100644 modules/osdk-cli-ref-create.adoc delete mode 100644 modules/osdk-cli-ref-generate-bundle.adoc delete mode 100644 modules/osdk-cli-ref-generate-kustomize.adoc delete mode 100644 modules/osdk-cli-ref-generate.adoc delete mode 100644 modules/osdk-cli-ref-init.adoc delete mode 100644 modules/osdk-cli-ref-run-bundle-upgrade.adoc delete mode 100644 modules/osdk-cli-ref-run-bundle.adoc delete mode 100644 modules/osdk-cli-ref-run.adoc delete mode 100644 modules/osdk-cli-ref-scorecard.adoc delete mode 100644 modules/osdk-common-prereqs.adoc delete mode 100644 modules/osdk-control-compat.adoc delete mode 100644 modules/osdk-crd-templates.adoc delete mode 100644 modules/osdk-crds.adoc delete mode 100644 modules/osdk-create-cr.adoc delete mode 100644 modules/osdk-create-project.adoc delete mode 100644 modules/osdk-csv-annotations-dep.adoc delete mode 100644 modules/osdk-csv-annotations-infra.adoc delete mode 100644 modules/osdk-csv-annotations-other.adoc delete mode 100644 modules/osdk-csv-bundle-files.adoc delete mode 100644 modules/osdk-csv-composition-configuration.adoc delete mode 100644 modules/osdk-csv-manual-annotations.adoc delete mode 100644 modules/osdk-csv-ver.adoc delete mode 100644 modules/osdk-deploy-olm.adoc delete mode 100644 
modules/osdk-ensuring-operator-workloads-run-restricted-psa.adoc delete mode 100644 modules/osdk-generating-a-csv.adoc delete mode 100644 modules/osdk-golang-controller-configs.adoc delete mode 100644 modules/osdk-golang-controller-rbac-markers.adoc delete mode 100644 modules/osdk-golang-controller-reconcile-loop.adoc delete mode 100644 modules/osdk-golang-controller-resources.adoc delete mode 100644 modules/osdk-golang-create-api-controller.adoc delete mode 100644 modules/osdk-golang-define-api.adoc delete mode 100644 modules/osdk-golang-generate-crd.adoc delete mode 100644 modules/osdk-golang-implement-controller.adoc delete mode 100644 modules/osdk-golang-manager.adoc delete mode 100644 modules/osdk-golang-multi-group-apis.adoc delete mode 100644 modules/osdk-golang-project-layout.adoc delete mode 100644 modules/osdk-ha-sno-api-examples.adoc delete mode 100644 modules/osdk-ha-sno-api.adoc delete mode 100644 modules/osdk-helm-charts.adoc delete mode 100644 modules/osdk-helm-existing-chart.adoc delete mode 100644 modules/osdk-helm-logic.adoc delete mode 100644 modules/osdk-helm-modify-cr.adoc delete mode 100644 modules/osdk-helm-project-layout.adoc delete mode 100644 modules/osdk-helm-sample-chart.adoc delete mode 100644 modules/osdk-hiding-internal-objects.adoc delete mode 100644 modules/osdk-how-csv-gen-works.adoc delete mode 100644 modules/osdk-init-resource.adoc delete mode 100644 modules/osdk-installing-cli-linux-macos.adoc delete mode 100644 modules/osdk-installing-cli-macos.adoc delete mode 100644 modules/osdk-leader-election-types.adoc delete mode 100644 modules/osdk-manager-file.adoc delete mode 100644 modules/osdk-managing-psa-for-operators-with-escalated-permissions.adoc delete mode 100644 modules/osdk-manually-defined-csv-fields.adoc delete mode 100644 modules/osdk-migrating-pkgman.adoc delete mode 100644 modules/osdk-monitoring-custom-metrics.adoc delete mode 100644 modules/osdk-monitoring-prometheus-operator-support.adoc delete mode 100644 
modules/osdk-multi-arch-building-images.adoc delete mode 100644 modules/osdk-multi-arch-node-affinity.adoc delete mode 100644 modules/osdk-multi-arch-node-preference.adoc delete mode 100644 modules/osdk-multi-arch-node-reqs.adoc delete mode 100644 modules/osdk-multi-arch-validate.adoc delete mode 100644 modules/osdk-operatorconditions.adoc delete mode 100644 modules/osdk-owned-crds.adoc delete mode 100644 modules/osdk-project-file.adoc delete mode 100644 modules/osdk-pruning-utility-about.adoc delete mode 100644 modules/osdk-pruning-utility-config.adoc delete mode 100644 modules/osdk-publish-catalog.adoc delete mode 100644 modules/osdk-quickstart.adoc delete mode 100644 modules/osdk-required-crds.adoc delete mode 100644 modules/osdk-run-deployment.adoc delete mode 100644 modules/osdk-run-locally.adoc delete mode 100644 modules/osdk-run-operator.adoc delete mode 100644 modules/osdk-run-proxy.adoc delete mode 100644 modules/osdk-scorecard-about.adoc delete mode 100644 modules/osdk-scorecard-config.adoc delete mode 100644 modules/osdk-scorecard-custom-tests.adoc delete mode 100644 modules/osdk-scorecard-output.adoc delete mode 100644 modules/osdk-scorecard-parallel.adoc delete mode 100644 modules/osdk-scorecard-run.adoc delete mode 100644 modules/osdk-scorecard-select-tests.adoc delete mode 100644 modules/osdk-scorecard-tests.adoc delete mode 100644 modules/osdk-suggested-namespace-node-selector.adoc delete mode 100644 modules/osdk-suggested-namespace.adoc delete mode 100644 modules/osdk-updating-128-to-131.adoc delete mode 100644 modules/osdk-updating-131-to-1361.adoc delete mode 100644 modules/osdk-updating-1361-to-138.adoc delete mode 100644 modules/osdk-workflow.adoc delete mode 120000 operators/operator_sdk/ansible/_attributes delete mode 120000 operators/operator_sdk/ansible/images delete mode 120000 operators/operator_sdk/ansible/modules delete mode 100644 operators/operator_sdk/ansible/osdk-ansible-cr-status.adoc delete mode 100644 
operators/operator_sdk/ansible/osdk-ansible-inside-operator.adoc delete mode 100644 operators/operator_sdk/ansible/osdk-ansible-k8s-collection.adoc delete mode 100644 operators/operator_sdk/ansible/osdk-ansible-project-layout.adoc delete mode 100644 operators/operator_sdk/ansible/osdk-ansible-quickstart.adoc delete mode 100644 operators/operator_sdk/ansible/osdk-ansible-support.adoc delete mode 100644 operators/operator_sdk/ansible/osdk-ansible-tutorial.adoc delete mode 100644 operators/operator_sdk/ansible/osdk-ansible-updating-projects.adoc delete mode 120000 operators/operator_sdk/ansible/snippets delete mode 120000 operators/operator_sdk/golang/_attributes delete mode 120000 operators/operator_sdk/golang/images delete mode 120000 operators/operator_sdk/golang/modules delete mode 100644 operators/operator_sdk/golang/osdk-golang-project-layout.adoc delete mode 100644 operators/operator_sdk/golang/osdk-golang-quickstart.adoc delete mode 100644 operators/operator_sdk/golang/osdk-golang-tutorial.adoc delete mode 100644 operators/operator_sdk/golang/osdk-golang-updating-projects.adoc delete mode 120000 operators/operator_sdk/golang/snippets delete mode 120000 operators/operator_sdk/helm/_attributes delete mode 120000 operators/operator_sdk/helm/images delete mode 120000 operators/operator_sdk/helm/modules delete mode 100644 operators/operator_sdk/helm/osdk-helm-project-layout.adoc delete mode 100644 operators/operator_sdk/helm/osdk-helm-quickstart.adoc delete mode 100644 operators/operator_sdk/helm/osdk-helm-support.adoc delete mode 100644 operators/operator_sdk/helm/osdk-helm-tutorial.adoc delete mode 100644 operators/operator_sdk/helm/osdk-helm-updating-projects.adoc delete mode 120000 operators/operator_sdk/helm/snippets delete mode 100644 operators/operator_sdk/osdk-about.adoc delete mode 100644 operators/operator_sdk/osdk-bundle-validate.adoc delete mode 100644 operators/operator_sdk/osdk-cli-ref.adoc delete mode 100644 
operators/operator_sdk/osdk-complying-with-psa.adoc delete mode 100644 operators/operator_sdk/osdk-generating-csvs.adoc delete mode 100644 operators/operator_sdk/osdk-ha-sno.adoc delete mode 100644 operators/operator_sdk/osdk-installing-cli.adoc delete mode 100644 operators/operator_sdk/osdk-leader-election.adoc delete mode 100644 operators/operator_sdk/osdk-migrating-to-v0-1-0.adoc delete mode 100644 operators/operator_sdk/osdk-monitoring-prometheus.adoc delete mode 100644 operators/operator_sdk/osdk-multi-arch-support.adoc delete mode 100644 operators/operator_sdk/osdk-pkgman-to-bundle.adoc delete mode 100644 operators/operator_sdk/osdk-pruning-utility.adoc delete mode 100644 operators/operator_sdk/osdk-scorecard.adoc delete mode 100644 operators/operator_sdk/osdk-working-bundle-images.adoc delete mode 100644 snippets/osdk-deprecation.adoc diff --git a/_attributes/common-attributes.adoc b/_attributes/common-attributes.adoc index 68c80a4db2..d73b60b2af 100644 --- a/_attributes/common-attributes.adoc +++ b/_attributes/common-attributes.adoc @@ -224,10 +224,6 @@ endif::[] // logical volume manager storage :lvms-first: Logical Volume Manager (LVM) Storage :lvms: LVM Storage -//Operator SDK version -:osdk_ver: 1.38.0 -//Operator SDK version that shipped with the previous OCP 4.x release -:osdk_ver_n1: 1.36.1 //Version-agnostic OLM :olm-first: Operator Lifecycle Manager (OLM) :olm: OLM diff --git a/_topic_maps/_topic_map.yml b/_topic_maps/_topic_map.yml index b46306975b..286fac81ef 100644 --- a/_topic_maps/_topic_map.yml +++ b/_topic_maps/_topic_map.yml @@ -1008,14 +1008,6 @@ Topics: File: cli-opm-install - Name: opm CLI reference File: cli-opm-ref -- Name: Operator SDK - Dir: osdk - Distros: openshift-enterprise,openshift-origin - Topics: - - Name: Installing the Operator SDK CLI - File: cli-osdk-install - - Name: Operator SDK CLI reference - File: cli-osdk-ref --- Name: Security and compliance Dir: security @@ -2006,59 +1998,6 @@ Topics: Dir: operator_sdk Distros: 
openshift-origin,openshift-enterprise Topics: - - Name: About the Operator SDK - File: osdk-about - - Name: Installing the Operator SDK CLI - File: osdk-installing-cli - - Name: Go-based Operators - Dir: golang - Topics: - - Name: Getting started - File: osdk-golang-quickstart - - Name: Tutorial - File: osdk-golang-tutorial - - Name: Project layout - File: osdk-golang-project-layout - - Name: Updating Go-based projects - File: osdk-golang-updating-projects - - Name: Ansible-based Operators - Dir: ansible - Topics: - - Name: Getting started - File: osdk-ansible-quickstart - - Name: Tutorial - File: osdk-ansible-tutorial - - Name: Project layout - File: osdk-ansible-project-layout - - Name: Updating Ansible-based projects - File: osdk-ansible-updating-projects - - Name: Ansible support - File: osdk-ansible-support - - Name: Kubernetes Collection for Ansible - File: osdk-ansible-k8s-collection - - Name: Using Ansible inside an Operator - File: osdk-ansible-inside-operator - - Name: Custom resource status management - File: osdk-ansible-cr-status - - Name: Helm-based Operators - Dir: helm - Topics: - - Name: Getting started - File: osdk-helm-quickstart - - Name: Tutorial - File: osdk-helm-tutorial - - Name: Project layout - File: osdk-helm-project-layout - - Name: Updating Helm-based projects - File: osdk-helm-updating-projects - - Name: Helm support - File: osdk-helm-support - - Name: Defining cluster service versions (CSVs) - File: osdk-generating-csvs - - Name: Working with bundle images - File: osdk-working-bundle-images - - Name: Complying with pod security admission - File: osdk-complying-with-psa - Name: Token authentication Dir: token_auth Topics: @@ -2070,27 +2009,6 @@ Topics: File: osdk-cco-azure - Name: CCO-based workflow for OLM-managed Operators with GCP Workload Identity File: osdk-cco-gcp - - Name: Validating Operators using the scorecard - File: osdk-scorecard - - Name: Validating Operator bundles - File: osdk-bundle-validate - - Name: High-availability 
or single-node cluster detection and support - File: osdk-ha-sno - - Name: Configuring built-in monitoring with Prometheus - File: osdk-monitoring-prometheus - - Name: Configuring leader election - File: osdk-leader-election - - Name: Configuring support for multiple platforms - File: osdk-multi-arch-support - - Name: Object pruning utility - File: osdk-pruning-utility - - Name: Migrating package manifest projects to bundle format - File: osdk-pkgman-to-bundle - - Name: Operator SDK CLI reference - File: osdk-cli-ref - - Name: Migrating to Operator SDK v0.1.0 - File: osdk-migrating-to-v0-1-0 - Distros: openshift-origin - Name: Cluster Operators reference File: operator-reference - Name: OLM v1 diff --git a/_topic_maps/_topic_map_osd.yml b/_topic_maps/_topic_map_osd.yml index d0319ac6a2..cd9d1b849e 100644 --- a/_topic_maps/_topic_map_osd.yml +++ b/_topic_maps/_topic_map_osd.yml @@ -345,14 +345,6 @@ Topics: File: cli-opm-install - Name: opm CLI reference File: cli-opm-ref -- Name: Operator SDK - Dir: osdk - Distros: openshift-dedicated - Topics: - - Name: Installing the Operator SDK CLI - File: cli-osdk-install - - Name: Operator SDK CLI reference - File: cli-osdk-ref --- Name: Cluster administration Dir: osd_cluster_admin @@ -732,83 +724,6 @@ Topics: File: olm-cs-podsched - Name: Troubleshooting Operator issues File: olm-troubleshooting-operator-issues -- Name: Developing Operators - Dir: operator_sdk - Topics: - - Name: About the Operator SDK - File: osdk-about - - Name: Installing the Operator SDK CLI - File: osdk-installing-cli - - Name: Go-based Operators - Dir: golang - Topics: -# Quick start excluded, because it requires cluster-admin permissions. 
-# - Name: Getting started -# File: osdk-golang-quickstart - - Name: Tutorial - File: osdk-golang-tutorial - - Name: Project layout - File: osdk-golang-project-layout - - Name: Updating Go-based projects - File: osdk-golang-updating-projects - - Name: Ansible-based Operators - Dir: ansible - Topics: -# Quick start excluded, because it requires cluster-admin permissions. -# - Name: Getting started -# File: osdk-ansible-quickstart - - Name: Tutorial - File: osdk-ansible-tutorial - - Name: Project layout - File: osdk-ansible-project-layout - - Name: Updating Ansible-based projects - File: osdk-ansible-updating-projects - - Name: Ansible support - File: osdk-ansible-support - - Name: Kubernetes Collection for Ansible - File: osdk-ansible-k8s-collection - - Name: Using Ansible inside an Operator - File: osdk-ansible-inside-operator - - Name: Custom resource status management - File: osdk-ansible-cr-status - - Name: Helm-based Operators - Dir: helm - Topics: -# Quick start excluded, because it requires cluster-admin permissions. 
-# - Name: Getting started -# File: osdk-helm-quickstart - - Name: Tutorial - File: osdk-helm-tutorial - - Name: Project layout - File: osdk-helm-project-layout - - Name: Updating Helm-based projects - File: osdk-helm-updating-projects - - Name: Helm support - File: osdk-helm-support - - Name: Defining cluster service versions (CSVs) - File: osdk-generating-csvs - - Name: Working with bundle images - File: osdk-working-bundle-images - - Name: Complying with pod security admission - File: osdk-complying-with-psa - - Name: Validating Operators using the scorecard - File: osdk-scorecard - - Name: Validating Operator bundles - File: osdk-bundle-validate - - Name: High-availability or single-node cluster detection and support - File: osdk-ha-sno - - Name: Configuring built-in monitoring with Prometheus - File: osdk-monitoring-prometheus - - Name: Configuring leader election - File: osdk-leader-election - - Name: Object pruning utility - File: osdk-pruning-utility - - Name: Migrating package manifest projects to bundle format - File: osdk-pkgman-to-bundle - - Name: Operator SDK CLI reference - File: osdk-cli-ref - - Name: Migrating to Operator SDK v0.1.0 - File: osdk-migrating-to-v0-1-0 # ROSA customers can't configure/edit the cluster Operators # - Name: Cluster Operators reference # File: operator-reference diff --git a/_topic_maps/_topic_map_rosa.yml b/_topic_maps/_topic_map_rosa.yml index 813d97ff28..2607979638 100644 --- a/_topic_maps/_topic_map_rosa.yml +++ b/_topic_maps/_topic_map_rosa.yml @@ -547,14 +547,6 @@ Topics: File: cli-opm-install - Name: opm CLI reference File: cli-opm-ref -- Name: Operator SDK - Dir: osdk - Distros: openshift-rosa - Topics: - - Name: Installing the Operator SDK CLI - File: cli-osdk-install - - Name: Operator SDK CLI reference - File: cli-osdk-ref - Name: ROSA CLI Dir: rosa_cli Distros: openshift-rosa @@ -981,83 +973,6 @@ Topics: File: olm-cs-podsched - Name: Troubleshooting Operator issues File: olm-troubleshooting-operator-issues -- 
Name: Developing Operators - Dir: operator_sdk - Topics: - - Name: About the Operator SDK - File: osdk-about - - Name: Installing the Operator SDK CLI - File: osdk-installing-cli - - Name: Go-based Operators - Dir: golang - Topics: -# Quick start excluded, because it requires cluster-admin permissions. -# - Name: Getting started -# File: osdk-golang-quickstart - - Name: Tutorial - File: osdk-golang-tutorial - - Name: Project layout - File: osdk-golang-project-layout - - Name: Updating Go-based projects - File: osdk-golang-updating-projects - - Name: Ansible-based Operators - Dir: ansible - Topics: -# Quick start excluded, because it requires cluster-admin permissions. -# - Name: Getting started -# File: osdk-ansible-quickstart - - Name: Tutorial - File: osdk-ansible-tutorial - - Name: Project layout - File: osdk-ansible-project-layout - - Name: Updating Ansible-based projects - File: osdk-ansible-updating-projects - - Name: Ansible support - File: osdk-ansible-support - - Name: Kubernetes Collection for Ansible - File: osdk-ansible-k8s-collection - - Name: Using Ansible inside an Operator - File: osdk-ansible-inside-operator - - Name: Custom resource status management - File: osdk-ansible-cr-status - - Name: Helm-based Operators - Dir: helm - Topics: -# Quick start excluded, because it requires cluster-admin permissions. 
-# - Name: Getting started -# File: osdk-helm-quickstart - - Name: Tutorial - File: osdk-helm-tutorial - - Name: Project layout - File: osdk-helm-project-layout - - Name: Updating Helm-based projects - File: osdk-helm-updating-projects - - Name: Helm support - File: osdk-helm-support - - Name: Defining cluster service versions (CSVs) - File: osdk-generating-csvs - - Name: Working with bundle images - File: osdk-working-bundle-images - - Name: Complying with pod security admission - File: osdk-complying-with-psa - - Name: Validating Operators using the scorecard - File: osdk-scorecard - - Name: Validating Operator bundles - File: osdk-bundle-validate - - Name: High-availability or single-node cluster detection and support - File: osdk-ha-sno - - Name: Configuring built-in monitoring with Prometheus - File: osdk-monitoring-prometheus - - Name: Configuring leader election - File: osdk-leader-election - - Name: Object pruning utility - File: osdk-pruning-utility - - Name: Migrating package manifest projects to bundle format - File: osdk-pkgman-to-bundle - - Name: Operator SDK CLI reference - File: osdk-cli-ref - - Name: Migrating to Operator SDK v0.1.0 - File: osdk-migrating-to-v0-1-0 # ROSA customers can't configure/edit the cluster Operators # - Name: Cluster Operators reference # File: operator-reference diff --git a/_topic_maps/_topic_map_rosa_hcp.yml b/_topic_maps/_topic_map_rosa_hcp.yml index 74dc38b30c..cbbe95e74e 100644 --- a/_topic_maps/_topic_map_rosa_hcp.yml +++ b/_topic_maps/_topic_map_rosa_hcp.yml @@ -304,14 +304,6 @@ Topics: File: cli-opm-install - Name: opm CLI reference File: cli-opm-ref -- Name: Operator SDK - Dir: osdk - Distros: openshift-rosa-hcp - Topics: - - Name: Installing the Operator SDK CLI - File: cli-osdk-install - - Name: Operator SDK CLI reference - File: cli-osdk-ref - Name: ROSA CLI Dir: rosa_cli Distros: openshift-rosa-hcp @@ -752,98 +744,6 @@ Topics: File: olm-cs-podsched - Name: Troubleshooting Operator issues File: 
olm-troubleshooting-operator-issues -- Name: Developing Operators - Dir: operator_sdk - Topics: - - Name: About the Operator SDK - File: osdk-about - - Name: Installing the Operator SDK CLI - File: osdk-installing-cli - - Name: Go-based Operators - Dir: golang - Topics: -# Quick start excluded, because it requires cluster-admin permissions. -# - Name: Getting started -# File: osdk-golang-quickstart - - Name: Tutorial - File: osdk-golang-tutorial - - Name: Project layout - File: osdk-golang-project-layout - - Name: Updating Go-based projects - File: osdk-golang-updating-projects - - Name: Ansible-based Operators - Dir: ansible - Topics: -# Quick start excluded, because it requires cluster-admin permissions. -# - Name: Getting started -# File: osdk-ansible-quickstart - - Name: Tutorial - File: osdk-ansible-tutorial - - Name: Project layout - File: osdk-ansible-project-layout - - Name: Updating Ansible-based projects - File: osdk-ansible-updating-projects - - Name: Ansible support - File: osdk-ansible-support - - Name: Kubernetes Collection for Ansible - File: osdk-ansible-k8s-collection - - Name: Using Ansible inside an Operator - File: osdk-ansible-inside-operator - - Name: Custom resource status management - File: osdk-ansible-cr-status - - Name: Helm-based Operators - Dir: helm - Topics: -# Quick start excluded, because it requires cluster-admin permissions. 
-# - Name: Getting started -# File: osdk-helm-quickstart - - Name: Tutorial - File: osdk-helm-tutorial - - Name: Project layout - File: osdk-helm-project-layout - - Name: Updating Helm-based projects - File: osdk-helm-updating-projects - - Name: Helm support - File: osdk-helm-support -# - Name: Hybrid Helm Operator <= Tech Preview -# File: osdk-hybrid-helm -# - Name: Updating Hybrid Helm-based projects (Technology Preview) -# File: osdk-hybrid-helm-updating-projects -# - Name: Java-based Operators <= Tech Preview -# Dir: java -# Topics: -# - Name: Getting started -# File: osdk-java-quickstart -# - Name: Tutorial -# File: osdk-java-tutorial -# - Name: Project layout -# File: osdk-java-project-layout -# - Name: Updating Java-based projects -# File: osdk-java-updating-projects - - Name: Defining cluster service versions (CSVs) - File: osdk-generating-csvs - - Name: Working with bundle images - File: osdk-working-bundle-images - - Name: Complying with pod security admission - File: osdk-complying-with-psa - - Name: Validating Operators using the scorecard - File: osdk-scorecard - - Name: Validating Operator bundles - File: osdk-bundle-validate - - Name: High-availability or single-node cluster detection and support - File: osdk-ha-sno - - Name: Configuring built-in monitoring with Prometheus - File: osdk-monitoring-prometheus - - Name: Configuring leader election - File: osdk-leader-election - - Name: Object pruning utility - File: osdk-pruning-utility - - Name: Migrating package manifest projects to bundle format - File: osdk-pkgman-to-bundle - - Name: Operator SDK CLI reference - File: osdk-cli-ref - - Name: Migrating to Operator SDK v0.1.0 - File: osdk-migrating-to-v0-1-0 # ROSA customers can't configure/edit the cluster Operators # - Name: Cluster Operators reference # File: operator-reference diff --git a/_unused_topics/osdk-updating-v1101-to-v1160.adoc b/_unused_topics/osdk-updating-v1101-to-v1160.adoc deleted file mode 100644 index 61484e0e94..0000000000 --- 
a/_unused_topics/osdk-updating-v1101-to-v1160.adoc +++ /dev/null @@ -1,195 +0,0 @@ -// Module included in the following assemblies: -// -// * operators/operator_sdk/osdk-upgrading-projects.adoc - -:osdk_ver: v1.16.0 -:osdk_ver_n1: v1.10.1 - -:_mod-docs-content-type: PROCEDURE -[id="osdk-upgrading-v1101-to-v1160_{context}"] -= Updating projects for Operator SDK {osdk_ver} - -The following procedure updates an existing Operator project for compatibility with {osdk_ver}. - -[IMPORTANT] -==== -* Operator SDK v1.16.0 supports Kubernetes 1.22. - -* Many deprecated `v1beta1` APIs were removed in Kubernetes 1.22, including `sigs.k8s.io/controller-runtime v0.10.0` and `controller-gen v0.7`. - -* Updating projects to Kubernetes 1.22 is a breaking change if you need to scaffold `v1beta1` APIs for custom resource definitions (CRDs) or webhooks to publish your project into older cluster versions. - -See link:https://docs.openshift.com/container-platform/4.9/release_notes/ocp-4-9-release-notes.html#ocp-4-9-osdk-k8s-api-bundle-validate[Validating bundle manifests for APIs removed from Kubernetes 1.22] and link:https://docs.openshift.com/container-platform/4.9/release_notes/ocp-4-9-release-notes.html#ocp-4-9-removed-kube-1-22-apis[Beta APIs removed from Kubernetes 1.22] for more information about changes introduced in Kubernetes 1.22. -==== - -.Prerequisites - -* Operator SDK {osdk_ver} installed. -* An Operator project created or maintained with Operator SDK {osdk_ver_n1}. - -.Procedure - -. Add the `protocol` field in the `config/default/manager_auth_proxy_patch.yaml` and `config/rbac/auth_proxy_service.yaml` files: -+ -[source,diff] ----- -... - ports: - - containerPort: 8443 -+ protocol: TCP - name: https ----- - -. Make the following changes to the `config/manager/manager.yaml` file: - -.. Increase the CPU and memory resource limits: -+ -[source,diff] ----- -resources: - limits: -- cpu: 100m -- memory: 30Mi -+ cpu: 200m -+ memory: 100Mi ----- - -.. 
Add an annotation to specify the default container manager: -+ -[source,yaml] ----- -... -template: - metadata: - annotations: - kubectl.kubernetes.io/default-container: manager -... ----- - -. Add `PHONY` targets to all of the targets in your `Makefile` file. - -. For Go-based Operator projects, make the following changes: - -.. Install the `setup-envtest` binary. - -.. Change your `go.mod` file to update the dependencies: -+ -[source,golang] ----- -k8s.io/api v0.22.1 -k8s.io/apimachinery v0.22.1 -k8s.io/client-go v0.22.1 -sigs.k8s.io/controller-runtime v0.10.0 ----- - -.. Run the `go mod tidy` command to download the dependencies: -+ -[source,terminal] ----- -$ go mod tidy ----- - -.. Make the following changes to your `Makefile` file: -+ -[source,diff] ----- -... - -+ ENVTEST_K8S_VERSION = 1.22 - - test: manifests generate fmt vet envtest ## Run tests. -- go test ./... -coverprofile cover.out -+ KUBEBUILDER_ASSETS="$(shell $(ENVTEST) use $(ENVTEST_K8S_VERSION) -p path)" go test ./... -coverprofile cover.out -... - -- $(CONTROLLER_GEN) $(CRD_OPTIONS) rbac:roleName=manager-role webhook paths="./..." output:crd:artifacts:config=config/crd/bases -+ $(CONTROLLER_GEN) rbac:roleName=manager-role crd webhook paths="./..." output:crd:artifacts:config=config/crd/bases -... - -# Produce CRDs that work back to Kubernetes 1.11 (no version conversion) -- CRD_OPTIONS ?= "crd:trivialVersions=true,preserveUnknownFields=false" -... -- admissionReviewVersions={v1,v1beta1} -+ admissionReviewVersions=v1 -... - -+ ifndef ignore-not-found -+ ignore-not-found = false -+ endif - -##@ Deployment -... -- sh kubectl delete -f - -+ sh kubectl delete --ignore-not-found=$(ignore-not-found) -f - ----- - -.. Run the `make manifest` command to generate your manifests with the updated version of Kubernetes: -+ -[source,terminal] ----- -$ make manifest ----- - -. For Ansible-based Operator projects, make the following changes: -+ -.. 
Change your `requirements.yml` file to include the following: - -... Replace the `community.kubernetes` collection with the `kubernetes.core` collection: -+ -[source,yaml] ----- -... -- name: kubernetes.core - version: "2.2.0" -... ----- - -... Update the `operator_sdk.util` utility from version `0.2.0` to `0.3.1`: -+ -[source,yaml] ----- -... -- name: operator_sdk.util - version: "0.3.1" ----- - -.. Verify the default resource limits in the `config/manager/manager.yaml` file: -+ -[source,yaml] ----- -... - # TODO(user): Configure the resources accordingly based on the project requirements. - # More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ - -resources: - limits: - cpu: 500m - memory: 768Mi - requests: - cpu: 10m - memory: 256Mi ----- -+ -[IMPORTANT] -==== -Operator SDK scaffolds these values as a reasonable default setting. Operator authors should set and optimize resource limits based on the requirements of their project. -==== - -.. Optional: Make the following changes if you want to run your Ansible-based Operator locally by using the `make run` command: - -... Change the run target in the `Makefile` file: -+ -[source,terminal] ----- -ANSIBLE_ROLES_PATH="$(ANSIBLE_ROLES_PATH):$(shell pwd)/roles" $(ANSIBLE_OPERATOR) run ----- - -... Update the local version of `ansible-runner` to 2.0.2 or later. -+ -[IMPORTANT] -==== -As of version 2.0, the `ansible-runner` tool includes changes in the command signature that are not compatible with earlier versions. 
-==== - -:!osdk_ver: -:!osdk_ver_n1: diff --git a/_unused_topics/osdk-updating-v125-to-v128.adoc b/_unused_topics/osdk-updating-v125-to-v128.adoc deleted file mode 100644 index 1bbbe0a635..0000000000 --- a/_unused_topics/osdk-updating-v125-to-v128.adoc +++ /dev/null @@ -1,130 +0,0 @@ -// Module included in the following assemblies: -// -// * operators/operator_sdk/golang/osdk-golang-updating-projects.adoc -// * operators/operator_sdk/ansible/osdk-ansible-updating-projects.adoc -// * operators/operator_sdk/helm/osdk-helm-updating-projects.adoc -// * operators/operator_sdk/helm/ - -ifeval::["{context}" == "osdk-golang-updating-projects"] -:golang: -:type: Go -endif::[] -ifeval::["{context}" == "osdk-ansible-updating-projects"] -:ansible: -:type: Ansible -endif::[] -ifeval::["{context}" == "osdk-helm-updating-projects"] -:helm: -:type: Helm -endif::[] - -:_mod-docs-content-type: PROCEDURE -[id="osdk-upgrading-projects_{context}"] -= Updating {type}-based Operator projects for Operator SDK {osdk_ver} - -The following procedure updates an existing {type}-based Operator project for compatibility with {osdk_ver}. - -.Prerequisites - -* Operator SDK {osdk_ver} installed -* An Operator project created or maintained with Operator SDK {osdk_ver_n1} - -.Procedure - -ifdef::helm[] -* Find the `ose-kube-rbac-proxy` pull spec in the following files, and update the image tag to `v4.14`: -endif::[] -ifdef::ansible,golang[] -. Find the `ose-kube-rbac-proxy` pull spec in the following files, and update the image tag to `v4.14`: -endif::[] -+ --- -* `config/default/manager_auth_proxy_patch.yaml` -* `bundle/manifests/memcached-operator.clusterserviceversion.yaml` --- -+ -[source,yaml] ----- -… - containers: - - name: kube-rbac-proxy - image: registry.redhat.io/openshift4/ose-kube-rbac-proxy:v4.14 <1> -… ----- -<1> Update the tag version from `v4.13` to `v4.14`. - -ifdef::ansible[] -. 
Update your Makefile's `run` target to the following: -+ -[source,make] ----- -.PHONY: run -ANSIBLE_ROLES_PATH?="$(shell pwd)/roles" -run: ansible-operator ## Run against the configured Kubernetes cluster in ~/.kube/config - $(ANSIBLE_OPERATOR) run ----- - -. To upgrade the `kubernetes.core` collection to v2.4.0, replace the following in your project's `requirements.yaml` file: -+ -[source,yaml] ----- - - name: kubernetes.core - version: "2.3.1" ----- -+ -with: -+ -[source,yaml] ----- - - name: kubernetes.core - version: "2.4.0" ----- -endif::[] - -ifdef::golang[] -. Modify your `go.mod` file to include the following dependencies and updated versions: -+ -[source,go] ----- -k8s.io/api v0.26.2 -k8s.io/apiextensions-apiserver v0.26.2 -k8s.io/apimachinery v0.26.2 -k8s.io/cli-runtime v0.26.2 -k8s.io/client-go v0.26.2 -k8s.io/kubectl v0.26.2 -sigs.k8s.io/controller-runtime v0.14.5 -sigs.k8s.io/controller-tools v0.11.3 -sigs.k8s.io/kubebuilder/v3 v3.9.1 ----- - -. Download the latest dependencies by running the following command: -+ -[source,terminal] ----- -$ go mod tidy ----- - -. Modify your Makefile with the following changes: - -.. Change the `ENVTEST_K8S_VERSION` field from `1.26` to `1.27`. -.. Change the `build` target from `generate fmt vet` to `manifests generate fmt vet`: -+ -[source,diff] ----- - - build: generate fmt vet ## Build manager binary. - + build: manifests generate fmt vet ## Build manager binary. 
----- -endif::[] - -ifeval::["{context}" == "osdk-golang-updating-projects"] -:!golang: -:!type: -endif::[] -ifeval::["{context}" == "osdk-ansible-updating-projects"] -:!ansible: -:!type: -endif::[] -ifeval::["{context}" == "osdk-helm-updating-projects"] -:!helm: -:!type: -endif::[] \ No newline at end of file diff --git a/_unused_topics/osdk-upgrading-v180-to-v1101.adoc b/_unused_topics/osdk-upgrading-v180-to-v1101.adoc deleted file mode 100644 index 89e8643443..0000000000 --- a/_unused_topics/osdk-upgrading-v180-to-v1101.adoc +++ /dev/null @@ -1,39 +0,0 @@ -// Module included in the following assemblies: -// -// * operators/operator_sdk/osdk-upgrading-projects.adoc - -:osdk_ver: v1.10.1 -:osdk_ver_n1: v1.8.0 - -:_mod-docs-content-type: PROCEDURE -[id="osdk-upgrading-v180-to-v1101_{context}"] -= Upgrading projects for Operator SDK {osdk_ver} - -The following upgrade steps must be performed to upgrade an existing Operator project for compatibility with {osdk_ver}. - -.Prerequisites - -- Operator SDK {osdk_ver} installed -- Operator project that was previously created or maintained with Operator SDK {osdk_ver_n1} - -.Procedure - -* For Ansible-based Operator projects, update the command in the `Set pull policy` section of the `molecule/default/prepare.yml` file: -+ -.`molecule/default/prepare.yml` file diff -[%collapsible] -==== -[source,diff] ----- - - name: Set pull policy -- command: '{{ "{{ kustomize }}" }} edit add patch pull_policy/{{ "{{ operator_pull_policy }}" }}.yaml' -+ command: '{{ "{{ kustomize }}" }} edit add patch --path pull_policy/{{ "{{ operator_pull_policy }}" }}.yaml' ----- -==== -+ -Ansible projects are now scaffolded with Kustomize version 3.8.7. This version of Kustomize requires that the path to patch files be provided with the `--path` flag in the `add patch` command. - -Your Operator project is now compatible with Operator SDK {osdk_ver}. 
- -:!osdk_ver: -:!osdk_ver_n1: diff --git a/architecture/control-plane.adoc b/architecture/control-plane.adoc index fb2fa4b8e0..4d68160fc8 100644 --- a/architecture/control-plane.adoc +++ b/architecture/control-plane.adoc @@ -54,8 +54,8 @@ include::modules/arch-olm-operators.adoc[leveloffset=+2] [role="_additional-resources"] .Additional resources -* For more details on running add-on Operators in {product-title}, see the _Operators_ guide sections on xref:../operators/understanding/olm/olm-understanding-olm.adoc#olm-understanding-olm[Operator Lifecycle Manager (OLM)] and xref:../operators/understanding/olm-understanding-operatorhub.adoc#olm-understanding-operatorhub[OperatorHub]. -* For more details on the Operator SDK, see xref:../operators/operator_sdk/osdk-about.adoc#osdk-about[Developing Operators]. +* xref:../operators/understanding/olm/olm-understanding-olm.adoc#olm-understanding-olm[Operator Lifecycle Manager (OLM) concepts and resources] +* xref:../operators/understanding/olm-understanding-operatorhub.adoc#olm-understanding-operatorhub[Understanding OperatorHub]
include::modules/etcd-overview.adoc[leveloffset=+1] @@ -71,4 +71,4 @@ ifndef::openshift-dedicated,openshift-rosa[] .Additional resources * xref:../scalability_and_performance/recommended-performance-scale-practices/recommended-etcd-practices.adoc#recommended-etcd-practices[Recommended etcd practices] * xref:../backup_and_restore/control_plane_backup_and_restore/backing-up-etcd.adoc#backing-up-etcd[Backing up etcd] -endif::openshift-dedicated,openshift-rosa[] \ No newline at end of file +endif::openshift-dedicated,openshift-rosa[] diff --git a/cli_reference/index.adoc b/cli_reference/index.adoc index 4c3eeb9190..787c69ea6f 100644 --- a/cli_reference/index.adoc +++ b/cli_reference/index.adoc @@ -15,7 +15,6 @@ such as the following: * Managing clusters * Building, deploying, and managing applications * Managing deployment processes -* Developing Operators * Creating and maintaining Operator catalogs ifndef::openshift-rosa[] @@ -60,8 +59,6 @@ using the terminal. Unlike the web console, it allows the user to work directly * xref:../cli_reference/opm/cli-opm-install.adoc#cli-opm-install[opm CLI]: The `opm` CLI tool helps the Operator developers and cluster administrators to create and maintain the catalogs of Operators from the terminal. -* xref:../cli_reference/osdk/cli-osdk-install.adoc#cli-osdk-install[Operator SDK]: The Operator SDK, a component of the Operator Framework, provides a CLI tool that Operator developers can use to build, test, and deploy an Operator from the terminal. It simplifies the process of building Kubernetes-native applications, which can require deep, application-specific operational knowledge. - ifdef::openshift-rosa,openshift-rosa-hcp[] * xref:../cli_reference/rosa_cli/rosa-get-started-cli.adoc#rosa-get-started-cli[ROSA CLI (`rosa`)]: Use the `rosa` CLI to create, update, manage, and delete ROSA clusters and resources. 
-endif::openshift-rosa,openshift-rosa-hcp[] \ No newline at end of file +endif::openshift-rosa,openshift-rosa-hcp[] diff --git a/cli_reference/opm/cli-opm-install.adoc b/cli_reference/opm/cli-opm-install.adoc index 8709cefc1a..fe80190213 100644 --- a/cli_reference/opm/cli-opm-install.adoc +++ b/cli_reference/opm/cli-opm-install.adoc @@ -13,8 +13,7 @@ ifndef::openshift-rosa,openshift-rosa-hcp,openshift-dedicated[] .Additional resources * See xref:../../operators/understanding/olm-packaging-format.adoc#olm-bundle-format_olm-packaging-format[Operator Framework packaging format] for more information about the bundle format. -* To create a bundle image using the Operator SDK, see -xref:../../operators/operator_sdk/osdk-working-bundle-images.adoc#osdk-working-bundle-images[Working with bundle images]. + endif::openshift-rosa,openshift-rosa-hcp,openshift-dedicated[] include::modules/olm-installing-opm.adoc[leveloffset=+1] @@ -24,4 +23,4 @@ ifndef::openshift-rosa,openshift-rosa-hcp,openshift-dedicated[] == Additional resources * See xref:../../operators/admin/olm-managing-custom-catalogs.adoc#olm-managing-custom-catalogs[Managing custom catalogs] for `opm` procedures including creating, updating, and pruning catalogs. 
-endif::openshift-rosa,openshift-rosa-hcp,openshift-dedicated[] \ No newline at end of file +endif::openshift-rosa,openshift-rosa-hcp,openshift-dedicated[] diff --git a/cli_reference/osdk/_attributes b/cli_reference/osdk/_attributes deleted file mode 120000 index 20cc1dcb77..0000000000 --- a/cli_reference/osdk/_attributes +++ /dev/null @@ -1 +0,0 @@ -../../_attributes/ \ No newline at end of file diff --git a/cli_reference/osdk/cli-osdk-install.adoc b/cli_reference/osdk/cli-osdk-install.adoc deleted file mode 100644 index 41819e53df..0000000000 --- a/cli_reference/osdk/cli-osdk-install.adoc +++ /dev/null @@ -1,25 +0,0 @@ -:_mod-docs-content-type: ASSEMBLY -[id="cli-osdk-install"] -= Installing the Operator SDK CLI -include::_attributes/common-attributes.adoc[] -:context: cli-osdk-install - -toc::[] - -The Operator SDK provides a command-line interface (CLI) tool that Operator developers can use to build, test, and deploy an Operator. You can install the Operator SDK CLI on your workstation so that you are prepared to start authoring your own Operators. - -include::snippets/osdk-deprecation.adoc[] - -Operator authors with cluster administrator access to a Kubernetes-based cluster, such as {product-title}, can use the Operator SDK CLI to develop their own Operators based on Go, Ansible, Java, or Helm. link:https://kubebuilder.io/[Kubebuilder] is embedded into the Operator SDK as the scaffolding solution for Go-based Operators, which means existing Kubebuilder projects can be used as is with the Operator SDK and continue to work. -ifndef::openshift-rosa,openshift-rosa-hcp,openshift-dedicated[] -See xref:../../operators/operator_sdk/osdk-about.adoc#osdk-about[Developing Operators] for full documentation on the Operator SDK. - -[NOTE] -==== -{product-title} {product-version} supports Operator SDK {osdk_ver}. 
-==== -endif::openshift-rosa,openshift-rosa-hcp,openshift-dedicated[] - -include::modules/osdk-installing-cli-linux-macos.adoc[leveloffset=+1] - -include::modules/osdk-installing-cli-macos.adoc[leveloffset=+1] diff --git a/cli_reference/osdk/cli-osdk-ref.adoc b/cli_reference/osdk/cli-osdk-ref.adoc deleted file mode 100644 index 2a475f03ac..0000000000 --- a/cli_reference/osdk/cli-osdk-ref.adoc +++ /dev/null @@ -1,56 +0,0 @@ -:_mod-docs-content-type: ASSEMBLY -[id="cli-osdk-ref"] -= Operator SDK CLI reference -include::_attributes/common-attributes.adoc[] -:context: cli-osdk-ref - -toc::[] - -The Operator SDK command-line interface (CLI) is a development kit designed to make writing Operators easier. - -include::snippets/osdk-deprecation.adoc[] - -.Operator SDK CLI syntax -[source,terminal] ----- -$ operator-sdk [] [] [] ----- -ifndef::openshift-rosa,openshift-rosa-hcp,openshift-dedicated[] -See xref:../../operators/operator_sdk/osdk-about.adoc#osdk-about[Developing Operators] for full documentation on the Operator SDK. -endif::openshift-rosa,openshift-rosa-hcp,openshift-dedicated[] - -include::modules/osdk-cli-ref-bundle.adoc[leveloffset=+1] -include::modules/osdk-cli-ref-cleanup.adoc[leveloffset=+1] -include::modules/osdk-cli-ref-completion.adoc[leveloffset=+1] -include::modules/osdk-cli-ref-create.adoc[leveloffset=+1] -include::modules/osdk-cli-ref-generate.adoc[leveloffset=+1] -include::modules/osdk-cli-ref-generate-bundle.adoc[leveloffset=+2] - -ifndef::openshift-rosa,openshift-rosa-hcp,openshift-dedicated[] -[role="_additional-resources"] -.Additional resources - -* See xref:../../operators/operator_sdk/osdk-working-bundle-images.adoc#osdk-bundle-deploy-olm_osdk-working-bundle-images[Bundling an Operator and deploying with Operator Lifecycle Manager] for a full procedure that includes using the `make bundle` command to call the `generate bundle` subcommand. 
-endif::openshift-rosa,openshift-rosa-hcp,openshift-dedicated[] - -include::modules/osdk-cli-ref-generate-kustomize.adoc[leveloffset=+2] - -include::modules/osdk-cli-ref-init.adoc[leveloffset=+1] -include::modules/osdk-cli-ref-run.adoc[leveloffset=+1] -include::modules/osdk-cli-ref-run-bundle.adoc[leveloffset=+2] -ifndef::openshift-rosa,openshift-rosa-hcp,openshift-dedicated[] -[role="_additional-resources"] -.Additional resources - -* See xref:../../operators/understanding/olm/olm-understanding-operatorgroups.adoc#olm-operatorgroups-membership_olm-understanding-operatorgroups[Operator group membership] for details on possible install modes. -endif::openshift-rosa,openshift-rosa-hcp,openshift-dedicated[] - -include::modules/osdk-cli-ref-run-bundle-upgrade.adoc[leveloffset=+2] -include::modules/osdk-cli-ref-scorecard.adoc[leveloffset=+1] -ifndef::openshift-rosa,openshift-rosa-hcp,openshift-dedicated[] - -[role="_additional-resources"] -.Additional resources - -* See xref:../../operators/operator_sdk/osdk-scorecard.adoc#osdk-scorecard[Validating Operators using the scorecard tool] for details about running the scorecard tool. 
-endif::openshift-rosa,openshift-rosa-hcp,openshift-dedicated[] diff --git a/cli_reference/osdk/images b/cli_reference/osdk/images deleted file mode 120000 index 847b03ed05..0000000000 --- a/cli_reference/osdk/images +++ /dev/null @@ -1 +0,0 @@ -../../images/ \ No newline at end of file diff --git a/cli_reference/osdk/modules b/cli_reference/osdk/modules deleted file mode 120000 index 36719b9de7..0000000000 --- a/cli_reference/osdk/modules +++ /dev/null @@ -1 +0,0 @@ -../../modules/ \ No newline at end of file diff --git a/cli_reference/osdk/snippets b/cli_reference/osdk/snippets deleted file mode 120000 index 5a3f5add14..0000000000 --- a/cli_reference/osdk/snippets +++ /dev/null @@ -1 +0,0 @@ -../../snippets/ \ No newline at end of file diff --git a/disconnected/mirroring/installing-mirroring-installation-images.adoc b/disconnected/mirroring/installing-mirroring-installation-images.adoc index 7b0c50c46f..9264d26baa 100644 --- a/disconnected/mirroring/installing-mirroring-installation-images.adoc +++ b/disconnected/mirroring/installing-mirroring-installation-images.adoc @@ -119,16 +119,8 @@ $ REG_CREDS=${XDG_RUNTIME_DIR}/containers/auth.json include::modules/olm-mirroring-catalog-extracting.adoc[leveloffset=+2] include::modules/olm-mirroring-catalog-colocated.adoc[leveloffset=+3] -[role="_additional-resources"] -.Additional resources -* xref:../../operators/operator_sdk/osdk-generating-csvs.adoc#olm-arch-os-support_osdk-generating-csvs[Architecture and operating system support for Operators] - include::modules/olm-mirroring-catalog-airgapped.adoc[leveloffset=+3] -[role="_additional-resources"] -.Additional resources -* xref:../../operators/operator_sdk/osdk-generating-csvs.adoc#olm-arch-os-support_osdk-generating-csvs[Architecture and operating system support for Operators] - include::modules/olm-mirroring-catalog-manifests.adoc[leveloffset=+2] include::modules/olm-mirroring-catalog-post.adoc[leveloffset=+2] diff --git a/disconnected/using-olm.adoc 
b/disconnected/using-olm.adoc index fb0ab8ad3d..674afea679 100644 --- a/disconnected/using-olm.adoc +++ b/disconnected/using-olm.adoc @@ -39,7 +39,6 @@ Infrastructure features:: Disconnected .Additional resources * xref:../operators/understanding/olm-rh-catalogs.adoc#olm-rh-catalogs[Red{nbsp}Hat-provided Operator catalogs] -* xref:../operators/operator_sdk/osdk-generating-csvs.adoc#olm-enabling-operator-for-restricted-network_osdk-generating-csvs[Enabling your Operator for restricted network environments] [id="olm-restricted-network-prereqs"] == Prerequisites @@ -76,4 +75,4 @@ include::modules/olm-creating-catalog-from-index.adoc[leveloffset=+1] [id="next-steps_olm-restricted-networks"] == Next steps -* xref:../operators/admin/olm-upgrading-operators.adoc#olm-upgrading-operators[Updating installed Operators] \ No newline at end of file +* xref:../operators/admin/olm-upgrading-operators.adoc#olm-upgrading-operators[Updating installed Operators] diff --git a/extensions/catalogs/managing-catalogs.adoc b/extensions/catalogs/managing-catalogs.adoc index 530a8a5703..297924b695 100644 --- a/extensions/catalogs/managing-catalogs.adoc +++ b/extensions/catalogs/managing-catalogs.adoc @@ -15,8 +15,6 @@ _File-based catalogs_ are the latest iteration of the catalog format in Operator [IMPORTANT] ==== Kubernetes periodically deprecates certain APIs that are removed in subsequent releases. As a result, Operators are unable to use removed APIs starting with the version of {product-title} that uses the Kubernetes version that removed the API. - -If your cluster is using custom catalogs, see xref:../../operators/operator_sdk/osdk-working-bundle-images.adoc#osdk-control-compat_osdk-working-bundle-images[Controlling Operator compatibility with {product-title} versions] for more details about how Operator authors can update their projects to help avoid workload issues and prevent incompatible upgrades. 
==== include::modules/olmv1-about-catalogs.adoc[leveloffset=+1] diff --git a/getting_started/openshift-overview.adoc b/getting_started/openshift-overview.adoc index 7646eb7a72..00b2c0fcc4 100644 --- a/getting_started/openshift-overview.adoc +++ b/getting_started/openshift-overview.adoc @@ -76,9 +76,6 @@ describes an application that can be deployed using the Helm CLI. * **xref:../operators/understanding/olm-what-operators-are.adoc#olm-what-operators-are[Understand Operators]**: Operators are the preferred method for creating on-cluster applications for {product-title} {product-version}. Learn about the Operator Framework and how to deploy applications using installed Operators into your projects. -* **xref:../operators/operator_sdk/osdk-about.adoc#osdk-about[Develop Operators]**: Operators are the preferred method for creating on-cluster applications for {product-title} {product-version}. Learn the workflow for building, testing, and deploying Operators. Then, create your own Operators based on xref:../operators/operator_sdk/ansible/osdk-ansible-support.adoc#osdk-ansible-support[Ansible] or -xref:../operators/operator_sdk/helm/osdk-helm-support.adoc#osdk-helm-support[Helm], or configure xref:../operators/operator_sdk/osdk-monitoring-prometheus.adoc#osdk-monitoring-prometheus[built-in Prometheus monitoring] using the Operator SDK. - * **xref:../rest_api/overview/index.adoc#api-index[REST API reference]**: Learn about {product-title} application programming interface endpoints. === For administrators diff --git a/modules/arch-olm-operators.adoc b/modules/arch-olm-operators.adoc index d2bc7d9db1..9aac5dddbb 100644 --- a/modules/arch-olm-operators.adoc +++ b/modules/arch-olm-operators.adoc @@ -32,8 +32,6 @@ All Operators listed in the Operator Hub marketplace should be available for ins ==== endif::openshift-dedicated,openshift-rosa[] -Developers can use the Operator SDK to help author custom Operators that take advantage of OLM features, as well. 
Their Operator can then be bundled and added to a custom catalog source, which can be added to a cluster and made available to users. - [NOTE] ==== OLM does not manage the cluster Operators that comprise the {product-title} architecture. diff --git a/modules/building-memcached-operator-using-osdk.adoc b/modules/building-memcached-operator-using-osdk.adoc deleted file mode 100644 index 5406c77dec..0000000000 --- a/modules/building-memcached-operator-using-osdk.adoc +++ /dev/null @@ -1,443 +0,0 @@ -// Module included in the following assemblies: -// -// * operators/operator_sdk/osdk-getting-started.adoc - -[id="building-memcached-operator-using-osdk_{context}"] -= Building a Go-based Operator using the Operator SDK - -This procedure walks through an example of building a simple Memcached Operator using tools and libraries provided by the SDK. - -.Prerequisites - -- Operator SDK CLI installed on the development workstation -- Operator Lifecycle Manager (OLM) installed on a Kubernetes-based cluster (v1.8 -or above to support the `apps/v1beta2` API group), for example {product-title} {product-version} -- Access to the cluster using an account with `cluster-admin` permissions -- OpenShift CLI (`oc`) v{product-version}+ installed - -.Procedure - -. *Create a new project.* -+ -Use the CLI to create a new `memcached-operator` project: -+ -[source,terminal] ----- -$ mkdir -p $GOPATH/src/github.com/example-inc/ ----- -+ -[source,terminal] ----- -$ cd $GOPATH/src/github.com/example-inc/ ----- -+ -[source,terminal] ----- -$ operator-sdk new memcached-operator ----- -+ -[source,terminal] ----- -$ cd memcached-operator ----- - -. *Add a new custom resource definition (CRD).* - -.. 
Use the CLI to add a new CRD API called `Memcached`, with `APIVersion` set to `cache.example.com/v1apha1` and `Kind` set to `Memcached`: -+ -[source,terminal] ----- -$ operator-sdk add api \ - --api-version=cache.example.com/v1alpha1 \ - --kind=Memcached ----- -+ -This scaffolds the Memcached resource API under `pkg/apis/cache/v1alpha1/`. - -.. Modify the spec and status of the `Memcached` custom resource (CR) at the `pkg/apis/cache/v1alpha1/memcached_types.go` file: -+ -[source,go] ----- -type MemcachedSpec struct { - // Size is the size of the memcached deployment - Size int32 `json:"size"` -} -type MemcachedStatus struct { - // Nodes are the names of the memcached pods - Nodes []string `json:"nodes"` -} ----- - -.. After modifying the `*_types.go` file, always run the following command to update the generated code for that resource type: -+ -[source,terminal] ----- -$ operator-sdk generate k8s ----- - -. *Optional: Add custom validation to your CRD.* -+ -OpenAPI v3.0 schemas are added to CRD manifests in the `spec.validation` block when the manifests are generated. This validation block allows Kubernetes to validate the properties in a Memcached CR when it is created or updated. -+ -Additionally, a `pkg/apis///zz_generated.openapi.go` file is generated. This file contains the Go representation of this validation block if the `+k8s:openapi-gen=true annotation` is present above the `Kind` type declaration, which is present by default. This auto-generated code is the OpenAPI model of your Go `Kind` type, from which you can create a full OpenAPI Specification and generate a client. -+ -As an Operator author, you can use Kubebuilder markers (annotations) to configure custom validations for your API. These markers must always have a `+kubebuilder:validation` prefix. 
For example, adding an enum-type specification can be done by adding the following marker: -+ -[source,go] ----- -// +kubebuilder:validation:Enum=Lion;Wolf;Dragon -type Alias string ----- -+ -Usage of markers in API code is discussed in the Kubebuilder link:https://book.kubebuilder.io/reference/generating-crd.html[Generating CRDs] and link:https://book.kubebuilder.io/reference/markers.html[Markers for Config/Code Generation] documentation. A full list of OpenAPIv3 validation markers is also available in the Kubebuilder link:https://book.kubebuilder.io/reference/markers/crd-validation.html[CRD Validation] documentation. -+ -If you add any custom validations, run the following command to update the OpenAPI validation section in the `deploy/crds/cache.example.com_memcacheds_crd.yaml` file for the CRD: -+ -[source,terminal] ----- -$ operator-sdk generate crds ----- -+ -.Example generated YAML -[source,yaml] ----- -spec: - validation: - openAPIV3Schema: - properties: - spec: - properties: - size: - format: int32 - type: integer ----- - -. *Add a new controller.* - -.. Add a new controller to the project to watch and reconcile the `Memcached` resource: -+ -[source,terminal] ----- -$ operator-sdk add controller \ - --api-version=cache.example.com/v1alpha1 \ - --kind=Memcached ----- -+ -This scaffolds a new controller implementation under `pkg/controller/memcached/`. - -.. For this example, replace the generated controller file `pkg/controller/memcached/memcached_controller.go` with the link:https://github.com/operator-framework/operator-sdk/blob/master/example/memcached-operator/memcached_controller.go.tmpl[example implementation]. -+ -The example controller executes the following reconciliation logic for each `Memcached` resource: -+ --- -* Create a Memcached deployment if it does not exist. -* Ensure that the Deployment size is the same as specified by the `Memcached` CR spec. -* Update the `Memcached` resource status with the names of the Memcached pods. 
--- -+ -The next two sub-steps inspect how the controller watches resources and how the reconcile loop is triggered. You can skip these steps to go directly to building and running the Operator. - -.. Inspect the controller implementation at the `pkg/controller/memcached/memcached_controller.go` file to see how the controller watches resources. -+ -The first watch is for the `Memcached` type as the primary resource. For each add, update, or delete event, the reconcile loop is sent a reconcile `Request` (a `:` key) for that `Memcached` object: -+ -[source,go] ----- -err := c.Watch( - &source.Kind{Type: &cachev1alpha1.Memcached{}}, &handler.EnqueueRequestForObject{}) ----- -+ -The next watch is for `Deployment` objects, but the event handler maps each event to a reconcile `Request` for the owner of the deployment. In this case, this is the `Memcached` object for which the deployment was created. This allows the controller to watch deployments as a secondary resource: -+ -[source,go] ----- -err := c.Watch(&source.Kind{Type: &appsv1.Deployment{}}, &handler.EnqueueRequestForOwner{ - IsController: true, - OwnerType: &cachev1alpha1.Memcached{}, - }) ----- - -.. Every controller has a `Reconciler` object with a `Reconcile()` method that implements the reconcile loop. The reconcile loop is passed the `Request` argument which is a `:` key used to lookup the primary resource object, `Memcached`, from the cache: -+ -[source,go] ----- -func (r *ReconcileMemcached) Reconcile(request reconcile.Request) (reconcile.Result, error) { - // Lookup the Memcached instance for this reconcile request - memcached := &cachev1alpha1.Memcached{} - err := r.client.Get(context.TODO(), request.NamespacedName, memcached) - ... 
-} ----- -+ -Based on the return value of the `Reconcile()` function, the reconcile `Request` might be requeued, and the loop might be triggered again: -+ -[source,go] ----- -// Reconcile successful - don't requeue -return reconcile.Result{}, nil -// Reconcile failed due to error - requeue -return reconcile.Result{}, err -// Requeue for any reason other than error -return reconcile.Result{Requeue: true}, nil ----- -[id="building-memcached-operator-using-osdk-build-and-run_{context}"] - -. *Build and run the Operator.* - -.. Before running the Operator, the CRD must be registered with the Kubernetes API server: -+ -[source,terminal] ----- -$ oc create \ - -f deploy/crds/cache_v1alpha1_memcached_crd.yaml ----- - -.. After registering the CRD, there are two options for running the Operator: -+ --- -* As a Deployment inside a Kubernetes cluster -* As Go program outside a cluster --- -+ -Choose one of the following methods. - -... _Option A:_ Running as a deployment inside the cluster. - -.... Build the `memcached-operator` image and push it to a registry: -+ -[source,terminal] ----- -$ operator-sdk build quay.io/example/memcached-operator:v0.0.1 ----- - -.... The deployment manifest is generated at `deploy/operator.yaml`. Update the deployment image as follows since the default is just a placeholder: -+ -[source,terminal] ----- -$ sed -i 's|REPLACE_IMAGE|quay.io/example/memcached-operator:v0.0.1|g' deploy/operator.yaml ----- - -.... Ensure you have an account on link:https://quay.io[Quay.io] for the next step, or substitute your preferred container registry. On the registry, link:https://quay.io/new/[create a new public image] repository named `memcached-operator`. - -.... Push the image to the registry: -+ -[source,terminal] ----- -$ podman push quay.io/example/memcached-operator:v0.0.1 ----- - -.... 
Set up RBAC and create the `memcached-operator` manifests: -+ -[source,terminal] ----- -$ oc create -f deploy/role.yaml ----- -+ -[source,terminal] ----- -$ oc create -f deploy/role_binding.yaml ----- -+ -[source,terminal] ----- -$ oc create -f deploy/service_account.yaml ----- -+ -[source,terminal] ----- -$ oc create -f deploy/operator.yaml ----- - -.... Verify that the `memcached-operator` deploy is up and running: -+ -[source,terminal] ----- -$ oc get deployment ----- -+ -.Example output -[source,terminal] ----- -NAME DESIRED CURRENT UP-TO-DATE AVAILABLE AGE -memcached-operator 1 1 1 1 1m ----- - -... _Option B:_ Running locally outside the cluster. -+ -This method is preferred during development cycle to deploy and test faster. -+ -Run the Operator locally with the default Kubernetes configuration file present at `$HOME/.kube/config`: -+ -[source,terminal] ----- -$ operator-sdk run --local --namespace=default ----- -+ -You can use a specific `kubeconfig` using the flag `--kubeconfig=`. - -. *Verify that the Operator can deploy a Memcached application* by creating a `Memcached` CR. - -.. Create the example `Memcached` CR that was generated at `deploy/crds/cache_v1alpha1_memcached_cr.yaml`. - -.. View the file: -+ -[source,terminal] ----- -$ cat deploy/crds/cache_v1alpha1_memcached_cr.yaml ----- -+ -.Example output -[source,terminal] ----- -apiVersion: "cache.example.com/v1alpha1" -kind: "Memcached" -metadata: - name: "example-memcached" -spec: - size: 3 ----- - -.. Create the object: -+ -[source,terminal] ----- -$ oc apply -f deploy/crds/cache_v1alpha1_memcached_cr.yaml ----- - -.. Ensure that `memcached-operator` creates the deployment for the CR: -+ -[source,terminal] ----- -$ oc get deployment ----- -+ -.Example output -[source,terminal] ----- -NAME DESIRED CURRENT UP-TO-DATE AVAILABLE AGE -memcached-operator 1 1 1 1 2m -example-memcached 3 3 3 3 1m ----- - -.. 
Check the pods and CR to confirm the CR status is updated with the pod names: -+ -[source,terminal] ----- -$ oc get pods ----- -+ -.Example output -[source,terminal] ----- -NAME READY STATUS RESTARTS AGE -example-memcached-6fd7c98d8-7dqdr 1/1 Running 0 1m -example-memcached-6fd7c98d8-g5k7v 1/1 Running 0 1m -example-memcached-6fd7c98d8-m7vn7 1/1 Running 0 1m -memcached-operator-7cc7cfdf86-vvjqk 1/1 Running 0 2m ----- -+ -[source,terminal] ----- -$ oc get memcached/example-memcached -o yaml ----- -+ -.Example output -[source,terminal] ----- -apiVersion: cache.example.com/v1alpha1 -kind: Memcached -metadata: - clusterName: "" - creationTimestamp: 2018-03-31T22:51:08Z - generation: 0 - name: example-memcached - namespace: default - resourceVersion: "245453" - selfLink: /apis/cache.example.com/v1alpha1/namespaces/default/memcacheds/example-memcached - uid: 0026cc97-3536-11e8-bd83-0800274106a1 -spec: - size: 3 -status: - nodes: - - example-memcached-6fd7c98d8-7dqdr - - example-memcached-6fd7c98d8-g5k7v - - example-memcached-6fd7c98d8-m7vn7 ----- - -. *Verify that the Operator can manage a deployed Memcached application* by updating the size of the deployment. - -.. Change the `spec.size` field in the `memcached` CR from `3` to `4`: -+ -[source,terminal] ----- -$ cat deploy/crds/cache_v1alpha1_memcached_cr.yaml ----- -+ -.Example output -[source,terminal] ----- -apiVersion: "cache.example.com/v1alpha1" -kind: "Memcached" -metadata: - name: "example-memcached" -spec: - size: 4 ----- - -.. Apply the change: -+ -[source,terminal] ----- -$ oc apply -f deploy/crds/cache_v1alpha1_memcached_cr.yaml ----- - -.. Confirm that the Operator changes the deployment size: -+ -[source,terminal] ----- -$ oc get deployment ----- -+ -.Example output -[source,terminal] ----- -NAME DESIRED CURRENT UP-TO-DATE AVAILABLE AGE -example-memcached 4 4 4 4 5m ----- - -. 
*Clean up the resources:* -+ -[source,terminal] ----- -$ oc delete -f deploy/crds/cache_v1alpha1_memcached_cr.yaml ----- -+ -[source,terminal] ----- -$ oc delete -f deploy/crds/cache_v1alpha1_memcached_crd.yaml ----- -+ -[source,terminal] ----- -$ oc delete -f deploy/operator.yaml ----- -+ -[source,terminal] ----- -$ oc delete -f deploy/role.yaml ----- -+ -[source,terminal] ----- -$ oc delete -f deploy/role_binding.yaml ----- -+ -[source,terminal] ----- -$ oc delete -f deploy/service_account.yaml ----- - -[role="_additional-resources"] -.Additional resources - -* For more information about OpenAPI v3.0 validation schemas in CRDs, refer to the link:https://kubernetes.io/docs/tasks/access-kubernetes-api/custom-resources/custom-resource-definitions/#specifying-a-structural-schema[Kubernetes documentation]. diff --git a/modules/creating-new-osdk-v0-1-0-project.adoc b/modules/creating-new-osdk-v0-1-0-project.adoc deleted file mode 100644 index dd0f192caf..0000000000 --- a/modules/creating-new-osdk-v0-1-0-project.adoc +++ /dev/null @@ -1,45 +0,0 @@ -// Module included in the following assemblies: -// -// * operators/operator_sdk/osdk-migrating-to-v0-1-0.adoc - -:_mod-docs-content-type: PROCEDURE -[id="creating-new-operator-sdk-v0-1-0-project_{context}"] -= Creating a new Operator SDK v0.1.0 project - -Rename your Operator SDK v0.0.x project and create a new v0.1.0 project in its -place. - -.Prerequisites - -- Operator SDK v0.1.0 CLI installed on the development workstation -- `memcached-operator` project previously deployed using an earlier version of -Operator SDK - -.Procedure - -. Ensure the SDK version is v0.1.0: -+ -[source,terminal] ----- -$ operator-sdk --version -operator-sdk version 0.1.0 ----- - -. 
Create a new project: -+ -[source,terminal] ----- -$ mkdir -p $GOPATH/src/github.com/example-inc/ -$ cd $GOPATH/src/github.com/example-inc/ -$ mv memcached-operator old-memcached-operator -$ operator-sdk new memcached-operator --skip-git-init -$ ls -memcached-operator old-memcached-operator ----- - -. Copy `.git` from the old project: -+ -[source,terminal] ----- -$ cp -rf old-memcached-operator/.git memcached-operator/.git ----- diff --git a/modules/migrating-custom-types-pkg-apis.adoc b/modules/migrating-custom-types-pkg-apis.adoc deleted file mode 100644 index 4a2e159a09..0000000000 --- a/modules/migrating-custom-types-pkg-apis.adoc +++ /dev/null @@ -1,59 +0,0 @@ -// Module included in the following assemblies: -// -// * operators/operator_sdk/osdk-migrating-to-v0-1-0.adoc - -:_mod-docs-content-type: PROCEDURE -[id="migrating-custom-types-from-pkg-apis_{context}"] -= Migrating custom types from pkg/apis - -Migrate your project's custom types to the updated Operator SDK v0.1.0 usage. - -.Prerequisites - -- Operator SDK v0.1.0 CLI installed on the development workstation -- `memcached-operator` project previously deployed using an earlier version of -Operator SDK -- New project created using Operator SDK v0.1.0 - -.Procedure - -. *Create the scaffold API for custom types.* - -.. Create the API for your custom resource (CR) in the new project with -`operator-sdk add api --api-version= --kind=`: -+ -[source,terminal] ----- -$ cd memcached-operator -$ operator-sdk add api --api-version=cache.example.com/v1alpha1 --kind=Memcached - -$ tree pkg/apis -pkg/apis/ -├── addtoscheme_cache_v1alpha1.go -├── apis.go -└── cache - └── v1alpha1 - ├── doc.go - ├── memcached_types.go - ├── register.go - └── zz_generated.deepcopy.go ----- - -.. Repeat the previous command for as many custom types as you had defined in your -old project. Each type will be defined in the file -`pkg/apis///_types.go`. - -. *Copy the contents of the type.* - -.. 
Copy the `Spec` and `Status` contents of the -`pkg/apis///types.go` file from the old project to the new -project's `pkg/apis///_types.go` file. - -.. Each `_types.go` file has an `init()` function. Be sure not to remove that -since that registers the type with the Manager's scheme: -+ -[source,golang] ----- -func init() { - SchemeBuilder.Register(&Memcached{}, &MemcachedList{}) ----- diff --git a/modules/migrating-reconcile-code.adoc b/modules/migrating-reconcile-code.adoc deleted file mode 100644 index aef9419ca7..0000000000 --- a/modules/migrating-reconcile-code.adoc +++ /dev/null @@ -1,313 +0,0 @@ -// Module included in the following assemblies: -// -// * operators/operator_sdk/osdk-migrating-to-v0-1-0.adoc - -:_mod-docs-content-type: PROCEDURE -[id="migrating-reconcile-code_{context}"] -= Migrating reconcile code - -Migrate your project's reconcile code to the update Operator SDK v0.1.0 usage. - -.Prerequisites - -- Operator SDK v0.1.0 CLI installed on the development workstation -- `memcached-operator` project previously deployed using an earlier version of -Operator SDK -- Custom types migrated from `pkg/apis/` - -.Procedure - -. *Add a controller to watch your CR.* -+ -In v0.0.x projects, resources to be watched were previously defined in -`cmd//main.go`: -+ -[source,golang] ----- -sdk.Watch("cache.example.com/v1alpha1", "Memcached", "default", time.Duration(5)*time.Second) ----- -+ -For v0.1.0 projects, you must define a -link:https://godoc.org/github.com/kubernetes-sigs/controller-runtime/pkg#hdr-Controller[Controller] -to watch resources: - -.. Add a controller to watch your CR type with `operator-sdk add controller --api-version= --kind=`. -+ -[source,terminal] ----- -$ operator-sdk add controller --api-version=cache.example.com/v1alpha1 --kind=Memcached - -$ tree pkg/controller -pkg/controller/ -├── add_memcached.go -├── controller.go -└── memcached - └── memcached_controller.go ----- - -.. 
Inspect the `add()` function in your `pkg/controller//_controller.go` file: -+ -[source,golang] ----- -import ( - cachev1alpha1 "github.com/example-inc/memcached-operator/pkg/apis/cache/v1alpha1" - ... -) - -func add(mgr manager.Manager, r reconcile.Reconciler) error { - c, err := controller.New("memcached-controller", mgr, controller.Options{Reconciler: r}) - - // Watch for changes to the primary resource Memcached - err = c.Watch(&source.Kind{Type: &cachev1alpha1.Memcached{}}, &handler.EnqueueRequestForObject{}) - - // Watch for changes to the secondary resource pods and enqueue reconcile requests for the owner Memcached - err = c.Watch(&source.Kind{Type: &corev1.Pod{}}, &handler.EnqueueRequestForOwner{ - IsController: true, - OwnerType: &cachev1alpha1.Memcached{}, - }) -} ----- -+ -Remove the second `Watch()` or modify it to watch a secondary resource type that -is owned by your CR. -+ -Watching multiple resources lets you trigger the reconcile loop for multiple -resources relevant to your application. See the -link:https://godoc.org/github.com/kubernetes-sigs/controller-runtime/pkg#hdr-Watching_and_EventHandling[watching and eventhandling] -documentation and the Kubernetes -link:https://github.com/kubernetes/community/blob/master/contributors/devel/sig-api-machinery/controllers.md[controller conventions] -documentation for more details. -+ -If your Operator is watching more than one CR type, you can do one of the -following depending on your application: -+ --- -** If the CR is owned by your primary CR, watch it as a secondary resource in -the same controller to trigger the reconcile loop for the primary resource. 
-+ -[source,golang] ----- -// Watch for changes to the primary resource Memcached - err = c.Watch(&source.Kind{Type: &cachev1alpha1.Memcached{}}, &handler.EnqueueRequestForObject{}) - - // Watch for changes to the secondary resource AppService and enqueue reconcile requests for the owner Memcached - err = c.Watch(&source.Kind{Type: &appv1alpha1.AppService{}}, &handler.EnqueueRequestForOwner{ - IsController: true, - OwnerType: &cachev1alpha1.Memcached{}, - }) ----- - -** Add a new controller to watch and reconcile the CR independently of the other CR. -+ -[source,terminal] ----- -$ operator-sdk add controller --api-version=app.example.com/v1alpha1 --kind=AppService ----- -+ -[source,golang] ----- - // Watch for changes to the primary resource AppService - err = c.Watch(&source.Kind{Type: &appv1alpha1.AppService{}}, &handler.EnqueueRequestForObject{}) ----- --- - -. *Copy and modify reconcile code from `pkg/stub/handler.go`.* -+ -In a v0.1.0 project, the reconcile code is defined in the `Reconcile()` method -of a controller's -link:https://godoc.org/github.com/kubernetes-sigs/controller-runtime/pkg/reconcile#Reconciler[Reconciler]. -This is similar to the `Handle()` function in the older project. Note the -difference in the arguments and return values: -+ --- -- Reconcile: -+ -[source,golang] ----- - func (r *ReconcileMemcached) Reconcile(request reconcile.Request) (reconcile.Result, error) ----- - -- Handle: -+ -[source,golang] ----- - func (h *Handler) Handle(ctx context.Context, event sdk.Event) error ----- --- -+ -Instead of receiving an `sdk.Event` (with the object), the `Reconcile()` -function receives a -link:https://godoc.org/github.com/kubernetes-sigs/controller-runtime/pkg/reconcile#Request[Request] -(`Name`/`Namespace` key) to look up the object. -+ -If the `Reconcile()` function returns an error, the controller will requeue and -retry the `Request`. 
If no error is returned, then depending on the -link:https://godoc.org/github.com/kubernetes-sigs/controller-runtime/pkg/reconcile#Result[Result], -the controller will either not retry the `Request`, immediately retry, or retry -after a specified duration. - -.. Copy the code from the old project's `Handle()` function to the existing code -in your controller's `Reconcile()` function. Be sure to keep the initial section -in the `Reconcile()` code that looks up the object for the `Request` and checks -to see if it is deleted. -+ -[source,golang] ----- -import ( - apierrors "k8s.io/apimachinery/pkg/api/errors" - cachev1alpha1 "github.com/example-inc/memcached-operator/pkg/apis/cache/v1alpha1" - ... -) -func (r *ReconcileMemcached) Reconcile(request reconcile.Request) (reconcile.Result, error) { - // Fetch the Memcached instance - instance := &cachev1alpha1.Memcached{} - err := r.client.Get(context.TODO() - request.NamespacedName, instance) - if err != nil { - if apierrors.IsNotFound(err) { - // Request object not found, could have been deleted after reconcile request. - // Owned objects are automatically garbage collected. - // Return and don't requeue - return reconcile.Result{}, nil - } - // Error reading the object - requeue the request. - return reconcile.Result{}, err - } - - // Rest of your reconcile code goes here. - ... -} ----- - -.. Change the return values in your reconcile code: - -... Replace `return err` with `return reconcile.Result{}, err`. - -... Replace `return nil` with `return reconcile.Result{}, nil`. - -.. To periodically reconcile a CR in your controller, you can set the -link:https://godoc.org/github.com/kubernetes-sigs/controller-runtime/pkg/reconcile#Result[RequeueAfter] -field for `reconcile.Result`. This will cause the controller to requeue the -`Request` and trigger the reconcile after the desired duration. Note that the -default value of `0` means no requeue. 
-+ -[source,golang] ----- -reconcilePeriod := 30 * time.Second -reconcileResult := reconcile.Result{RequeueAfter: reconcilePeriod} -... - -// Update the status -err := r.client.Update(context.TODO(), memcached) -if err != nil { - log.Printf("failed to update memcached status: %v", err) - return reconcileResult, err -} -return reconcileResult, nil ----- - -.. Replace the calls to the SDK client (Create, Update, Delete, Get, List) with the -reconciler's client. -+ -See the examples below and the `controller-runtime` -link:https://sdk.operatorframework.io/docs/building-operators/golang/references/client/[client API documentation] -in the `operator-sdk` project for more details: -+ -[source,golang] ----- -// Create -dep := &appsv1.Deployment{...} -err := sdk.Create(dep) -// v0.0.1 -err := r.client.Create(context.TODO(), dep) - -// Update -err := sdk.Update(dep) -// v0.0.1 -err := r.client.Update(context.TODO(), dep) - -// Delete -err := sdk.Delete(dep) -// v0.0.1 -err := r.client.Delete(context.TODO(), dep) - -// List -podList := &corev1.PodList{} -labelSelector := labels.SelectorFromSet(labelsForMemcached(memcached.Name)) -listOps := &metav1.ListOptions{LabelSelector: labelSelector} -err := sdk.List(memcached.Namespace, podList, sdk.WithListOptions(listOps)) -// v0.1.0 -listOps := &client.ListOptions{Namespace: memcached.Namespace, LabelSelector: labelSelector} -err := r.client.List(context.TODO(), listOps, podList) - -// Get -dep := &appsv1.Deployment{APIVersion: "apps/v1", Kind: "Deployment", Name: name, Namespace: namespace} -err := sdk.Get(dep) -// v0.1.0 -dep := &appsv1.Deployment{} -err = r.client.Get(context.TODO(), types.NamespacedName{Name: name, Namespace: namespace}, dep) ----- - -.. 
Copy and initialize any other fields from your `Handler` struct into the `Reconcile` struct: -+ -[source,golang] ----- -// newReconciler returns a new reconcile.Reconciler -func newReconciler(mgr manager.Manager) reconcile.Reconciler { - return &ReconcileMemcached{client: mgr.GetClient(), scheme: mgr.GetScheme(), foo: "bar"} -} - -// ReconcileMemcached reconciles a Memcached object -type ReconcileMemcached struct { - client client.Client - scheme *runtime.Scheme - // Other fields - foo string -} ----- - -. *Copy changes from `main.go`.* -+ -The main function for a v0.1.0 Operator in `cmd/manager/main.go` sets up the -link:https://godoc.org/github.com/kubernetes-sigs/controller-runtime/pkg/manager[Manager], -which registers the custom resources and starts all of the controllers. -+ -There is no requirement to migrate the SDK functions `sdk.Watch()`,`sdk.Handle()`, and `sdk.Run()` from the old `main.go` since that logic is now defined in a -controller. -+ -However, if there are any Operator-specific flags or settings defined in the old -`main.go` file, copy them over. -+ -If you have any third party resource types registered with the SDK's scheme, see -link:https://sdk.operatorframework.io/docs/building-operators/golang/advanced-topics/#adding-3rd-party-resources-to-your-operator[Advanced Topics] -in the `operator-sdk` project for how to register them with the Manager's -scheme in the new project. - -. *Copy user-defined files.* -+ -If there are any user-defined `pkgs`, scripts, or documentation in the older -project, copy those files into the new project. - -. *Copy changes to deployment manifests.* -+ -For any updates made to the following manifests in the old project, copy the changes to their corresponding files in the new project. 
Be careful not to -directly overwrite the files, but inspect and make any changes necessary: -+ --- -* `tmp/build/Dockerfile` to `build/Dockerfile` -** There is no tmp directory in the new project layout -* RBAC rules updates from `deploy/rbac.yaml` to `deploy/role.yaml` and -`deploy/role_binding.yaml` -* `deploy/cr.yaml` to `deploy/crds/___cr.yaml` -* `deploy/crd.yaml` to `deploy/crds/___crd.yaml` --- - -. *Copy user-defined dependencies.* -+ -For any user-defined dependencies added to the old project's `Gopkg.toml`, copy -and append them to the new project's `Gopkg.toml`. Run `dep ensure` to update -the vendor in the new project. - -. *Confirm your changes.* -+ -Build and run your Operator to verify that it works. diff --git a/modules/olm-about-catalogs.adoc b/modules/olm-about-catalogs.adoc index 4dc03f6696..e7f48791f1 100644 --- a/modules/olm-about-catalogs.adoc +++ b/modules/olm-about-catalogs.adoc @@ -17,8 +17,6 @@ As a cluster administrator, you can create your own custom index image, either b [IMPORTANT] ==== Kubernetes periodically deprecates certain APIs that are removed in subsequent releases. As a result, Operators are unable to use removed APIs starting with the version of {product-title} that uses the Kubernetes version that removed the API. - -If your cluster is using custom catalogs, see xref:../../operators/operator_sdk/osdk-working-bundle-images#osdk-control-compat_osdk-working-bundle-images[Controlling Operator compatibility with {product-title} versions] for more details about how Operator authors can update their projects to help avoid workload issues and prevent incompatible upgrades. 
==== [NOTE] diff --git a/modules/olm-enabling-operator-for-multi-arch.adoc b/modules/olm-enabling-operator-for-multi-arch.adoc deleted file mode 100644 index b2631fa60d..0000000000 --- a/modules/olm-enabling-operator-for-multi-arch.adoc +++ /dev/null @@ -1,63 +0,0 @@ -// Module included in the following assemblies: -// -// * operators/operator_sdk/osdk-generating-csvs.adoc - -:_mod-docs-content-type: PROCEDURE -[id="olm-enabling-operator-for-multi-arch_{context}"] -= Enabling your Operator for multiple architectures and operating systems - -Operator Lifecycle Manager (OLM) assumes that all Operators run on Linux hosts. However, as an Operator author, you can specify whether your Operator supports managing workloads on other architectures, if worker nodes are available in the {product-title} cluster. - -If your Operator supports variants other than AMD64 and Linux, you can add labels to the cluster service version (CSV) that provides the Operator to list the supported variants. Labels indicating supported architectures and operating systems are defined by the following: - -[source,yaml] ----- -labels: - operatorframework.io/arch.: supported <1> - operatorframework.io/os.: supported <2> ----- -<1> Set `` to a supported string. -<2> Set `` to a supported string. - -[NOTE] -==== -Only the labels on the channel head of the default channel are considered for filtering package manifests by label. This means, for example, that providing an additional architecture for an Operator in the non-default channel is possible, but that architecture is not available for filtering in the `PackageManifest` API. 
-==== - -If a CSV does not include an `os` label, it is treated as if it has the following Linux support label by default: - -[source,yaml] ----- -labels: - operatorframework.io/os.linux: supported ----- - -If a CSV does not include an `arch` label, it is treated as if it has the following AMD64 support label by default: - -[source,yaml] ----- -labels: - operatorframework.io/arch.amd64: supported ----- - -If an Operator supports multiple node architectures or operating systems, you can add multiple labels, as well. - -.Prerequisites - -* An Operator project with a CSV. -* To support listing multiple architectures and operating systems, your Operator image referenced in the CSV must be a manifest list image. -* For the Operator to work properly in restricted network, or disconnected, environments, the image referenced must also be specified using a digest (SHA) and not by a tag. - -.Procedure - -* Add a label in the `metadata.labels` of your CSV for each supported architecture and operating system that your Operator supports: -+ -[source,yaml] ----- -labels: - operatorframework.io/arch.s390x: supported - operatorframework.io/os.zos: supported - operatorframework.io/os.linux: supported <1> - operatorframework.io/arch.amd64: supported <1> ----- -<1> After you add a new architecture or operating system, you must also now include the default `os.linux` and `arch.amd64` variants explicitly. 
diff --git a/modules/olm-enabling-operator-restricted-network.adoc b/modules/olm-enabling-operator-restricted-network.adoc deleted file mode 100644 index 8bd777a012..0000000000 --- a/modules/olm-enabling-operator-restricted-network.adoc +++ /dev/null @@ -1,201 +0,0 @@ -// Module included in the following assemblies: -// -// * operators/operator_sdk/osdk-generating-csvs.adoc - -:_mod-docs-content-type: PROCEDURE -[id="olm-enabling-operator-for-restricted-network_{context}"] -= Enabling your Operator for restricted network environments - -As an Operator author, your Operator must meet additional requirements to run properly in a restricted network, or disconnected, environment. - -.Operator requirements for supporting disconnected mode - -* Replace hard-coded image references with environment variables. -* In the cluster service version (CSV) of your Operator: -** List any _related images_, or other container images that your Operator might require to perform their functions. -** Reference all specified images by a digest (SHA) and not by a tag. -* All dependencies of your Operator must also support running in a disconnected mode. -* Your Operator must not require any off-cluster resources. -// TODO: Include more info w/ better steps on how to do this: -//* You must understand the {product-title} proxy configuration. - -.Prerequisites - -* An Operator project with a CSV. The following procedure uses the Memcached Operator as an example for Go-, Ansible-, and Helm-based projects. - -.Procedure - -. Set an environment variable for the additional image references used by the Operator in the `config/manager/manager.yaml` file: -+ -.Example `config/manager/manager.yaml` file -[%collapsible] -==== -[source,yaml] ----- -... -spec: - ... - spec: - ... - containers: - - command: - - /manager - ... - env: - - name: <1> - value: "" <2> ----- -<1> Define the environment variable, such as `RELATED_IMAGE_MEMCACHED`. 
-<2> Set the related image reference and tag, such as `docker.io/memcached:1.4.36-alpine`. -==== - -. Replace hard-coded image references with environment variables in the relevant file for your Operator project type: - -* For Go-based Operator projects, add the environment variable to the `controllers/memcached_controller.go` file as shown in the following example: -+ -.Example `controllers/memcached_controller.go` file -[%collapsible] -==== -[source,diff] ----- - // deploymentForMemcached returns a memcached Deployment object - -... - - Spec: corev1.PodSpec{ - Containers: []corev1.Container{{ -- Image: "memcached:1.4.36-alpine", <1> -+ Image: os.Getenv(""), <2> - Name: "memcached", - Command: []string{"memcached", "-m=64", "-o", "modern", "-v"}, - Ports: []corev1.ContainerPort{{ - -... ----- -<1> Delete the image reference and tag. -<2> Use the `os.Getenv` function to call the ``. - -[NOTE] -===== -The `os.Getenv` function returns an empty string if a variable is not set. Set the `` before changing the file. -===== -==== - -* For Ansible-based Operator projects, add the environment variable to the `roles/memcached/tasks/main.yml` file as shown in the following example: -+ -.Example `roles/memcached/tasks/main.yml` file -[%collapsible] -==== -[source,diff] ----- -spec: - containers: - - name: memcached - command: - - memcached - - -m=64 - - -o - - modern - - -v -- image: "docker.io/memcached:1.4.36-alpine" <1> -+ image: "{{ lookup('env', '') }}" <2> - ports: - - containerPort: 11211 - -... ----- -<1> Delete the image reference and tag. -<2> Use the `lookup` function to call the ``. -==== - -* For Helm-based Operator projects, add the `overrideValues` field to the `watches.yaml` file as shown in the following example: -+ -.Example `watches.yaml` file -[%collapsible] -==== -[source,yaml] ----- -... 
-- group: demo.example.com - version: v1alpha1 - kind: Memcached - chart: helm-charts/memcached - overrideValues: <1> - relatedImage: ${} <2> ----- -<1> Add the `overrideValues` field. -<2> Define the `overrideValues` field by using the ``, such as `RELATED_IMAGE_MEMCACHED`. -==== - -.. Add the value of the `overrideValues` field to the `helm-charts/memchached/values.yaml` file as shown in the following example: -+ -.Example `helm-charts/memchached/values.yaml` file -[source,yaml] ----- -... -relatedImage: "" ----- - -.. Edit the chart template in the `helm-charts/memcached/templates/deployment.yaml` file as shown in the following example: -+ -.Example `helm-charts/memcached/templates/deployment.yaml` file -[%collapsible] -==== -[source,yaml] ----- -containers: - - name: {{ .Chart.Name }} - securityContext: - - toYaml {{ .Values.securityContext | nindent 12 }} - image: "{{ .Values.image.pullPolicy }} - env: <1> - - name: related_image <2> - value: "{{ .Values.relatedImage }}" <3> ----- -<1> Add the `env` field. -<2> Name the environment variable. -<3> Define the value of the environment variable. -==== - -. Add the `BUNDLE_GEN_FLAGS` variable definition to your `Makefile` with the following changes: -+ -.Example `Makefile` -[source,diff] ----- - BUNDLE_GEN_FLAGS ?= -q --overwrite --version $(VERSION) $(BUNDLE_METADATA_OPTS) - - # USE_IMAGE_DIGESTS defines if images are resolved via tags or digests - # You can enable this value if you would like to use SHA Based Digests - # To enable set flag to true - USE_IMAGE_DIGESTS ?= false - ifeq ($(USE_IMAGE_DIGESTS), true) - BUNDLE_GEN_FLAGS += --use-image-digests - endif - -... - -- $(KUSTOMIZE) build config/manifests | operator-sdk generate bundle -q --overwrite --version $(VERSION) $(BUNDLE_METADATA_OPTS) <1> -+ $(KUSTOMIZE) build config/manifests | operator-sdk generate bundle $(BUNDLE_GEN_FLAGS) <2> - -... ----- -<1> Delete this line in the `Makefile`. -<2> Replace the line above with this line. - -. 
To update your Operator image to use a digest (SHA) and not a tag, run the `make bundle` command and set `USE_IMAGE_DIGESTS` to `true` : -+ -[source,terminal] ----- -$ make bundle USE_IMAGE_DIGESTS=true ----- - -. Add the `disconnected` annotation, which indicates that the Operator works in a disconnected environment: -+ -[source,yaml] ----- -metadata: - annotations: - operators.openshift.io/infrastructure-features: '["disconnected"]' ----- -+ -Operators can be filtered in OperatorHub by this infrastructure feature. diff --git a/modules/olm-operator-framework.adoc b/modules/olm-operator-framework.adoc index e0680616c1..cb055fd599 100644 --- a/modules/olm-operator-framework.adoc +++ b/modules/olm-operator-framework.adoc @@ -7,9 +7,6 @@ The Operator Framework is a family of tools and capabilities to deliver on the customer experience described above. It is not just about writing code; testing, delivering, and updating Operators is just as important. The Operator Framework components consist of open source tools to tackle these problems: -Operator SDK:: -The Operator SDK assists Operator authors in bootstrapping, building, testing, and packaging their own Operator based on their expertise without requiring knowledge of Kubernetes API complexities. - Operator Lifecycle Manager:: Operator Lifecycle Manager (OLM) controls the installation, upgrade, and role-based access control (RBAC) of Operators in a cluster. It is deployed by default in {product-title} {product-version}. 
diff --git a/modules/olm-operator-maturity-model.adoc b/modules/olm-operator-maturity-model.adoc index 7bc8121bd6..67c623236e 100644 --- a/modules/olm-operator-maturity-model.adoc +++ b/modules/olm-operator-maturity-model.adoc @@ -11,6 +11,3 @@ One can however generalize the scale of the maturity of the encapsulated operati .Operator maturity model image::operator-maturity-model.png[] - -The above model also shows how these capabilities can best be developed through -the Helm, Go, and Ansible capabilities of the Operator SDK. diff --git a/modules/olm-operatorhub-overview.adoc b/modules/olm-operatorhub-overview.adoc index 8d09f3abc0..2d6b1d797b 100644 --- a/modules/olm-operatorhub-overview.adoc +++ b/modules/olm-operatorhub-overview.adoc @@ -33,5 +33,3 @@ Cluster administrators can choose from catalogs grouped into the following categ endif::[] Operators on OperatorHub are packaged to run on OLM. This includes a YAML file called a cluster service version (CSV) containing all of the CRDs, RBAC rules, deployments, and container images required to install and securely run the Operator. It also contains user-visible information like a description of its features and supported Kubernetes versions. - -The Operator SDK can be used to assist developers packaging their Operators for use on OLM and OperatorHub. If you have a commercial application that you want to make accessible to your customers, get it included using the certification workflow provided on the Red Hat Partner Connect portal at link:https://connect.redhat.com[connect.redhat.com]. 
diff --git a/modules/osdk-about-openapi-validation.adoc b/modules/osdk-about-openapi-validation.adoc deleted file mode 100644 index b3872df2ab..0000000000 --- a/modules/osdk-about-openapi-validation.adoc +++ /dev/null @@ -1,21 +0,0 @@ -// Module included in the following assemblies: -// -// * operators/operator_sdk/golang/osdk-golang-tutorial.adoc - -:_mod-docs-content-type: CONCEPT -[id="osdk-about-openapi-validation_{context}"] -= About OpenAPI validation - -OpenAPIv3 schemas are added to CRD manifests in the `spec.validation` block when the manifests are generated. This validation block allows Kubernetes to validate the properties in a Memcached custom resource (CR) when it is created or updated. - -Markers, or annotations, are available to configure validations for your API. These markers always have a `+kubebuilder:validation` prefix. - -[role="_additional-resources"] -.Additional resources - -* For more details on the usage of markers in API code, see the following Kubebuilder documentation: -** link:https://book.kubebuilder.io/reference/generating-crd.html[CRD generation] -** link:https://book.kubebuilder.io/reference/markers.html[Markers] -** link:https://book.kubebuilder.io/reference/markers/crd-validation.html[List of OpenAPIv3 validation markers] - -* For more details about OpenAPIv3 validation schemas in CRDs, see the link:https://kubernetes.io/docs/tasks/extend-kubernetes/custom-resources/custom-resource-definitions/#specifying-a-structural-schema[Kubernetes documentation]. 
diff --git a/modules/osdk-about-pkg-format-migration.adoc b/modules/osdk-about-pkg-format-migration.adoc deleted file mode 100644 index fd68acb5b7..0000000000 --- a/modules/osdk-about-pkg-format-migration.adoc +++ /dev/null @@ -1,62 +0,0 @@ -// Module included in the following assemblies: -// -// * operators/operator_sdk/osdk-pkgman-to-bundle.adoc - -:_mod-docs-content-type: CONCEPT -[id="osdk-about-pkg-format-migration_{context}"] -= About packaging format migration - -The Operator SDK `pkgman-to-bundle` command helps in migrating Operator Lifecycle Manager (OLM) package manifests to bundles. The command takes an input package manifest directory and generates bundles for each of the versions of manifests present in the input directory. You can also then build bundle images for each of the generated bundles. - -For example, consider the following `packagemanifests/` directory for a project in the package manifest format: - -.Example package manifest format layout -[source,terminal] ----- -packagemanifests/ -└── etcd - ├── 0.0.1 - │ ├── etcdcluster.crd.yaml - │ └── etcdoperator.clusterserviceversion.yaml - ├── 0.0.2 - │ ├── etcdbackup.crd.yaml - │ ├── etcdcluster.crd.yaml - │ ├── etcdoperator.v0.0.2.clusterserviceversion.yaml - │ └── etcdrestore.crd.yaml - └── etcd.package.yaml ----- - -After running the migration, the following bundles are generated in the `bundle/` directory: - -.Example bundle format layout -[source,terminal] ----- -bundle/ -├── bundle-0.0.1 -│   ├── bundle.Dockerfile -│   ├── manifests -│   │   ├── etcdcluster.crd.yaml -│   │   ├── etcdoperator.clusterserviceversion.yaml -│   ├── metadata -│   │   └── annotations.yaml -│   └── tests -│   └── scorecard -│   └── config.yaml -└── bundle-0.0.2 - ├── bundle.Dockerfile - ├── manifests - │   ├── etcdbackup.crd.yaml - │   ├── etcdcluster.crd.yaml - │   ├── etcdoperator.v0.0.2.clusterserviceversion.yaml - │   ├── etcdrestore.crd.yaml - ├── metadata - │   └── annotations.yaml - └── tests - └── scorecard - 
└── config.yaml ----- - -Based on this generated layout, bundle images for both of the bundles are also built with the following names: - -* `quay.io/example/etcd:0.0.1` -* `quay.io/example/etcd:0.0.2` diff --git a/modules/osdk-ansible-cr-status-about.adoc b/modules/osdk-ansible-cr-status-about.adoc deleted file mode 100644 index 9be5ea09bd..0000000000 --- a/modules/osdk-ansible-cr-status-about.adoc +++ /dev/null @@ -1,36 +0,0 @@ -// Module included in the following assemblies: -// -// * operators/operator_sdk/ansible/osdk-ansible-cr-status.adoc - -:_mod-docs-content-type: CONCEPT -[id="osdk-ansible-cr-status-about_{context}"] -= About custom resource status in Ansible-based Operators - -Ansible-based Operators automatically update custom resource (CR) link:https://kubernetes.io/docs/tasks/extend-kubernetes/custom-resources/custom-resource-definitions/#status-subresource[`status` subresources] with generic information about the previous Ansible run. This includes the number of successful and failed tasks and relevant error messages as shown: - -[source,yaml] ----- -status: - conditions: - - ansibleResult: - changed: 3 - completion: 2018-12-03T13:45:57.13329 - failures: 1 - ok: 6 - skipped: 0 - lastTransitionTime: 2018-12-03T13:45:57Z - message: 'Status code was -1 and not [200]: Request failed: ' - reason: Failed - status: "True" - type: Failure - - lastTransitionTime: 2018-12-03T13:46:13Z - message: Running reconciliation - reason: Running - status: "True" - type: Running ----- - -Ansible-based Operators also allow Operator authors to supply custom status values with the `k8s_status` Ansible module, which is included in the link:https://galaxy.ansible.com/operator_sdk/util[`operator_sdk.util` collection]. This allows the author to update the `status` from within Ansible with any key-value pair as desired. - -By default, Ansible-based Operators always include the generic Ansible run output as shown above. 
If you would prefer your application did _not_ update the status with Ansible output, you can track the status manually from your application. diff --git a/modules/osdk-ansible-cr-status-manual.adoc b/modules/osdk-ansible-cr-status-manual.adoc deleted file mode 100644 index 4ffcd4dd36..0000000000 --- a/modules/osdk-ansible-cr-status-manual.adoc +++ /dev/null @@ -1,57 +0,0 @@ -// Module included in the following assemblies: -// -// * operators/operator_sdk/ansible/osdk-ansible-cr-status.adoc - -:_mod-docs-content-type: PROCEDURE -[id="osdk-ansible-cr-status-manual_{context}"] -= Tracking custom resource status manually - -You can use the `operator_sdk.util` collection to modify your Ansible-based Operator to track custom resource (CR) status manually from your application. - -.Prerequisites - -* Ansible-based Operator project created by using the Operator SDK - -.Procedure - -. Update the `watches.yaml` file with a `manageStatus` field set to `false`: -+ -[source,yaml] ----- -- version: v1 - group: api.example.com - kind: - role: - manageStatus: false ----- - -. Use the `operator_sdk.util.k8s_status` Ansible module to update the subresource. For example, to update with key `test` and value `data`, `operator_sdk.util` can be used as shown: -+ -[source,yaml] ----- -- operator_sdk.util.k8s_status: - api_version: app.example.com/v1 - kind: - name: "{{ ansible_operator_meta.name }}" - namespace: "{{ ansible_operator_meta.namespace }}" - status: - test: data ----- - -. You can declare collections in the `meta/main.yml` file for the role, which is included for scaffolded Ansible-based Operators: -+ -[source,yaml] ----- -collections: - - operator_sdk.util ----- - -. After declaring collections in the role meta, you can invoke the `k8s_status` module directly: -+ -[source,yaml] ----- -k8s_status: - ... 
- status: - key1: value1 ----- diff --git a/modules/osdk-ansible-create-api.adoc b/modules/osdk-ansible-create-api.adoc deleted file mode 100644 index a6401574cf..0000000000 --- a/modules/osdk-ansible-create-api.adoc +++ /dev/null @@ -1,34 +0,0 @@ -// Module included in the following assemblies: -// -// * operators/operator_sdk/ansible/osdk-ansible-tutorial.adoc - -:_mod-docs-content-type: PROCEDURE -[id="osdk-ansible-create-api-controller_{context}"] -= Creating an API - -Use the Operator SDK CLI to create a Memcached API. - -.Procedure - -* Run the following command to create an API with group `cache`, version, `v1`, and kind `Memcached`: -+ -[source,terminal] ----- -$ operator-sdk create api \ - --group cache \ - --version v1 \ - --kind Memcached \ - --generate-role <1> ----- -<1> Generates an Ansible role for the API. - -After creating the API, your Operator project updates with the following structure: - -Memcached CRD:: Includes a sample `Memcached` resource - -Manager:: Program that reconciles the state of the cluster to the desired state by using: -+ --- -* A reconciler, either an Ansible role or playbook -* A `watches.yaml` file, which connects the `Memcached` resource to the `memcached` Ansible role --- diff --git a/modules/osdk-ansible-custom-resource-files.adoc b/modules/osdk-ansible-custom-resource-files.adoc deleted file mode 100644 index d94ed33786..0000000000 --- a/modules/osdk-ansible-custom-resource-files.adoc +++ /dev/null @@ -1,58 +0,0 @@ -// Module included in the following assemblies: -// -// * operators/operator_sdk/ansible/osdk-ansible-support.adoc - -[id="osdk-ansible-custom-resource-files_{context}"] -= Custom resource files - -Operators use the Kubernetes extension mechanism, custom resource definitions (CRDs), so your custom resource (CR) looks and acts just like the built-in, native Kubernetes objects. - -The CR file format is a Kubernetes resource file. 
The object has mandatory and optional fields: - -.Custom resource fields -[cols="3,7",options="header"] -|=== -|Field -|Description - -|`apiVersion` -|Version of the CR to be created. - -|`kind` -|Kind of the CR to be created. - -|`metadata` -|Kubernetes-specific metadata to be created. - -|`spec` (optional) -|Key-value list of variables which are passed to Ansible. This field is empty by default. - -|`status` -|Summarizes the current state of the object. For Ansible-based Operators, the link:https://kubernetes.io/docs/tasks/extend-kubernetes/custom-resources/custom-resource-definitions/#status-subresource[`status` subresource] is enabled for CRDs and managed by the `operator_sdk.util.k8s_status` Ansible module by default, which includes `condition` information to the CR `status`. - -|`annotations` -|Kubernetes-specific annotations to be appended to the CR. -|=== - -The following list of CR annotations modify the behavior of the Operator: - -.Ansible-based Operator annotations -[cols="3,7",options="header"] -|=== -|Annotation -|Description - -|`ansible.operator-sdk/reconcile-period` -|Specifies the reconciliation interval for the CR. This value is parsed using the standard Golang package link:https://golang.org/pkg/time/[`time`]. Specifically, link:https://golang.org/pkg/time/#ParseDuration[`ParseDuration`] is used which applies the default suffix of `s`, giving the value in seconds. 
-|=== - -.Example Ansible-based Operator annotation -[source,yaml] ----- -apiVersion: "test1.example.com/v1alpha1" -kind: "Test1" -metadata: - name: "example" -annotations: - ansible.operator-sdk/reconcile-period: "30s" ----- diff --git a/modules/osdk-ansible-extra-variables.adoc b/modules/osdk-ansible-extra-variables.adoc deleted file mode 100644 index 05564f9aff..0000000000 --- a/modules/osdk-ansible-extra-variables.adoc +++ /dev/null @@ -1,48 +0,0 @@ -// Module included in the following assemblies: -// -// * operators/operator_sdk/ansible/osdk-ansible-support.adoc - -[id="osdk-ansible-extra-variables_{context}"] -= Extra variables sent to Ansible - -Extra variables can be sent to Ansible, which are then managed by the Operator. The `spec` section of the custom resource (CR) passes along the key-value pairs as extra variables. This is equivalent to extra variables passed in to the `ansible-playbook` command. - -The Operator also passes along additional variables under the `meta` field for the name of the CR and the namespace of the CR. - -For the following CR example: - -[source,yaml] ----- -apiVersion: "app.example.com/v1alpha1" -kind: "Database" -metadata: - name: "example" -spec: - message: "Hello world 2" - newParameter: "newParam" ----- - -The structure passed to Ansible as extra variables is: - -[source,json] ----- -{ "meta": { - "name": "", - "namespace": "", - }, - "message": "Hello world 2", - "new_parameter": "newParam", - "_app_example_com_database": { - - }, -} ----- - -The `message` and `newParameter` fields are set in the top level as extra variables, and `meta` provides the relevant metadata for the CR as defined in the Operator. 
The `meta` fields can be accessed using dot notation in Ansible, for example: - -[source,yaml] ----- ---- -- debug: - msg: "name: {{ ansible_operator_meta.name }}, {{ ansible_operator_meta.namespace }}" ----- diff --git a/modules/osdk-ansible-inside-operator-local.adoc b/modules/osdk-ansible-inside-operator-local.adoc deleted file mode 100644 index baf30de36f..0000000000 --- a/modules/osdk-ansible-inside-operator-local.adoc +++ /dev/null @@ -1,124 +0,0 @@ -// Module included in the following assemblies: -// -// * operators/operator_sdk/ansible/osdk-ansible-inside-operator.adoc - -:_mod-docs-content-type: PROCEDURE -[id="osdk-ansible-inside-operator-local_{context}"] -= Testing an Ansible-based Operator locally - -You can test the logic inside of an Ansible-based Operator running locally by using the `make run` command from the top-level directory of your Operator project. The `make run` Makefile target runs the `ansible-operator` binary locally, which reads from the `watches.yaml` file and uses your `~/.kube/config` file to communicate with a Kubernetes cluster just as the `k8s` modules do. - -[NOTE] -==== -You can customize the roles path by setting the environment variable `ANSIBLE_ROLES_PATH` or by using the `ansible-roles-path` flag. If the role is not found in the `ANSIBLE_ROLES_PATH` value, the Operator looks for it in `{{current directory}}/roles`. -==== - -.Prerequisites - -- link:https://ansible-runner.readthedocs.io/en/latest/install.html[Ansible Runner] v2.3.3+ -- link:https://github.com/ansible/ansible-runner-http[Ansible Runner HTTP Event Emitter plugin] v1.0.0+ -- Performed the previous steps for testing the Kubernetes Collection locally - -.Procedure - -. 
Install your custom resource definition (CRD) and proper role-based access control (RBAC) definitions for your custom resource (CR): -+ -[source,terminal] ----- -$ make install ----- -+ -.Example output -[source,terminal] ----- -/usr/bin/kustomize build config/crd | kubectl apply -f - -customresourcedefinition.apiextensions.k8s.io/memcacheds.cache.example.com created ----- - -. Run the `make run` command: -+ -[source,terminal] ----- -$ make run ----- -+ -.Example output -[source,terminal] ----- -/home/user/memcached-operator/bin/ansible-operator run -{"level":"info","ts":1612739145.2871568,"logger":"cmd","msg":"Version","Go Version":"go1.15.5","GOOS":"linux","GOARCH":"amd64","ansible-operator":"v1.10.1","commit":"1abf57985b43bf6a59dcd18147b3c574fa57d3f6"} -... -{"level":"info","ts":1612739148.347306,"logger":"controller-runtime.metrics","msg":"metrics server is starting to listen","addr":":8080"} -{"level":"info","ts":1612739148.3488882,"logger":"watches","msg":"Environment variable not set; using default value","envVar":"ANSIBLE_VERBOSITY_MEMCACHED_CACHE_EXAMPLE_COM","default":2} -{"level":"info","ts":1612739148.3490262,"logger":"cmd","msg":"Environment variable not set; using default value","Namespace":"","envVar":"ANSIBLE_DEBUG_LOGS","ANSIBLE_DEBUG_LOGS":false} -{"level":"info","ts":1612739148.3490646,"logger":"ansible-controller","msg":"Watching resource","Options.Group":"cache.example.com","Options.Version":"v1","Options.Kind":"Memcached"} -{"level":"info","ts":1612739148.350217,"logger":"proxy","msg":"Starting to serve","Address":"127.0.0.1:8888"} -{"level":"info","ts":1612739148.3506632,"logger":"controller-runtime.manager","msg":"starting metrics server","path":"/metrics"} -{"level":"info","ts":1612739148.350784,"logger":"controller-runtime.manager.controller.memcached-controller","msg":"Starting EventSource","source":"kind source: cache.example.com/v1, Kind=Memcached"} 
-{"level":"info","ts":1612739148.5511978,"logger":"controller-runtime.manager.controller.memcached-controller","msg":"Starting Controller"} -{"level":"info","ts":1612739148.5512562,"logger":"controller-runtime.manager.controller.memcached-controller","msg":"Starting workers","worker count":8} ----- -+ -With the Operator now watching your CR for events, the creation of a CR will trigger your Ansible role to run. -+ -[NOTE] -==== -Consider an example `config/samples/.yaml` CR manifest: - -[source,yaml] ----- -apiVersion: .example.com/v1alpha1 -kind: -metadata: - name: "-sample" ----- - -Because the `spec` field is not set, Ansible is invoked with no extra variables. Passing extra variables from a CR to Ansible is covered in another section. It is important to set reasonable defaults for the Operator. -==== - -. Create an instance of your CR with the default variable `state` set to `present`: -+ -[source,terminal] ----- -$ oc apply -f config/samples/.yaml ----- - -. Check that the `example-config` config map was created: -+ -[source,terminal] ----- -$ oc get configmaps ----- -+ -.Example output -[source,terminal] ----- -NAME STATUS AGE -example-config Active 3s ----- - -. Modify your `config/samples/.yaml` file to set the `state` field to `absent`. For example: -+ -[source,yaml] ----- -apiVersion: cache.example.com/v1 -kind: Memcached -metadata: - name: memcached-sample -spec: - state: absent ----- - -. Apply the changes: -+ -[source,terminal] ----- -$ oc apply -f config/samples/.yaml ----- - -. 
Confirm that the config map is deleted: -+ -[source,terminal] ----- -$ oc get configmap ----- diff --git a/modules/osdk-ansible-inside-operator-logs-full-result.adoc b/modules/osdk-ansible-inside-operator-logs-full-result.adoc deleted file mode 100644 index d346d28e1e..0000000000 --- a/modules/osdk-ansible-inside-operator-logs-full-result.adoc +++ /dev/null @@ -1,22 +0,0 @@ -// Module included in the following assemblies: -// -// * operators/operator_sdk/ansible/osdk-ansible-inside-operator.adoc - -:_mod-docs-content-type: PROCEDURE -[id="osdk-ansible-inside-operator-logs-full-result_{context}"] -= Enabling full Ansible results in logs - -You can set the environment variable `ANSIBLE_DEBUG_LOGS` to `True` to enable checking the full Ansible result in logs, which can be helpful when debugging. - -.Procedure - -* Edit the `config/manager/manager.yaml` and `config/default/manager_metrics_patch.yaml` files to include the following configuration: -+ -[source,terminal] ----- - containers: - - name: manager - env: - - name: ANSIBLE_DEBUG_LOGS - value: "True" ----- diff --git a/modules/osdk-ansible-inside-operator-logs-verbose.adoc b/modules/osdk-ansible-inside-operator-logs-verbose.adoc deleted file mode 100644 index 34f64b6a91..0000000000 --- a/modules/osdk-ansible-inside-operator-logs-verbose.adoc +++ /dev/null @@ -1,25 +0,0 @@ -// Module included in the following assemblies: -// -// * operators/operator_sdk/ansible/osdk-ansible-inside-operator.adoc - -:_mod-docs-content-type: PROCEDURE -[id="osdk-ansible-inside-operator-logs-verbose_{context}"] -= Enabling verbose debugging in logs - -While developing an Ansible-based Operator, it can be helpful to enable additional debugging in logs. - -.Procedure - -* Add the `ansible.sdk.operatorframework.io/verbosity` annotation to your custom resource to enable the verbosity level that you want. 
For example: -+ -[source,terminal] ----- -apiVersion: "cache.example.com/v1alpha1" -kind: "Memcached" -metadata: - name: "example-memcached" - annotations: - "ansible.sdk.operatorframework.io/verbosity": "4" -spec: - size: 4 ----- diff --git a/modules/osdk-ansible-inside-operator-logs-view.adoc b/modules/osdk-ansible-inside-operator-logs-view.adoc deleted file mode 100644 index eaccaf4365..0000000000 --- a/modules/osdk-ansible-inside-operator-logs-view.adoc +++ /dev/null @@ -1,43 +0,0 @@ -// Module included in the following assemblies: -// -// * operators/operator_sdk/ansible/osdk-ansible-inside-operator.adoc - -:_mod-docs-content-type: PROCEDURE -[id="osdk-ansible-inside-operator-logs-view_{context}"] -= Viewing Ansible logs - -.Prerequisites - -* Ansible-based Operator running as a deployment on a cluster - -.Procedure - -* To view logs from an Ansible-based Operator, run the following command: -+ -[source,terminal] ----- -$ oc logs deployment/-controller-manager \ - -c manager \//<1> - -n <2> ----- -<1> View logs from the `manager` container. -<2> If you used the `make deploy` command to run the Operator as a deployment, use the `-system` namespace. -+ -.Example output -[source,terminal] ----- -{"level":"info","ts":1612732105.0579333,"logger":"cmd","msg":"Version","Go Version":"go1.15.5","GOOS":"linux","GOARCH":"amd64","ansible-operator":"v1.10.1","commit":"1abf57985b43bf6a59dcd18147b3c574fa57d3f6"} -{"level":"info","ts":1612732105.0587437,"logger":"cmd","msg":"WATCH_NAMESPACE environment variable not set. 
Watching all namespaces.","Namespace":""} -I0207 21:08:26.110949 7 request.go:645] Throttling request took 1.035521578s, request: GET:https://172.30.0.1:443/apis/flowcontrol.apiserver.k8s.io/v1alpha1?timeout=32s -{"level":"info","ts":1612732107.768025,"logger":"controller-runtime.metrics","msg":"metrics server is starting to listen","addr":"127.0.0.1:8080"} -{"level":"info","ts":1612732107.768796,"logger":"watches","msg":"Environment variable not set; using default value","envVar":"ANSIBLE_VERBOSITY_MEMCACHED_CACHE_EXAMPLE_COM","default":2} -{"level":"info","ts":1612732107.7688773,"logger":"cmd","msg":"Environment variable not set; using default value","Namespace":"","envVar":"ANSIBLE_DEBUG_LOGS","ANSIBLE_DEBUG_LOGS":false} -{"level":"info","ts":1612732107.7688901,"logger":"ansible-controller","msg":"Watching resource","Options.Group":"cache.example.com","Options.Version":"v1","Options.Kind":"Memcached"} -{"level":"info","ts":1612732107.770032,"logger":"proxy","msg":"Starting to serve","Address":"127.0.0.1:8888"} -I0207 21:08:27.770185 7 leaderelection.go:243] attempting to acquire leader lease memcached-operator-system/memcached-operator... 
-{"level":"info","ts":1612732107.770202,"logger":"controller-runtime.manager","msg":"starting metrics server","path":"/metrics"} -I0207 21:08:27.784854 7 leaderelection.go:253] successfully acquired lease memcached-operator-system/memcached-operator -{"level":"info","ts":1612732107.7850506,"logger":"controller-runtime.manager.controller.memcached-controller","msg":"Starting EventSource","source":"kind source: cache.example.com/v1, Kind=Memcached"} -{"level":"info","ts":1612732107.8853772,"logger":"controller-runtime.manager.controller.memcached-controller","msg":"Starting Controller"} -{"level":"info","ts":1612732107.8854098,"logger":"controller-runtime.manager.controller.memcached-controller","msg":"Starting workers","worker count":4} ----- diff --git a/modules/osdk-ansible-inside-operator-logs.adoc b/modules/osdk-ansible-inside-operator-logs.adoc deleted file mode 100644 index 37ee2096fb..0000000000 --- a/modules/osdk-ansible-inside-operator-logs.adoc +++ /dev/null @@ -1,8 +0,0 @@ -// Module included in the following assemblies: -// -// * operators/operator_sdk/ansible/osdk-ansible-inside-operator.adoc - -[id="osdk-ansible-inside-operator-logs_{context}"] -= Ansible logs - -Ansible-based Operators provide logs about the Ansible run, which can be useful for debugging your Ansible tasks. The logs can also contain detailed information about the internals of the Operator and its interactions with Kubernetes. 
diff --git a/modules/osdk-ansible-k8s-install.adoc b/modules/osdk-ansible-k8s-install.adoc deleted file mode 100644 index 3b104c6086..0000000000 --- a/modules/osdk-ansible-k8s-install.adoc +++ /dev/null @@ -1,43 +0,0 @@ -// Module included in the following assemblies: -// -// * operators/operator_sdk/ansible/osdk-ansible-k8s-collection.adoc - -:_mod-docs-content-type: PROCEDURE -[id="osdk-ansible-installing-k8s-collection_{context}"] -= Installing the Kubernetes Collection for Ansible - -You can install the Kubernetes Collection for Ansible on your local workstation. - -.Procedure - -. Install Ansible 2.15+: -+ -[source,terminal] ----- -$ sudo dnf install ansible ----- - -. Install the link:https://pypi.org/project/kubernetes/[Python Kubernetes client] package: -+ -[source,terminal] ----- -$ pip install kubernetes ----- - -. Install the Kubernetes Collection using one of the following methods: - -* You can install the collection directly from Ansible Galaxy: -+ -[source,terminal] ----- -$ ansible-galaxy collection install community.kubernetes ----- - -* If you have already initialized your Operator, you might have a `requirements.yml` file at the top level of your project. This file specifies Ansible dependencies that must be installed for your Operator to function. By default, this file installs the `community.kubernetes` collection as well as the `operator_sdk.util` collection, which provides modules and plugins for Operator-specific functions. 
-+ -To install the dependent modules from the `requirements.yml` file: -+ -[source,terminal] ----- -$ ansible-galaxy collection install -r requirements.yml ----- diff --git a/modules/osdk-ansible-k8s-local.adoc b/modules/osdk-ansible-k8s-local.adoc deleted file mode 100644 index 71e0e3ade3..0000000000 --- a/modules/osdk-ansible-k8s-local.adoc +++ /dev/null @@ -1,122 +0,0 @@ -// Module included in the following assemblies: -// -// * operators/operator_sdk/ansible/osdk-ansible-k8s-collection.adoc - -:_mod-docs-content-type: PROCEDURE -[id="osdk-ansible-k8s-local_{context}"] -= Testing the Kubernetes Collection locally - -Operator developers can run the Ansible code from their local machine as opposed to running and rebuilding the Operator each time. - -.Prerequisites - -* Initialize an Ansible-based Operator project and create an API that has a generated Ansible role by using the Operator SDK -* Install the Kubernetes Collection for Ansible - -.Procedure - -. In your Ansible-based Operator project directory, modify the `roles//tasks/main.yml` file with the Ansible logic that you want. The `roles//` directory is created when you use the `--generate-role` flag while creating an API. The `` replaceable matches the kind that you specified for the API. -+ -The following example creates and deletes a config map based on the value of a variable named `state`: -+ -[source,yaml] ----- ---- -- name: set ConfigMap example-config to {{ state }} - community.kubernetes.k8s: - api_version: v1 - kind: ConfigMap - name: example-config - namespace: <1> - state: "{{ state }}" - ignore_errors: true <2> ----- -<1> Specify the namespace where you want the config map created. -<2> Setting `ignore_errors: true` ensures that deleting a nonexistent config map does not fail. - -. Modify the `roles//defaults/main.yml` file to set `state` to `present` by default: -+ -[source,yaml] ----- ---- -state: present ----- - -. 
Create an Ansible playbook by creating a `playbook.yml` file in the top-level of your project directory, and include your `` role: -+ -[source,yaml] ----- ---- -- hosts: localhost - roles: - - ----- - -. Run the playbook: -+ -[source,terminal] ----- -$ ansible-playbook playbook.yml ----- -+ -.Example output -[source,terminal] ----- -[WARNING]: provided hosts list is empty, only localhost is available. Note that the implicit localhost does not match 'all' - -PLAY [localhost] ******************************************************************************** - -TASK [Gathering Facts] ******************************************************************************** -ok: [localhost] - -TASK [memcached : set ConfigMap example-config to present] ******************************************************************************** -changed: [localhost] - -PLAY RECAP ******************************************************************************** -localhost : ok=2 changed=1 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0 ----- - -. Verify that the config map was created: -+ -[source,terminal] ----- -$ oc get configmaps ----- -+ -.Example output -[source,terminal] ----- -NAME DATA AGE -example-config 0 2m1s ----- - -. Rerun the playbook setting `state` to `absent`: -+ -[source,terminal] ----- -$ ansible-playbook playbook.yml --extra-vars state=absent ----- -+ -.Example output -[source,terminal] ----- -[WARNING]: provided hosts list is empty, only localhost is available. 
Note that the implicit localhost does not match 'all' - -PLAY [localhost] ******************************************************************************** - -TASK [Gathering Facts] ******************************************************************************** -ok: [localhost] - -TASK [memcached : set ConfigMap example-config to absent] ******************************************************************************** -changed: [localhost] - -PLAY RECAP ******************************************************************************** -localhost : ok=2 changed=1 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0 ----- - -. Verify that the config map was deleted: -+ -[source,terminal] ----- -$ oc get configmaps ----- diff --git a/modules/osdk-ansible-metrics.adoc b/modules/osdk-ansible-metrics.adoc deleted file mode 100644 index 7bbe620b91..0000000000 --- a/modules/osdk-ansible-metrics.adoc +++ /dev/null @@ -1,240 +0,0 @@ -// Module included in the following assemblies: -// -// * operators/operator_sdk/osdk-monitoring-prometheus.adoc - -:_mod-docs-content-type: PROCEDURE -[id="osdk-ansible-metrics_{context}"] -= Exposing custom metrics for Ansible-based Operators - -As an Operator author creating Ansible-based Operators, you can use the Operator SDK's `osdk_metrics` module to expose custom Operator and Operand metrics, emit events, and support logging. - -.Prerequisites - -* Ansible-based Operator generated using the Operator SDK -* Prometheus Operator, which is deployed by default on {product-title} clusters - -.Procedure - -. Generate an Ansible-based Operator. This example uses a `testmetrics.com` domain: -+ -[source,terminal] ----- -$ operator-sdk init \ - --plugins=ansible \ - --domain=testmetrics.com ----- - -. Create a `metrics` API. This example uses a `kind` named `Testmetrics`: -+ -[source,terminal] ----- -$ operator-sdk create api \ - --group metrics \ - --version v1 \ - --kind Testmetrics \ - --generate-role ----- - -. 
Edit the `roles/testmetrics/tasks/main.yml` file and use the `osdk_metrics` module to create custom metrics for your Operator project: -+ -.Example `roles/testmetrics/tasks/main.yml` file -[%collapsible] -==== -[source,yaml] ----- ---- -# tasks file for Memcached -- name: start k8sstatus - k8s: - definition: - kind: Deployment - apiVersion: apps/v1 - metadata: - name: '{{ ansible_operator_meta.name }}-memcached' - namespace: '{{ ansible_operator_meta.namespace }}' - spec: - replicas: "{{size}}" - selector: - matchLabels: - app: memcached - template: - metadata: - labels: - app: memcached - spec: - containers: - - name: memcached - command: - - memcached - - -m=64 - - -o - - modern - - -v - image: "docker.io/memcached:1.4.36-alpine" - ports: - - containerPort: 11211 - -- osdk_metric: - name: my_thing_counter - description: This metric counts things - counter: {} - -- osdk_metric: - name: my_counter_metric - description: Add 3.14 to the counter - counter: - increment: yes - -- osdk_metric: - name: my_gauge_metric - description: Create my gauge and set it to 2. - gauge: - set: 2 - -- osdk_metric: - name: my_histogram_metric - description: Observe my histogram - histogram: - observe: 2 - -- osdk_metric: - name: my_summary_metric - description: Observe my summary - summary: - observe: 2 ----- -==== - -.Verification - -. Run your Operator on a cluster. For example, to use the "run as a deployment" method: - - -.. Build the Operator image and push it to a registry: -+ -[source,terminal] ----- -$ make docker-build docker-push IMG=//: ----- - -.. Install the Operator on a cluster: -+ -[source,terminal] ----- -$ make install ----- - -.. Deploy the Operator: -+ -[source,terminal] ----- -$ make deploy IMG=//: ----- - -. Create a `Testmetrics` custom resource (CR): - -.. 
Define the CR spec: -+ -.Example `config/samples/metrics_v1_testmetrics.yaml` file -[%collapsible] -==== -[source,yaml] ----- -apiVersion: metrics.testmetrics.com/v1 -kind: Testmetrics -metadata: - name: testmetrics-sample -spec: - size: 1 ----- -==== - -.. Create the object: -+ -[source,terminal] ----- -$ oc create -f config/samples/metrics_v1_testmetrics.yaml ----- - -. Get the pod details: -+ -[source,terminal] ----- -$ oc get pods ----- -+ -.Example output -[source,terminal] ----- -NAME READY STATUS RESTARTS AGE -ansiblemetrics-controller-manager- 2/2 Running 0 149m -testmetrics-sample-memcached- 1/1 Running 0 147m ----- - -. Get the endpoint details: -+ -[source,terminal] ----- -$ oc get ep ----- -+ -.Example output -[source,terminal] ----- -NAME ENDPOINTS AGE -ansiblemetrics-controller-manager-metrics-service 10.129.2.70:8443 150m ----- - -. Request a custom metrics token: -+ -[source,terminal] ----- -$ token=`oc create token prometheus-k8s -n openshift-monitoring` ----- - -. Check the metrics values: - -.. Check the `my_counter_metric` value: -+ -[source,terminal] ----- -$ oc exec ansiblemetrics-controller-manager- -- curl -k -H "Authoriza -tion: Bearer $token" 'https://10.129.2.70:8443/metrics' | grep my_counter ----- -+ -.Example output -[source,terminal] ----- -HELP my_counter_metric Add 3.14 to the counter -TYPE my_counter_metric counter -my_counter_metric 2 ----- - -.. Check the `my_gauge_metric` value: -+ -[source,terminal] ----- -$ oc exec ansiblemetrics-controller-manager- -- curl -k -H "Authoriza -tion: Bearer $token" 'https://10.129.2.70:8443/metrics' | grep gauge ----- -+ -.Example output -[source,terminal] ----- -HELP my_gauge_metric Create my gauge and set it to 2. ----- - -.. 
Check the `my_histogram_metric` and `my_summary_metric` values: -+ -[source,terminal] ----- -$ oc exec ansiblemetrics-controller-manager- -- curl -k -H "Authoriza -tion: Bearer $token" 'https://10.129.2.70:8443/metrics' | grep Observe ----- -+ -.Example output -[source,terminal] ----- -HELP my_histogram_metric Observe my histogram -HELP my_summary_metric Observe my summary ----- diff --git a/modules/osdk-ansible-modify-manager.adoc b/modules/osdk-ansible-modify-manager.adoc deleted file mode 100644 index 2d1e3d22fb..0000000000 --- a/modules/osdk-ansible-modify-manager.adoc +++ /dev/null @@ -1,85 +0,0 @@ -// Module included in the following assemblies: -// -// * operators/operator_sdk/ansible/osdk-ansible-tutorial.adoc - -:_mod-docs-content-type: PROCEDURE -[id="osdk-ansible-modify-manager_{context}"] -= Modifying the manager - -Update your Operator project to provide the reconcile logic, in the form of an Ansible role, which runs every time a `Memcached` resource is created, updated, or deleted. - -.Procedure - -. Update the `roles/memcached/tasks/main.yml` file with the following structure: -+ -[source,yaml] ----- ---- -- name: start memcached - k8s: - definition: - kind: Deployment - apiVersion: apps/v1 - metadata: - name: '{{ ansible_operator_meta.name }}-memcached' - namespace: '{{ ansible_operator_meta.namespace }}' - spec: - replicas: "{{size}}" - selector: - matchLabels: - app: memcached - template: - metadata: - labels: - app: memcached - spec: - containers: - - name: memcached - command: - - memcached - - -m=64 - - -o - - modern - - -v - image: "docker.io/memcached:1.4.36-alpine" - ports: - - containerPort: 11211 ----- -+ -This `memcached` role ensures a `memcached` deployment exist and sets the deployment size. - -. Set default values for variables used in your Ansible role by editing the `roles/memcached/defaults/main.yml` file: -+ -[source,yaml] ----- ---- -# defaults file for Memcached -size: 1 ----- - -. 
Update the `Memcached` sample resource in the `config/samples/cache_v1_memcached.yaml` file with the following structure: -+ -[source,yaml] ----- -apiVersion: cache.example.com/v1 -kind: Memcached -metadata: - labels: - app.kubernetes.io/name: memcached - app.kubernetes.io/instance: memcached-sample - app.kubernetes.io/part-of: memcached-operator - app.kubernetes.io/managed-by: kustomize - app.kubernetes.io/created-by: memcached-operator - name: memcached-sample -spec: - size: 3 ----- -+ -The key-value pairs in the custom resource (CR) spec are passed to Ansible as extra variables. - -[NOTE] -==== -The names of all variables in the `spec` field are converted to snake case, meaning lowercase with an underscore, by the Operator before running Ansible. For example, `serviceAccount` in the spec becomes `service_account` in Ansible. - -You can disable this case conversion by setting the `snakeCaseParameters` option to `false` in your `watches.yaml` file. It is recommended that you perform some type validation in Ansible on the variables to ensure that your application is receiving expected input. -==== diff --git a/modules/osdk-ansible-project-layout.adoc b/modules/osdk-ansible-project-layout.adoc deleted file mode 100644 index 04cbcd601b..0000000000 --- a/modules/osdk-ansible-project-layout.adoc +++ /dev/null @@ -1,60 +0,0 @@ -// Module included in the following assemblies: -// -// * operators/operator_sdk/ansible/osdk-ansible-project-layout.adoc - -[id="osdk-ansible-project-layout_{context}"] -= Ansible-based project layout - -Ansible-based Operator projects generated using the `operator-sdk init --plugins ansible` command contain the following directories and files: - -[options="header",cols="1,4"] -|=== - -|File or directory |Purpose - -|`Dockerfile` -|Dockerfile for building the container image for the Operator. 
- -|`Makefile` -|Targets for building, publishing, deploying the container image that wraps the Operator binary, and targets for installing and uninstalling the custom resource definition (CRD). - -|`PROJECT` -|YAML file containing metadata information for the Operator. - -|`config/crd` -|Base CRD files and the `kustomization.yaml` file settings. - -|`config/default` -|Collects all Operator manifests for deployment. Use by the `make deploy` command. - -|`config/manager` -|Controller manager deployment. - -|`config/prometheus` -|`ServiceMonitor` resource for monitoring the Operator. - -|`config/rbac` -|Role and role binding for leader election and authentication proxy. - -|`config/samples` -|Sample resources created for the CRDs. - -|`config/testing` -|Sample configurations for testing. - -|`playbooks/` -|A subdirectory for the playbooks to run. - -|`roles/` -|Subdirectory for the roles tree to run. - -|`watches.yaml` -|Group/version/kind (GVK) of the resources to watch, and the Ansible invocation method. New entries are added by using the `create api` command. - -|`requirements.yml` -|YAML file containing the Ansible collections and role dependencies to install during a build. - -|`molecule/` -|Molecule scenarios for end-to-end testing of your role and Operator. - -|=== diff --git a/modules/osdk-ansible-runner-directory.adoc b/modules/osdk-ansible-runner-directory.adoc deleted file mode 100644 index 640770dfda..0000000000 --- a/modules/osdk-ansible-runner-directory.adoc +++ /dev/null @@ -1,13 +0,0 @@ -// Module included in the following assemblies: -// -// * operators/operator_sdk/ansible/osdk-ansible-support.adoc - -[id="osdk-ansible-runner-directory_{context}"] -= Ansible Runner directory - -Ansible Runner keeps information about Ansible runs in the container. This is located at `/tmp/ansible-operator/runner/////`. 
- -[role="_additional-resources"] -.Additional resources - -* To learn more about the `runner` directory, see the link:https://ansible-runner.readthedocs.io/en/latest/index.html[Ansible Runner documentation]. diff --git a/modules/osdk-ansible-watches-file.adoc b/modules/osdk-ansible-watches-file.adoc deleted file mode 100644 index 8a6ed2ba4d..0000000000 --- a/modules/osdk-ansible-watches-file.adoc +++ /dev/null @@ -1,120 +0,0 @@ -// Module included in the following assemblies: -// -// * operators/operator_sdk/ansible/osdk-ansible-support.adoc - -[id="osdk-ansible-watches-file_{context}"] -= watches.yaml file - -A _group/version/kind (GVK)_ is a unique identifier for a Kubernetes API. The `watches.yaml` file contains a list of mappings from custom resources (CRs), identified by its GVK, to an Ansible role or playbook. The Operator expects this mapping file in a predefined location at `/opt/ansible/watches.yaml`. - -.`watches.yaml` file mappings -[cols="3,7",options="header"] -|=== -|Field -|Description - -|`group` -|Group of CR to watch. - -|`version` -|Version of CR to watch. - -|`kind` -|Kind of CR to watch - -|`role` (default) -|Path to the Ansible role added to the container. For example, if your `roles` directory is at `/opt/ansible/roles/` and your role is named `busybox`, this value would be `/opt/ansible/roles/busybox`. This field is mutually exclusive with the `playbook` field. - -|`playbook` -|Path to the Ansible playbook added to the container. This playbook is expected to be a way to call roles. This field is mutually exclusive with the `role` field. - -|`reconcilePeriod` (optional) -|The reconciliation interval, how often the role or playbook is run, for a given CR. - -|`manageStatus` (optional) -|When set to `true` (default), the Operator manages the status of the CR generically. When set to `false`, the status of the CR is managed elsewhere, by the specified role or playbook or in a separate controller. 
-|=== - -.Example `watches.yaml` file -[source,yaml] ----- -- version: v1alpha1 <1> - group: test1.example.com - kind: Test1 - role: /opt/ansible/roles/Test1 - -- version: v1alpha1 <2> - group: test2.example.com - kind: Test2 - playbook: /opt/ansible/playbook.yml - -- version: v1alpha1 <3> - group: test3.example.com - kind: Test3 - playbook: /opt/ansible/test3.yml - reconcilePeriod: 0 - manageStatus: false ----- -<1> Simple example mapping `Test1` to the `test1` role. -<2> Simple example mapping `Test2` to a playbook. -<3> More complex example for the `Test3` kind. Disables re-queuing and managing the CR status in the playbook. - -[id="osdk-ansible-watches-file-advanced_{context}"] -== Advanced options - -Advanced features can be enabled by adding them to your `watches.yaml` file per GVK. They can go below the `group`, `version`, `kind` and `playbook` or `role` fields. - -Some features can be overridden per resource using an annotation on that CR. The options that can be overridden have the annotation specified below. - -.Advanced watches.yaml file options -[cols="3,2,4,2,1",options="header"] -|=== -|Feature -|YAML key -|Description -|Annotation for override -|Default value - -|Reconcile period -|`reconcilePeriod` -|Time between reconcile runs for a particular CR. -|`ansible.operator-sdk/reconcile-period` -|`1m` - -|Manage status -|`manageStatus` -|Allows the Operator to manage the `conditions` section of each CR `status` section. -| -|`true` - -|Watch dependent resources -|`watchDependentResources` -|Allows the Operator to dynamically watch resources that are created by Ansible. -| -|`true` - -|Watch cluster-scoped resources -|`watchClusterScopedResources` -|Allows the Operator to watch cluster-scoped resources that are created by Ansible. 
-| -|`false` - -|Max runner artifacts -|`maxRunnerArtifacts` -|Manages the number of link:https://ansible-runner.readthedocs.io/en/latest/intro.html#runner-artifacts-directory-hierarchy[artifact directories] that Ansible Runner keeps in the Operator container for each individual resource. -|`ansible.operator-sdk/max-runner-artifacts` -|`20` -|=== - -.Example watches.yml file with advanced options -[source,yaml] ----- -- version: v1alpha1 - group: app.example.com - kind: AppService - playbook: /opt/ansible/playbook.yml - maxRunnerArtifacts: 30 - reconcilePeriod: 5s - manageStatus: False - watchDependentResources: False ----- diff --git a/modules/osdk-apiservices.adoc b/modules/osdk-apiservices.adoc deleted file mode 100644 index d3c268fa42..0000000000 --- a/modules/osdk-apiservices.adoc +++ /dev/null @@ -1,109 +0,0 @@ -// Module included in the following assemblies: -// -// * operators/operator_sdk/osdk-generating-csvs.adoc - -:_mod-docs-content-type: CONCEPT -[id="osdk-apiservices_{context}"] -= Understanding your API services - -As with CRDs, there are two types of API services that your Operator may use: _owned_ and _required_. - -[id="osdk-apiservices-owned_{context}"] -== Owned API services - -When a CSV owns an API service, it is responsible for describing the deployment of the extension `api-server` that backs it and the group/version/kind (GVK) it provides. - -An API service is uniquely identified by the group/version it provides and can be listed multiple times to denote the different kinds it is expected to provide. - -.Owned API service fields -[cols="2a,5a,2",options="header"] -|=== -|Field |Description |Required/optional - -|`Group` -|Group that the API service provides, for example `database.example.com`. -|Required - -|`Version` -|Version of the API service, for example `v1alpha1`. -|Required - -|`Kind` -|A kind that the API service is expected to provide. -|Required - -|`Name` -|The plural name for the API service provided. 
-|Required - -|`DeploymentName` -|Name of the deployment defined by your CSV that corresponds to your API service (required for owned API services). During the CSV pending phase, the OLM Operator searches the `InstallStrategy` of your CSV for a `Deployment` spec with a matching name, and if not found, does not transition the CSV to the "Install Ready" phase. -|Required - -|`DisplayName` -|A human readable version of your API service name, for example `MongoDB Standalone`. -|Required - -|`Description` -|A short description of how this API service is used by the Operator or a description of the functionality provided by the API service. -|Required - -|`Resources` -a|Your API services own one or more types of Kubernetes objects. These are listed in the resources section to inform your users of the objects they might need to troubleshoot or how to connect to the application, such as the service or ingress rule that exposes a database. - -It is recommended to only list out the objects that are important to a human, not an exhaustive list of everything you orchestrate. For example, do not list config maps that store internal state that are not meant to be modified by a user. -|Optional - -|`SpecDescriptors`, `StatusDescriptors`, and `ActionDescriptors` -|Essentially the same as for owned CRDs. -|Optional -|=== - -[id="osdk-apiservices-resource-creation_{context}"] -=== API service resource creation - -Operator Lifecycle Manager (OLM) is responsible for creating or replacing the service and API service resources for each unique owned API service: - -* Service pod selectors are copied from the CSV deployment matching the `DeploymentName` field of the API service description. - -* A new CA key/certificate pair is generated for each installation and the base64-encoded CA bundle is embedded in the respective API service resource. 
- -[id="osdk-apiservices-service-certs_{context}"] -=== API service serving certificates - -OLM handles generating a serving key/certificate pair whenever an owned API service is being installed. The serving certificate has a common name (CN) containing the hostname of the generated `Service` resource and is signed by the private key of the CA bundle embedded in the corresponding API service resource. - -The certificate is stored as a type `kubernetes.io/tls` secret in the deployment namespace, and a volume named `apiservice-cert` is automatically appended to the volumes section of the deployment in the CSV matching the `DeploymentName` field of the API service description. - -If one does not already exist, a volume mount with a matching name is also appended to all containers of that deployment. This allows users to define a volume mount with the expected name to accommodate any custom path requirements. The path of the generated volume mount defaults to `/apiserver.local.config/certificates` and any existing volume mounts with the same path are replaced. - -[id="osdk-apiservice-required_{context}"] -== Required API services - -OLM ensures all required CSVs have an API service that is available and all expected GVKs are discoverable before attempting installation. This allows a CSV to rely on specific kinds provided by API services it does not own. - -.Required API service fields -[cols="2a,5a,2",options="header"] -|=== -|Field |Description |Required/optional - -|`Group` -|Group that the API service provides, for example `database.example.com`. -|Required - -|`Version` -|Version of the API service, for example `v1alpha1`. -|Required - -|`Kind` -|A kind that the API service is expected to provide. -|Required - -|`DisplayName` -|A human readable version of your API service name, for example `MongoDB Standalone`. 
-|Required - -|`Description` -|A short description of how this API service is used by the Operator or a description of the functionality provided by the API service. -|Required -|=== diff --git a/modules/osdk-building-helm-operator.adoc b/modules/osdk-building-helm-operator.adoc deleted file mode 100644 index bccdb7cc72..0000000000 --- a/modules/osdk-building-helm-operator.adoc +++ /dev/null @@ -1,350 +0,0 @@ -// Module included in the following assemblies: -// -// * operators/operator_sdk/osdk-helm.adoc - -[id="osdk-building-helm-operator_{context}"] -= Building a Helm-based Operator using the Operator SDK - -This procedure walks through an example of building a simple Nginx Operator powered by a Helm chart using tools and libraries provided by the Operator SDK. - -[TIP] -==== -It is best practice to build a new Operator for each chart. This can allow for more native-behaving Kubernetes APIs (for example, `oc get Nginx`) and flexibility if you ever want to write a fully-fledged Operator in Go, migrating away from a Helm-based Operator. -==== - -.Prerequisites - -- Operator SDK CLI installed on the development workstation -- Access to a Kubernetes-based cluster v1.11.3+ (for example {product-title} {product-version}) using an account with `cluster-admin` permissions -- OpenShift CLI (`oc`) v{product-version}+ installed - -.Procedure - -. *Create a new Operator project.* A namespace-scoped Operator watches and manages resources in a single namespace. Namespace-scoped Operators are preferred because of their flexibility. They enable decoupled upgrades, namespace isolation for failures and monitoring, and differing API definitions. 
-+ -To create a new Helm-based, namespace-scoped `nginx-operator` project, use the following command: -+ -[source,terminal] ----- -$ operator-sdk new nginx-operator \ - --api-version=example.com/v1alpha1 \ - --kind=Nginx \ - --type=helm ----- -+ -[source,terminal] ----- -$ cd nginx-operator ----- -+ -This creates the `nginx-operator` project specifically for watching the Nginx resource with API version `example.com/v1apha1` and kind `Nginx`. - -. *Customize the Operator logic.* -+ -For this example, the `nginx-operator` executes the following reconciliation logic for each `Nginx` custom resource (CR): -+ --- -* Create an Nginx deployment if it does not exist. -* Create an Nginx service if it does not exist. -* Create an Nginx ingress if it is enabled and does not exist. -* Ensure that the deployment, service, and optional ingress match the desired configuration (for example, replica count, image, service type) as specified by the Nginx CR. --- -+ -By default, the `nginx-operator` watches `Nginx` resource events as shown in the `watches.yaml` file and executes Helm releases using the specified chart: -+ -[source,yaml] ----- -- version: v1alpha1 - group: example.com - kind: Nginx - chart: /opt/helm/helm-charts/nginx ----- - -.. *Review the Nginx Helm chart.* -+ -When a Helm Operator project is created, the Operator SDK creates an example Helm chart that contains a set of templates for a simple Nginx release. -+ -For this example, templates are available for deployment, service, and ingress resources, along with a `NOTES.txt` template, which Helm chart developers use to convey helpful information about a release. -+ -If you are not already familiar with Helm Charts, review the link:https://docs.helm.sh/developing_charts/[Helm Chart developer documentation]. - -.. 
*Understand the Nginx CR spec.* -+ -Helm uses a concept called link:https://docs.helm.sh/using_helm/#customizing-the-chart-before-installing[values] to provide customizations to the defaults of a Helm chart, which are defined in the `values.yaml` file. -+ -Override these defaults by setting the desired values in the CR spec. You can use the number of replicas as an example: - -... First, inspect the `helm-charts/nginx/values.yaml` file to find that the chart has a value called `replicaCount` and it is set to `1` by default. To have 2 Nginx instances in your deployment, your CR spec must contain `replicaCount: 2`. -+ -Update the `deploy/crds/example.com_v1alpha1_nginx_cr.yaml` file to look like the following: -+ -[source,yaml] ----- -apiVersion: example.com/v1alpha1 -kind: Nginx -metadata: - name: example-nginx -spec: - replicaCount: 2 ----- - -... Similarly, the default service port is set to `80`. To instead use `8080`, update the `deploy/crds/example.com_v1alpha1_nginx_cr.yaml` file again by adding the service port override: -+ -[source,yaml] ----- -apiVersion: example.com/v1alpha1 -kind: Nginx -metadata: - name: example-nginx -spec: - replicaCount: 2 - service: - port: 8080 ----- -+ -The Helm Operator applies the entire spec as if it was the contents of a values file, just like the `helm install -f ./overrides.yaml` command works. - -. *Deploy the CRD.* -+ -Before running the Operator, Kubernetes must know about the new custom resource definition (CRD) that the Operator will be watching. Deploy the following CRD: -+ -[source,terminal] ----- -$ oc create -f deploy/crds/example_v1alpha1_nginx_crd.yaml ----- - -. *Build and run the Operator.* -+ -There are two ways to build and run the Operator: -+ --- -* As a pod inside a Kubernetes cluster. -* As a Go program outside the cluster using the `operator-sdk up` command. --- -+ -Choose one of the following methods: - -.. *Run as a pod* inside a Kubernetes cluster. This is the preferred -method for production use. 
- -... Build the `nginx-operator` image and push it to a registry: -+ -[source,terminal] ----- -$ operator-sdk build quay.io/example/nginx-operator:v0.0.1 ----- -+ -[source,terminal] ----- -$ podman push quay.io/example/nginx-operator:v0.0.1 ----- - -... Deployment manifests are generated in the `deploy/operator.yaml` file. The deployment image in this file needs to be modified from the placeholder `REPLACE_IMAGE` to the previous built image. To do this, run: -+ -[source,terminal] ----- -$ sed -i 's|REPLACE_IMAGE|quay.io/example/nginx-operator:v0.0.1|g' deploy/operator.yaml ----- - -... Deploy the `nginx-operator` manifests: -+ -[source,terminal] ----- -$ oc create -f deploy/service_account.yaml ----- -+ -[source,terminal] ----- -$ oc create -f deploy/role.yaml ----- -+ -[source,terminal] ----- -$ oc create -f deploy/role_binding.yaml ----- -+ -[source,terminal] ----- -$ oc create -f deploy/operator.yaml ----- - -... Verify that the `nginx-operator` deployment is up and running: -+ -[source,terminal] ----- -$ oc get deployment ----- -+ -.Example output -[source,terminal] ----- -NAME DESIRED CURRENT UP-TO-DATE AVAILABLE AGE -nginx-operator 1 1 1 1 1m ----- - -.. *Run outside the cluster.* This method is preferred during the development cycle to speed up deployment and testing. -+ -It is important that the chart path referenced in the `watches.yaml` file exists on your machine. By default, the `watches.yaml` file is scaffolded to work with an Operator image built with the `operator-sdk build` command. When developing and testing your Operator with the `operator-sdk run --local` command, the SDK looks in your local file system for this path. - -... Create a symlink at this location to point to the path of your Helm chart: -+ -[source,terminal] ----- -$ sudo mkdir -p /opt/helm/helm-charts ----- -+ -[source,terminal] ----- -$ sudo ln -s $PWD/helm-charts/nginx /opt/helm/helm-charts/nginx ----- - -... 
To run the Operator locally with the default Kubernetes configuration file present at `$HOME/.kube/config`: -+ -[source,terminal] ----- -$ operator-sdk run --local ----- -+ -To run the Operator locally with a provided Kubernetes configuration file: -+ -[source,terminal] ----- -$ operator-sdk run --local --kubeconfig= ----- - -. *Deploy the `Nginx` CR.* -+ -Apply the `Nginx` CR that you modified earlier: -+ -[source,terminal] ----- -$ oc apply -f deploy/crds/example.com_v1alpha1_nginx_cr.yaml ----- -+ -Ensure that the `nginx-operator` creates the deployment for the CR: -+ -[source,terminal] ----- -$ oc get deployment ----- -+ -.Example output -[source,terminal] ----- -NAME DESIRED CURRENT UP-TO-DATE AVAILABLE AGE -example-nginx-b9phnoz9spckcrua7ihrbkrt1 2 2 2 2 1m ----- -+ -Check the pods to confirm two replicas were created: -+ -[source,terminal] ----- -$ oc get pods ----- -+ -.Example output -[source,terminal] ----- -NAME READY STATUS RESTARTS AGE -example-nginx-b9phnoz9spckcrua7ihrbkrt1-f8f9c875d-fjcr9 1/1 Running 0 1m -example-nginx-b9phnoz9spckcrua7ihrbkrt1-f8f9c875d-ljbzl 1/1 Running 0 1m ----- -+ -Check that the service port is set to `8080`: -+ -[source,terminal] ----- -$ oc get service ----- -+ -.Example output -[source,terminal] ----- -NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE -example-nginx-b9phnoz9spckcrua7ihrbkrt1 ClusterIP 10.96.26.3 8080/TCP 1m ----- - -. 
*Update the `replicaCount` and remove the port.* -+ -Change the `spec.replicaCount` field from `2` to `3`, remove the `spec.service` field, and apply the change: -+ -[source,terminal] ----- -$ cat deploy/crds/example.com_v1alpha1_nginx_cr.yaml ----- -+ -.Example output -[source,yaml] ----- -apiVersion: "example.com/v1alpha1" -kind: "Nginx" -metadata: - name: "example-nginx" -spec: - replicaCount: 3 ----- -+ -[source,terminal] ----- -$ oc apply -f deploy/crds/example.com_v1alpha1_nginx_cr.yaml ----- -+ -Confirm that the Operator changes the deployment size: -+ -[source,terminal] ----- -$ oc get deployment ----- -+ -.Example output -[source,terminal] ----- -NAME DESIRED CURRENT UP-TO-DATE AVAILABLE AGE -example-nginx-b9phnoz9spckcrua7ihrbkrt1 3 3 3 3 1m ----- -+ -Check that the service port is set to the default `80`: -+ -[source,terminal] ----- -$ oc get service ----- -+ -.Example output -[source,terminal] ----- -NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE -example-nginx-b9phnoz9spckcrua7ihrbkrt1 ClusterIP 10.96.26.3 80/TCP 1m ----- - -. 
*Clean up the resources:* -+ -[source,terminal] ----- -$ oc delete -f deploy/crds/example.com_v1alpha1_nginx_cr.yaml ----- -+ -[source,terminal] ----- -$ oc delete -f deploy/operator.yaml ----- -+ -[source,terminal] ----- -$ oc delete -f deploy/role_binding.yaml ----- -+ -[source,terminal] ----- -$ oc delete -f deploy/role.yaml ----- -+ -[source,terminal] ----- -$ oc delete -f deploy/service_account.yaml ----- -+ -[source,terminal] ----- -$ oc delete -f deploy/crds/example_v1alpha1_nginx_crd.yaml ----- diff --git a/modules/osdk-bundle-operator.adoc b/modules/osdk-bundle-operator.adoc deleted file mode 100644 index d27dbcdfdd..0000000000 --- a/modules/osdk-bundle-operator.adoc +++ /dev/null @@ -1,92 +0,0 @@ -// Module included in the following assemblies: -// -// * operators/operator_sdk/golang/osdk-golang-tutorial.adoc -// * operators/operator_sdk/ansible/osdk-ansible-tutorial.adoc -// * operators/operator_sdk/helm/osdk-helm-tutorial.adoc -// * operators/operator_sdk/osdk-working-bundle-images.adoc - -ifeval::["{context}" == "osdk-golang-tutorial"] -:golang: -endif::[] -ifeval::["{context}" == "osdk-working-bundle-images"] -:golang: -endif::[] - -:_mod-docs-content-type: PROCEDURE -[id="osdk-bundle-operator_{context}"] -= Bundling an Operator - -The Operator bundle format is the default packaging method for Operator SDK and Operator Lifecycle Manager (OLM). You can get your Operator ready for use on OLM by using the Operator SDK to build and push your Operator project as a bundle image. - -.Prerequisites - -- Operator SDK CLI installed on a development workstation -- OpenShift CLI (`oc`) v{product-version}+ installed -- Operator project initialized by using the Operator SDK -ifdef::golang[] -- If your Operator is Go-based, your project must be updated to use supported images for running on {product-title} -endif::[] - -.Procedure - -. Run the following `make` commands in your Operator project directory to build and push your Operator image. 
Modify the `IMG` argument in the following steps to reference a repository that you have access to. You can obtain an account for storing containers at repository sites such as Quay.io. - -.. Build the image: -+ -[source,terminal] ----- -$ make docker-build IMG=//: ----- -+ -[NOTE] -==== -The Dockerfile generated by the SDK for the Operator explicitly references `GOARCH=amd64` for `go build`. This can be amended to `GOARCH=$TARGETARCH` for non-AMD64 architectures. Docker will automatically set the environment variable to the value specified by `–platform`. With Buildah, the `–build-arg` will need to be used for the purpose. For more information, see link:https://sdk.operatorframework.io/docs/advanced-topics/multi-arch/#supporting-multiple-architectures[Multiple Architectures]. -==== - -.. Push the image to a repository: -+ -[source,terminal] ----- -$ make docker-push IMG=//: ----- - -. Create your Operator bundle manifest by running the `make bundle` command, which invokes several commands, including the Operator SDK `generate bundle` and `bundle validate` subcommands: -+ -[source,terminal] ----- -$ make bundle IMG=//: ----- -+ -Bundle manifests for an Operator describe how to display, create, and manage an application. The `make bundle` command creates the following files and directories in your Operator project: -+ --- -* A bundle manifests directory named `bundle/manifests` that contains a `ClusterServiceVersion` object -* A bundle metadata directory named `bundle/metadata` -* All custom resource definitions (CRDs) in a `config/crd` directory -* A Dockerfile `bundle.Dockerfile` --- -+ -These files are then automatically validated by using `operator-sdk bundle validate` to ensure the on-disk bundle representation is correct. - -. Build and push your bundle image by running the following commands. OLM consumes Operator bundles using an index image, which reference one or more bundle images. - -.. Build the bundle image. 
Set `BUNDLE_IMG` with the details for the registry, user namespace, and image tag where you intend to push the image: -+ -[source,terminal] ----- -$ make bundle-build BUNDLE_IMG=//: ----- - -.. Push the bundle image: -+ -[source,terminal] ----- -$ docker push //: ----- - -ifeval::["{context}" == "osdk-golang-tutorial"] -:!golang: -endif::[] -ifeval::["{context}" == "osdk-working-bundle-images"] -:!golang: -endif::[] diff --git a/modules/osdk-bundle-upgrade-olm.adoc b/modules/osdk-bundle-upgrade-olm.adoc deleted file mode 100644 index eeb78f9575..0000000000 --- a/modules/osdk-bundle-upgrade-olm.adoc +++ /dev/null @@ -1,82 +0,0 @@ -// Module included in the following assemblies: -// -// * operators/operator_sdk/osdk-working-bundle-images.adoc - -:_mod-docs-content-type: PROCEDURE -[id="osdk-bundle-upgrade-olm_{context}"] -= Testing an Operator upgrade on Operator Lifecycle Manager - -You can quickly test upgrading your Operator by using Operator Lifecycle Manager (OLM) integration in the Operator SDK, without requiring you to manually manage index images and catalog sources. - -The `run bundle-upgrade` subcommand automates triggering an installed Operator to upgrade to a later version by specifying a bundle image for the later version. - -.Prerequisites - -- Operator installed with OLM either by using the `run bundle` subcommand or with traditional OLM installation -- A bundle image that represents a later version of the installed Operator - -.Procedure - -. If your Operator has not already been installed with OLM, install the earlier version either by using the `run bundle` subcommand or with traditional OLM installation. -+ -[NOTE] -==== -If the earlier version of the bundle was installed traditionally using OLM, the newer bundle that you intend to upgrade to must not exist in the index image referenced by the catalog source. 
Otherwise, running the `run bundle-upgrade` subcommand will cause the registry pod to fail because the newer bundle is already referenced by the index that provides the package and cluster service version (CSV). -==== -+ -For example, you can use the following `run bundle` subcommand for a Memcached Operator by specifying the earlier bundle image: -+ -[source,terminal] ----- -$ operator-sdk run bundle //memcached-operator:v0.0.1 ----- -+ -.Example output -[source,terminal] ----- -INFO[0006] Creating a File-Based Catalog of the bundle "quay.io/demo/memcached-operator:v0.0.1" -INFO[0008] Generated a valid File-Based Catalog -INFO[0012] Created registry pod: quay-io-demo-memcached-operator-v0-0-1 -INFO[0012] Created CatalogSource: memcached-operator-catalog -INFO[0012] OperatorGroup "operator-sdk-og" created -INFO[0012] Created Subscription: memcached-operator-v0-0-1-sub -INFO[0015] Approved InstallPlan install-h9666 for the Subscription: memcached-operator-v0-0-1-sub -INFO[0015] Waiting for ClusterServiceVersion "my-project/memcached-operator.v0.0.1" to reach 'Succeeded' phase -INFO[0015] Waiting for ClusterServiceVersion "my-project/memcached-operator.v0.0.1" to appear -INFO[0026] Found ClusterServiceVersion "my-project/memcached-operator.v0.0.1" phase: Pending -INFO[0028] Found ClusterServiceVersion "my-project/memcached-operator.v0.0.1" phase: Installing -INFO[0059] Found ClusterServiceVersion "my-project/memcached-operator.v0.0.1" phase: Succeeded -INFO[0059] OLM has successfully installed "memcached-operator.v0.0.1" ----- - -. 
Upgrade the installed Operator by specifying the bundle image for the later Operator version: -+ -[source,terminal] ----- -$ operator-sdk run bundle-upgrade //memcached-operator:v0.0.2 ----- -+ -.Example output -[source,terminal] ----- -INFO[0002] Found existing subscription with name memcached-operator-v0-0-1-sub and namespace my-project -INFO[0002] Found existing catalog source with name memcached-operator-catalog and namespace my-project -INFO[0008] Generated a valid Upgraded File-Based Catalog -INFO[0009] Created registry pod: quay-io-demo-memcached-operator-v0-0-2 -INFO[0009] Updated catalog source memcached-operator-catalog with address and annotations -INFO[0010] Deleted previous registry pod with name "quay-io-demo-memcached-operator-v0-0-1" -INFO[0041] Approved InstallPlan install-gvcjh for the Subscription: memcached-operator-v0-0-1-sub -INFO[0042] Waiting for ClusterServiceVersion "my-project/memcached-operator.v0.0.2" to reach 'Succeeded' phase -INFO[0019] Found ClusterServiceVersion "my-project/memcached-operator.v0.0.2" phase: Pending -INFO[0042] Found ClusterServiceVersion "my-project/memcached-operator.v0.0.2" phase: InstallReady -INFO[0043] Found ClusterServiceVersion "my-project/memcached-operator.v0.0.2" phase: Installing -INFO[0044] Found ClusterServiceVersion "my-project/memcached-operator.v0.0.2" phase: Succeeded -INFO[0044] Successfully upgraded to "memcached-operator.v0.0.2" ----- - -. 
Clean up the installed Operators: -+ -[source,terminal] ----- -$ operator-sdk cleanup memcached-operator ----- diff --git a/modules/osdk-bundle-validate-about.adoc b/modules/osdk-bundle-validate-about.adoc deleted file mode 100644 index 621a147e46..0000000000 --- a/modules/osdk-bundle-validate-about.adoc +++ /dev/null @@ -1,65 +0,0 @@ -// Module included in the following assemblies: -// -// * operators/operator_sdk/osdk-bundle-validate.adoc - -:_mod-docs-content-type: CONCEPT -[id="osdk-about-bundle-validate_{context}"] -= About the bundle validate command - -While the Operator SDK `scorecard` command can run tests on your Operator based on a configuration file and test images, the `bundle validate` subcommand can validate local bundle directories and remote bundle images for content and structure. - -.`bundle validate` command syntax -[source,terminal] ----- -$ operator-sdk bundle validate ----- - -[NOTE] -==== -The `bundle validate` command runs automatically when you build your bundle using the `make bundle` command. -==== - -Bundle images are pulled from a remote registry and built locally before they are validated. Local bundle directories must contain Operator metadata and manifests. The bundle metadata and manifests must have a structure similar to the following bundle layout: - -.Example bundle layout -[source,terminal] ----- -./bundle - ├── manifests - │   ├── cache.my.domain_memcacheds.yaml - │   └── memcached-operator.clusterserviceversion.yaml - └── metadata - └── annotations.yaml ----- - -Bundle tests pass validation and finish with an exit code of `0` if no errors are detected. - -.Example output -[source,terminal] ----- -INFO[0000] All validation tests have completed successfully ----- - -Tests fail validation and finish with an exit code of `1` if errors are detected. 
- -.Example output -[source,terminal] ----- -ERRO[0000] Error: Value cache.example.com/v1alpha1, Kind=Memcached: CRD "cache.example.com/v1alpha1, Kind=Memcached" is present in bundle "" but not defined in CSV ----- - -Bundle tests that result in warnings can still pass validation with an exit code of `0` as long as no errors are detected. Tests only fail on errors. - -.Example output -[source,terminal] ----- -WARN[0000] Warning: Value : (memcached-operator.v0.0.1) annotations not found -INFO[0000] All validation tests have completed successfully ----- - -For further information about the `bundle validate` subcommand, run: - -[source,terminal] ----- -$ operator-sdk bundle validate -h ----- diff --git a/modules/osdk-bundle-validate-run.adoc b/modules/osdk-bundle-validate-run.adoc deleted file mode 100644 index 057c3f4f1f..0000000000 --- a/modules/osdk-bundle-validate-run.adoc +++ /dev/null @@ -1,62 +0,0 @@ -// Module included in the following assemblies: -// -// * operators/operator_sdk/osdk-bundle-validate.adoc - -:_mod-docs-content-type: PROCEDURE -[id="osdk-bundle-validate-run_{context}"] -= Running the bundle validate command - -The default validator runs a test every time you enter the `bundle validate` command. You can run optional validators using the `--select-optional` flag. Optional validators run tests in addition to the default test. - -.Prerequisites - -* Operator project generated by using the Operator SDK - -.Procedure - -. If you want to run the default validator against a local bundle directory, enter the following command from your Operator project directory: -+ -[source,terminal] ----- -$ operator-sdk bundle validate ./bundle ----- - -. If you want to run the default validator against a remote Operator bundle image, enter the following command: -+ -[source,terminal] ----- -$ operator-sdk bundle validate \ - /: ----- -+ -where: - -:: Specifies the registry where the bundle is hosted, such as `quay.io/example`. 
-:: Specifies the name of the bundle image, such as `memcached-operator`. -:: Specifies the tag of the bundle image, such as `v{osdk_ver}`. -+ -[NOTE] -==== -If you want to validate an Operator bundle image, you must host your image in a remote registry. The Operator SDK pulls the image and builds it locally before running tests. The `bundle validate` command does not support testing local bundle images. -==== - -. If you want to run an additional validator against an Operator bundle, enter the following command: -+ -[source,terminal] ----- -$ operator-sdk bundle validate \ - \ - --select-optional ----- -+ -where: - -:: Specifies the local bundle directory or remote bundle image, such as `~/projects/memcached` or `quay.io/example/memcached-operator:v{osdk_ver}`. -:: Specifies the name of the validator you want to run, such as `name=good-practices`. -+ -.Example output -[source,terminal] ----- -ERRO[0000] Error: Value apiextensions.k8s.io/v1, Kind=CustomResource: unsupported media type registry+v1 for bundle object -WARN[0000] Warning: Value k8sevent.v0.0.1: owned CRD "k8sevents.k8s.k8sevent.com" has an empty description ----- diff --git a/modules/osdk-bundle-validate-tests.adoc b/modules/osdk-bundle-validate-tests.adoc deleted file mode 100644 index e90b0e7df4..0000000000 --- a/modules/osdk-bundle-validate-tests.adoc +++ /dev/null @@ -1,37 +0,0 @@ -// Module included in the following assemblies: -// -// * operators/operator_sdk/osdk-bundle-validate.adoc - -:_mod-docs-content-type: REFERENCE -[id="osdk-bundle-validate-tests_{context}"] -= Built-in bundle validate tests - -The Operator SDK ships with pre-defined validators arranged into suites. If you run the `bundle validate` command without specifying a validator, the default test runs. The default test verifies that a bundle adheres to the specifications defined by the Operator Framework community. For more information, see "Bundle format". 
- -You can run optional validators to test for issues such as OperatorHub compatibility or deprecated Kubernetes APIs. Optional validators always run in addition to the default test. - -.`bundle validate` command syntax for optional test suites -[source,terminal] ----- -$ operator-sdk bundle validate - --select-optional ----- - -[id="osdk-bundle-validate-additional-tests_{context}"] -.Additional `bundle validate` validators -[cols="3,7,3",options="header"] -|=== -|Name |Description |Label - -|Operator Framework -|This validator tests an Operator bundle against the entire suite of validators provided by the Operator Framework. -|`suite=operatorframework` - -|OperatorHub -|This validator tests an Operator bundle for compatibility with OperatorHub. -|`name=operatorhub` - -|Good Practices -|This validator tests whether an Operator bundle complies with good practices as defined by the Operator Framework. It checks for issues, such as an empty CRD description or unsupported Operator Lifecycle Manager (OLM) resources. -|`name=good-practices` -|=== diff --git a/modules/osdk-cli-ref-bundle.adoc b/modules/osdk-cli-ref-bundle.adoc deleted file mode 100644 index 13f66844cd..0000000000 --- a/modules/osdk-cli-ref-bundle.adoc +++ /dev/null @@ -1,33 +0,0 @@ -// Module included in the following assemblies: -// -// * cli_reference/osdk/cli-osdk-ref.adoc -// * operators/operator_sdk/osdk-cli-ref.adoc - -[id="osdk-cli-ref-bundle_{context}"] -= bundle - -The `operator-sdk bundle` command manages Operator bundle metadata. - -[id="osdk-cli-ref-bundle-validate_{context}"] -== validate - -The `bundle validate` subcommand validates an Operator bundle. - -.`bundle validate` flags -[options="header",cols="1,3"] -|=== -|Flag |Description - -|`-h`, `--help` -|Help output for the `bundle validate` subcommand. - -|`--index-builder` (string) -|Tool to pull and unpack bundle images. Only used when validating a bundle image. Available options are `docker`, which is the default, `podman`, or `none`. 
- -|`--list-optional` -|List all optional validators available. When set, no validators are run. - -|`--select-optional` (string) -|Label selector to select optional validators to run. When run with the `--list-optional` flag, lists available optional validators. - -|=== diff --git a/modules/osdk-cli-ref-cleanup.adoc b/modules/osdk-cli-ref-cleanup.adoc deleted file mode 100644 index d416c1e9e4..0000000000 --- a/modules/osdk-cli-ref-cleanup.adoc +++ /dev/null @@ -1,29 +0,0 @@ -// Module included in the following assemblies: -// -// * cli_reference/osdk/cli-osdk-ref.adoc -// * operators/operator_sdk/osdk-cli-ref.adoc - -:_mod-docs-content-type: REFERENCE -[id="osdk-cli-ref-cleanup_{context}"] -= cleanup - -The `operator-sdk cleanup` command destroys and removes resources that were created for an Operator that was deployed with the `run` command. - -.`cleanup` flags -[options="header",cols="1,3"] -|=== -|Flag |Description - -|`-h`, `--help` -|Help output for the `run bundle` subcommand. - -|`--kubeconfig` (string) -|Path to the `kubeconfig` file to use for CLI requests. - -|`-n`, `--namespace` (string) -|If present, namespace in which to run the CLI request. - -|`--timeout ` -|Time to wait for the command to complete before failing. The default value is `2m0s`. - -|=== diff --git a/modules/osdk-cli-ref-completion.adoc b/modules/osdk-cli-ref-completion.adoc deleted file mode 100644 index e8c402509c..0000000000 --- a/modules/osdk-cli-ref-completion.adoc +++ /dev/null @@ -1,45 +0,0 @@ -// Module included in the following assemblies: -// -// * cli_reference/osdk/cli-osdk-ref.adoc -// * operators/operator_sdk/osdk-cli-ref.adoc - -[id="osdk-cli-ref-completion_{context}"] -= completion - -The `operator-sdk completion` command generates shell completions to make issuing CLI commands quicker and easier. - -.`completion` subcommands -[options="header",cols="1,3"] -|=== -|Subcommand |Description - -|`bash` -|Generate bash completions. - -|`zsh` -|Generate zsh completions. 
-|=== - -.`completion` flags -[options="header",cols="1,3"] -|=== -|Flag |Description - -|`-h, --help` -|Usage help output. -|=== - -For example: - -[source,terminal] ----- -$ operator-sdk completion bash ----- - -.Example output -[source,terminal] ----- -# bash completion for operator-sdk -*- shell-script -*- -... -# ex: ts=4 sw=4 et filetype=sh ----- diff --git a/modules/osdk-cli-ref-create.adoc b/modules/osdk-cli-ref-create.adoc deleted file mode 100644 index 6ec86e4e10..0000000000 --- a/modules/osdk-cli-ref-create.adoc +++ /dev/null @@ -1,24 +0,0 @@ -// Module included in the following assemblies: -// -// * cli_reference/osdk/cli-osdk-ref.adoc -// * operators/operator_sdk/osdk-cli-ref.adoc - -[id="osdk-cli-ref-create_{context}"] -= create - -The `operator-sdk create` command is used to create, or _scaffold_, a Kubernetes API. - -[id="osdk-cli-ref-create-api_{context}"] -== api - -The `create api` subcommand scaffolds a Kubernetes API. The subcommand must be run in a project that was initialized with the `init` command. - -.`create api` flags -[options="header",cols="1,3"] -|=== -|Flag |Description - -|`-h`, `--help` -|Help output for the `run bundle` subcommand. - -|=== diff --git a/modules/osdk-cli-ref-generate-bundle.adoc b/modules/osdk-cli-ref-generate-bundle.adoc deleted file mode 100644 index a92cadc699..0000000000 --- a/modules/osdk-cli-ref-generate-bundle.adoc +++ /dev/null @@ -1,66 +0,0 @@ -// Module included in the following assemblies: -// -// * cli_reference/osdk/cli-osdk-ref.adoc -// * operators/operator_sdk/osdk-cli-ref.adoc - -[id="osdk-cli-ref-generate-bundle_{context}"] -= bundle - -The `generate bundle` subcommand generates a set of bundle manifests, metadata, and a `bundle.Dockerfile` file for your Operator project. - -[NOTE] -==== -Typically, you run the `generate kustomize manifests` subcommand first to generate the input link:https://kustomize.io/[Kustomize] bases that are used by the `generate bundle` subcommand. 
However, you can use the `make bundle` command in an initialized project to automate running these commands in sequence. -==== - -.`generate bundle` flags -[options="header",cols="1,3"] -|=== -|Flag |Description - -|`--channels` (string) -|Comma-separated list of channels to which the bundle belongs. The default value is `alpha`. - -|`--crds-dir` (string) -|Root directory for `CustomResourceDefinition` manifests. - -|`--default-channel` (string) -|The default channel for the bundle. - -|`--deploy-dir` (string) -|Root directory for Operator manifests, such as deployments and RBAC. This directory is different from the directory passed to the `--input-dir` flag. - -|`-h`, `--help` -|Help for `generate bundle` - -|`--input-dir` (string) -|Directory from which to read an existing bundle. This directory is the parent of your bundle `manifests` directory and is different from the `--deploy-dir` directory. - -|`--kustomize-dir` (string) -|Directory containing Kustomize bases and a `kustomization.yaml` file for bundle manifests. The default path is `config/manifests`. - -|`--manifests` -|Generate bundle manifests. - -|`--metadata` -|Generate bundle metadata and Dockerfile. - -|`--output-dir` (string) -|Directory to write the bundle to. - -|`--overwrite` -|Overwrite the bundle metadata and Dockerfile if they exist. The default value is `true`. - -|`--package` (string) -|Package name for the bundle. - -|`-q`, `--quiet` -|Run in quiet mode. - -|`--stdout` -|Write bundle manifest to standard out. - -|`--version` (string) -|Semantic version of the Operator in the generated bundle. Set only when creating a new bundle or upgrading the Operator. 
- -|=== diff --git a/modules/osdk-cli-ref-generate-kustomize.adoc b/modules/osdk-cli-ref-generate-kustomize.adoc deleted file mode 100644 index de4e3efc94..0000000000 --- a/modules/osdk-cli-ref-generate-kustomize.adoc +++ /dev/null @@ -1,42 +0,0 @@ -// Module included in the following assemblies: -// -// * cli_reference/osdk/cli-osdk-ref.adoc -// * operators/operator_sdk/osdk-cli-ref.adoc - -[id="osdk-cli-ref-generate-kustomize_{context}"] -= kustomize - -The `generate kustomize` subcommand contains subcommands that generate link:https://kustomize.io/[Kustomize] data for the Operator. - -[id="osdk-cli-ref-generate-kustomize-manifests_{context}"] -== manifests - -The `generate kustomize manifests` subcommand generates or regenerates Kustomize bases and a `kustomization.yaml` file in the `config/manifests` directory, which are used to build bundle manifests by other Operator SDK commands. This command interactively asks for UI metadata, an important component of manifest bases, by default unless a base already exists or you set the `--interactive=false` flag. - -.`generate kustomize manifests` flags -[options="header",cols="1,3"] -|=== -|Flag |Description - -|`--apis-dir` (string) -|Root directory for API type definitions. - -|`-h`, `--help` -|Help for `generate kustomize manifests`. - -|`--input-dir` (string) -|Directory containing existing Kustomize files. - -|`--interactive` -|When set to `false`, if no Kustomize base exists, an interactive command prompt is presented to accept custom metadata. - -|`--output-dir` (string) -|Directory where to write Kustomize files. - -|`--package` (string) -|Package name. - -|`-q`, `--quiet` -|Run in quiet mode. 
- -|=== diff --git a/modules/osdk-cli-ref-generate.adoc b/modules/osdk-cli-ref-generate.adoc deleted file mode 100644 index f102a98685..0000000000 --- a/modules/osdk-cli-ref-generate.adoc +++ /dev/null @@ -1,9 +0,0 @@ -// Module included in the following assemblies: -// -// * cli_reference/osdk/cli-osdk-ref.adoc -// * operators/operator_sdk/osdk-cli-ref.adoc - -[id="osdk-cli-ref-generate_{context}"] -= generate - -The `operator-sdk generate` command invokes a specific generator to generate code or manifests. diff --git a/modules/osdk-cli-ref-init.adoc b/modules/osdk-cli-ref-init.adoc deleted file mode 100644 index 07ae73d2a3..0000000000 --- a/modules/osdk-cli-ref-init.adoc +++ /dev/null @@ -1,35 +0,0 @@ -// Module included in the following assemblies: -// -// * cli_reference/osdk/cli-osdk-ref.adoc -// * operators/operator_sdk/osdk-cli-ref.adoc - -[id="osdk-cli-ref-init_{context}"] -= init - -The `operator-sdk init` command initializes an Operator project and generates, or _scaffolds_, a default project directory layout for the given plugin. - -This command writes the following files: - -* Boilerplate license file -* `PROJECT` file with the domain and repository -* `Makefile` to build the project -* `go.mod` file with project dependencies -* `kustomization.yaml` file for customizing manifests -* Patch file for customizing images for manager manifests -* Patch file for enabling Prometheus metrics -* `main.go` file to run - -.`init` flags -[options="header",cols="1,3"] -|=== -|Flag |Description - -|`--help, -h` -|Help output for the `init` command. - -|`--plugins` (string) -|Name and optionally version of the plugin to initialize the project with. Available plugins are `ansible.sdk.operatorframework.io/v1`, `go.kubebuilder.io/v2`, `go.kubebuilder.io/v3`, and `helm.sdk.operatorframework.io/v1`. - -|`--project-version` -|Project version. Available values are `2` and `3-alpha`, which is the default. 
-|=== diff --git a/modules/osdk-cli-ref-run-bundle-upgrade.adoc b/modules/osdk-cli-ref-run-bundle-upgrade.adoc deleted file mode 100644 index cbba00b680..0000000000 --- a/modules/osdk-cli-ref-run-bundle-upgrade.adoc +++ /dev/null @@ -1,36 +0,0 @@ -// Module included in the following assemblies: -// -// * cli_reference/osdk/cli-osdk-ref.adoc -// * operators/operator_sdk/osdk-cli-ref.adoc - -:_mod-docs-content-type: REFERENCE -[id="osdk-cli-ref-run-bundle-upgrade_{context}"] -= bundle-upgrade - -The `run bundle-upgrade` subcommand upgrades an Operator that was previously installed in the bundle format with Operator Lifecycle Manager (OLM). - -.`run bundle-upgrade` flags -[options="header",cols="1,3"] -|=== -|Flag |Description - -|`--timeout ` -|Upgrade timeout. The default value is `2m0s`. - -|`--kubeconfig` (string) -|Path to the `kubeconfig` file to use for CLI requests. - -|`-n`, `--namespace` (string) -|If present, namespace in which to run the CLI request. - -|`--security-context-config ` -|Specifies the security context to use for the catalog pod. Allowed values include `restricted` and `legacy`. The default value is `legacy`. ^[1]^ - -|`-h`, `--help` -|Help output for the `run bundle` subcommand. - -|=== -[.small] --- -1. The `restricted` security context is not compatible with the `default` namespace. To configure your Operator's pod security admission in your production environment, see "Complying with pod security admission". For more information about pod security admission, see "Understanding and managing pod security admission". 
--- diff --git a/modules/osdk-cli-ref-run-bundle.adoc b/modules/osdk-cli-ref-run-bundle.adoc deleted file mode 100644 index 081ceda0a1..0000000000 --- a/modules/osdk-cli-ref-run-bundle.adoc +++ /dev/null @@ -1,42 +0,0 @@ -// Module included in the following assemblies: -// -// * cli_reference/osdk/cli-osdk-ref.adoc -// * operators/operator_sdk/osdk-cli-ref.adoc - -:_mod-docs-content-type: REFERENCE -[id="osdk-cli-ref-run-bundle_{context}"] -= bundle - -The `run bundle` subcommand deploys an Operator in the bundle format with Operator Lifecycle Manager (OLM). - -.`run bundle` flags -[options="header",cols="1,3"] -|=== -|Flag |Description - -|`--index-image` (string) -|Index image in which to inject a bundle. The default image is `quay.io/operator-framework/upstream-opm-builder:latest`. - -|`--install-mode ` -|Install mode supported by the cluster service version (CSV) of the Operator, for example `AllNamespaces` or `SingleNamespace`. - -|`--timeout ` -|Install timeout. The default value is `2m0s`. - -|`--kubeconfig` (string) -|Path to the `kubeconfig` file to use for CLI requests. - -|`-n`, `--namespace` (string) -|If present, namespace in which to run the CLI request. - -|`--security-context-config ` -|Specifies the security context to use for the catalog pod. Allowed values include `restricted` and `legacy`. The default value is `legacy`. ^[1]^ - -|`-h`, `--help` -|Help output for the `run bundle` subcommand. - -|=== -[.small] --- -1. The `restricted` security context is not compatible with the `default` namespace. To configure your Operator's pod security admission in your production environment, see "Complying with pod security admission". For more information about pod security admission, see "Understanding and managing pod security admission". 
--- diff --git a/modules/osdk-cli-ref-run.adoc b/modules/osdk-cli-ref-run.adoc deleted file mode 100644 index 3263b9c15a..0000000000 --- a/modules/osdk-cli-ref-run.adoc +++ /dev/null @@ -1,9 +0,0 @@ -// Module included in the following assemblies: -// -// * cli_reference/osdk/cli-osdk-ref.adoc -// * operators/operator_sdk/osdk-cli-ref.adoc - -[id="osdk-cli-ref-run_{context}"] -= run - -The `operator-sdk run` command provides options that can launch the Operator in various environments. diff --git a/modules/osdk-cli-ref-scorecard.adoc b/modules/osdk-cli-ref-scorecard.adoc deleted file mode 100644 index eb427a71a0..0000000000 --- a/modules/osdk-cli-ref-scorecard.adoc +++ /dev/null @@ -1,53 +0,0 @@ -// Module included in the following assemblies: -// -// * cli_reference/osdk/cli-osdk-ref.adoc -// * operators/operator_sdk/osdk-cli-ref.adoc - -[id="osdk-cli-ref-scorecard_{context}"] -= scorecard - -The `operator-sdk scorecard` command runs the scorecard tool to validate an Operator bundle and provide suggestions for improvements. The command takes one argument, either a bundle image or directory containing manifests and metadata. If the argument holds an image tag, the image must be present remotely. - -.`scorecard` flags -[options="header",cols="1,3"] -|=== -|Flag |Description - -|`-c`, `--config` (string) -|Path to scorecard configuration file. The default path is `bundle/tests/scorecard/config.yaml`. - -|`-h`, `--help` -|Help output for the `scorecard` command. - -|`--kubeconfig` (string) -|Path to `kubeconfig` file. - -|`-L`, `--list` -|List which tests are available to run. - -|`-n`, --namespace (string) -|Namespace in which to run the test images. - -|`-o`, `--output` (string) -|Output format for results. Available values are `text`, which is the default, and `json`. - -|`--pod-security ` -|Option to run scorecard with the specified security context. Allowed values include `restricted` and `legacy`. The default value is `legacy`. 
^[1]^ - -|`-l`, `--selector` (string) -|Label selector to determine which tests are run. - -|`-s`, `--service-account` (string) -|Service account to use for tests. The default value is `default`. - -|`-x`, `--skip-cleanup` -|Disable resource cleanup after tests are run. - -|`-w`, `--wait-time ` -|Seconds to wait for tests to complete, for example `35s`. The default value is `30s`. - -|=== -[.small] --- -1. The `restricted` security context is not compatible with the `default` namespace. To configure your Operator's pod security admission in your production environment, see "Complying with pod security admission". For more information about pod security admission, see "Understanding and managing pod security admission". --- diff --git a/modules/osdk-common-prereqs.adoc b/modules/osdk-common-prereqs.adoc deleted file mode 100644 index a02d83933e..0000000000 --- a/modules/osdk-common-prereqs.adoc +++ /dev/null @@ -1,58 +0,0 @@ -// Module included in the following assemblies: -// -// * operators/operator_sdk/golang/osdk-golang-quickstart.adoc -// * operators/operator_sdk/golang/osdk-golang-tutorial.adoc -// * operators/operator_sdk/ansible/osdk-ansible-quickstart.adoc -// * operators/operator_sdk/ansible/osdk-ansible-tutorial.adoc -// * operators/operator_sdk/helm/osdk-helm-quickstart.adoc -// * operators/operator_sdk/helm/osdk-helm-tutorial.adoc -// * operators/operator_sdk/osdk-working-bundle-images.adoc - -ifeval::["{context}" == "osdk-ansible-quickstart"] -:ansible: -endif::[] -ifeval::["{context}" == "osdk-ansible-tutorial"] -:ansible: -endif::[] -ifeval::["{context}" == "osdk-golang-quickstart"] -:golang: -endif::[] -ifeval::["{context}" == "osdk-golang-tutorial"] -:golang: -endif::[] - -[id="osdk-common-prereqs_{context}"] -= Prerequisites - -* Operator SDK CLI installed -* OpenShift CLI (`oc`) {product-version}+ installed -ifdef::golang[] -* link:https://golang.org/dl/[Go] 1.21+ -endif::[] -ifdef::ansible[] -* 
link:https://docs.ansible.com/ansible/latest/roadmap/ROADMAP_2_15.html[Ansible] 2.15.0 -* link:https://ansible-runner.readthedocs.io/en/latest/install.html[Ansible Runner] 2.3.3+ -* link:https://github.com/ansible/ansible-runner-http[Ansible Runner HTTP Event Emitter plugin] 1.0.0+ -* link:https://www.python.org/downloads/[Python] 3.9+ -* link:https://pypi.org/project/kubernetes/[Python Kubernetes client] -endif::[] -ifndef::openshift-dedicated,openshift-rosa[] -* Logged into an {product-title} {product-version} cluster with `oc` with an account that has `cluster-admin` permissions -endif::openshift-dedicated,openshift-rosa[] -ifdef::openshift-dedicated,openshift-rosa[] -* Logged into an {product-title} cluster with `oc` with an account that has `dedicated-admin` permissions -endif::openshift-dedicated,openshift-rosa[] -* To allow the cluster to pull the image, the repository where you push your image must be set as public, or you must configure an image pull secret - -ifeval::["{context}" == "osdk-ansible-quickstart"] -:!ansible: -endif::[] -ifeval::["{context}" == "osdk-ansible-tutorial"] -:!ansible: -endif::[] -ifeval::["{context}" == "osdk-golang-quickstart"] -:!golang: -endif::[] -ifeval::["{context}" == "osdk-golang-tutorial"] -:!golang: -endif::[] diff --git a/modules/osdk-control-compat.adoc b/modules/osdk-control-compat.adoc deleted file mode 100644 index d4d2ba230e..0000000000 --- a/modules/osdk-control-compat.adoc +++ /dev/null @@ -1,90 +0,0 @@ -// Module included in the following assemblies: -// -// * operators/operator_sdk/osdk-working-bundle-images.adoc - -:_mod-docs-content-type: PROCEDURE -[id="osdk-control-compat_{context}"] -= Controlling Operator compatibility with {product-title} versions - -[IMPORTANT] -==== -Kubernetes periodically deprecates certain APIs that are removed in subsequent releases. 
If your Operator is using a deprecated API, it might no longer work after the {product-title} cluster is upgraded to the Kubernetes version where the API has been removed. - -As an Operator author, it is strongly recommended that you review the link:https://kubernetes.io/docs/reference/using-api/deprecation-guide/[Deprecated API Migration Guide] in Kubernetes documentation and keep your Operator projects up to date to avoid using deprecated and removed APIs. Ideally, you should update your Operator before the release of a future version of {product-title} that would make the Operator incompatible. -==== - -When an API is removed from an {product-title} version, Operators running on that cluster version that are still using removed APIs will no longer work properly. As an Operator author, you should plan to update your Operator projects to accommodate API deprecation and removal to avoid interruptions for users of your Operator. - -[TIP] -==== -You can check the event alerts of your Operators to find whether there are any warnings about APIs currently in use. The following alerts fire when they detect an API in use that will be removed in the next release: - -`APIRemovedInNextReleaseInUse`:: -APIs that will be removed in the next {product-title} release. - -`APIRemovedInNextEUSReleaseInUse`:: -APIs that will be removed in the next {product-title} link:https://access.redhat.com/support/policy/updates/openshift#ocp4_phases[Extended Update Support (EUS)] release. -==== - -If a cluster administrator has installed your Operator, before they upgrade to the next version of {product-title}, they must ensure a version of your Operator is installed that is compatible with that next cluster version. 
While it is recommended that you update your Operator projects to no longer use deprecated or removed APIs, if you still need to publish your Operator bundles with removed APIs for continued use on earlier versions of {product-title}, ensure that the bundle is configured accordingly. - -The following procedure helps prevent administrators from installing versions of your Operator on an incompatible version of {product-title}. These steps also prevent administrators from upgrading to a newer version of {product-title} that is incompatible with the version of your Operator that is currently installed on their cluster. - -This procedure is also useful when you know that the current version of your Operator will not work well, for any reason, on a specific {product-title} version. By defining the cluster versions where the Operator should be distributed, you ensure that the Operator does not appear in a catalog of a cluster version which is outside of the allowed range. - -[IMPORTANT] -==== -Operators that use deprecated APIs can adversely impact critical workloads when cluster administrators upgrade to a future version of {product-title} where the API is no longer supported. If your Operator is using deprecated APIs, you should configure the following settings in your Operator project as soon as possible. -==== - -.Prerequisites - -- An existing Operator project - -.Procedure - -. If you know that a specific bundle of your Operator is not supported and will not work correctly on {product-title} later than a certain cluster version, configure the maximum version of {product-title} that your Operator is compatible with. 
In your Operator project's cluster service version (CSV), set the `olm.maxOpenShiftVersion` annotation to prevent administrators from upgrading their cluster before upgrading the installed Operator to a compatible version: -+ -[IMPORTANT] -==== -You must use `olm.maxOpenShiftVersion` annotation only if your Operator bundle version cannot work in later versions. Be aware that cluster admins cannot upgrade their clusters with your solution installed. If you do not provide later version and a valid upgrade path, administrators may uninstall your Operator and can upgrade the cluster version. -==== -+ -.Example CSV with `olm.maxOpenShiftVersion` annotation -[source,yaml] ----- -apiVersion: operators.coreos.com/v1alpha1 -kind: ClusterServiceVersion -metadata: - annotations: - "olm.properties": '[{"type": "olm.maxOpenShiftVersion", "value": ""}]' <1> ----- -<1> Specify the maximum cluster version of {product-title} that your Operator is compatible with. For example, setting `value` to `4.9` prevents cluster upgrades to {product-title} versions later than 4.9 when this bundle is installed on a cluster. - -. If your bundle is intended for distribution in a Red Hat-provided Operator catalog, configure the compatible versions of {product-title} for your Operator by setting the following properties. This configuration ensures your Operator is only included in catalogs that target compatible versions of {product-title}: -+ -[NOTE] -==== -This step is only valid when publishing Operators in Red Hat-provided catalogs. If your bundle is only intended for distribution in a custom catalog, you can skip this step. For more details, see "Red Hat-provided Operator catalogs". -==== - -.. Set the `com.redhat.openshift.versions` annotation in your project's `bundle/metadata/annotations.yaml` file: -+ -.Example `bundle/metadata/annotations.yaml` file with compatible versions -[source,yaml] ----- -com.redhat.openshift.versions: "v4.7-v4.9" <1> ----- -<1> Set to a range or single version. 
- -.. To prevent your bundle from being carried on to an incompatible version of {product-title}, ensure that the index image is generated with the proper `com.redhat.openshift.versions` label in your Operator's bundle image. For example, if your project was generated using the Operator SDK, update the `bundle.Dockerfile` file: -+ -.Example `bundle.Dockerfile` with compatible versions -+ -[source,yaml] ----- -LABEL com.redhat.openshift.versions="" <1> ----- -<1> Set to a range or single version, for example, `v4.7-v4.9`. This setting defines the cluster versions where the Operator should be distributed, and the Operator does not appear in a catalog of a cluster version which is outside of the range. - -You can now bundle a new version of your Operator and publish the updated version to a catalog for distribution. diff --git a/modules/osdk-crd-templates.adoc b/modules/osdk-crd-templates.adoc deleted file mode 100644 index 0403a197a0..0000000000 --- a/modules/osdk-crd-templates.adoc +++ /dev/null @@ -1,20 +0,0 @@ -// Module included in the following assemblies: -// -// * operators/operator_sdk/osdk-generating-csvs.adoc - -[id="osdk-crds-templates_{context}"] -= CRD templates - -Users of your Operator must be made aware of which options are required versus optional. You can provide templates for each of your custom resource definitions (CRDs) with a minimum set of configuration as an annotation named `alm-examples`. Compatible UIs will pre-fill this template for users to further customize. - -The annotation consists of a list of the kind, for example, the CRD name and the corresponding `metadata` and `spec` of the Kubernetes object. 
- -The following full example provides templates for `EtcdCluster`, `EtcdBackup` and `EtcdRestore`: - -[source,yaml] ----- -metadata: - annotations: - alm-examples: >- - [{"apiVersion":"etcd.database.coreos.com/v1beta2","kind":"EtcdCluster","metadata":{"name":"example","namespace":""},"spec":{"size":3,"version":"3.2.13"}},{"apiVersion":"etcd.database.coreos.com/v1beta2","kind":"EtcdRestore","metadata":{"name":"example-etcd-cluster"},"spec":{"etcdCluster":{"name":"example-etcd-cluster"},"backupStorageType":"S3","s3":{"path":"","awsSecret":""}}},{"apiVersion":"etcd.database.coreos.com/v1beta2","kind":"EtcdBackup","metadata":{"name":"example-etcd-cluster-backup"},"spec":{"etcdEndpoints":[""],"storageType":"S3","s3":{"path":"","awsSecret":""}}}] ----- diff --git a/modules/osdk-crds.adoc b/modules/osdk-crds.adoc deleted file mode 100644 index cf28460296..0000000000 --- a/modules/osdk-crds.adoc +++ /dev/null @@ -1,9 +0,0 @@ -// Module included in the following assemblies: -// -// * operators/operator_sdk/osdk-generating-csvs.adoc - -:_mod-docs-content-type: CONCEPT -[id="osdk-crds_{context}"] -= Understanding your custom resource definitions (CRDs) - -There are two types of custom resource definitions (CRDs) that your Operator can use: ones that are _owned_ by it and ones that it depends on, which are _required_. 
diff --git a/modules/osdk-create-cr.adoc b/modules/osdk-create-cr.adoc deleted file mode 100644 index 16522eea77..0000000000 --- a/modules/osdk-create-cr.adoc +++ /dev/null @@ -1,244 +0,0 @@ -// Module included in the following assemblies: -// -// * operators/operator_sdk/golang/osdk-golang-tutorial.adoc -// * operators/operator_sdk/ansible/osdk-ansible-tutorial.adoc -// * operators/operator_sdk/helm/osdk-helm-tutorial.adoc - -ifeval::["{context}" == "osdk-golang-tutorial"] -:golang: -:app-proper: Memcached -:app: memcached -:group: cache -endif::[] -ifeval::["{context}" == "osdk-ansible-tutorial"] -:ansible: -:app-proper: Memcached -:app: memcached -:group: cache -endif::[] -ifeval::["{context}" == "osdk-helm-tutorial"] -:helm: -:app-proper: Nginx -:app: nginx -:group: demo -endif::[] - -:_mod-docs-content-type: PROCEDURE -[id="osdk-create-cr_{context}"] -= Creating a custom resource - -After your Operator is installed, you can test it by creating a custom resource (CR) that is now provided on the cluster by the Operator. - -.Prerequisites - -* Example {app-proper} Operator, which provides the `{app-proper}` CR, installed on a cluster - -.Procedure - -. Change to the namespace where your Operator is installed. For example, if you deployed the Operator using the `make deploy` command: -+ -[source,terminal,subs="attributes+"] ----- -$ oc project {app}-operator-system ----- - -. Edit the sample `{app-proper}` CR manifest at `config/samples/{group}_v1_{app}.yaml` to contain the following specification: -+ -[source,yaml,subs="attributes+"] ----- -apiVersion: {group}.example.com/v1 -kind: {app-proper} -metadata: - name: {app}-sample -... -spec: -... -ifdef::helm[] - replicaCount: 3 -endif::[] -ifndef::helm[] - size: 3 -endif::[] ----- - -ifdef::helm[] -. The {app-proper} service account requires privileged access to run in {product-title}. 
Add the following security context constraint (SCC) to the service account for the `{app}-sample` pod: -+ -[source,terminal,subs="attributes+"] ----- -$ oc adm policy add-scc-to-user \ - anyuid system:serviceaccount:{app}-operator-system:{app}-sample ----- -endif::[] - -. Create the CR: -+ -[source,terminal,subs="attributes+"] ----- -$ oc apply -f config/samples/{group}_v1_{app}.yaml ----- - -. Ensure that the `{app-proper}` Operator creates the deployment for the sample CR with the correct size: -+ -[source,terminal] ----- -$ oc get deployments ----- -+ -.Example output -[source,terminal] -ifdef::helm[] ----- -NAME READY UP-TO-DATE AVAILABLE AGE -nginx-operator-controller-manager 1/1 1 1 8m -nginx-sample 3/3 3 3 1m ----- -endif::[] -ifndef::helm[] ----- -NAME READY UP-TO-DATE AVAILABLE AGE -memcached-operator-controller-manager 1/1 1 1 8m -memcached-sample 3/3 3 3 1m ----- -endif::[] - -. Check the pods and CR status to confirm the status is updated with the {app-proper} pod names. - -.. Check the pods: -+ -[source,terminal] ----- -$ oc get pods ----- -+ -.Example output -[source,terminal] -ifdef::helm[] ----- -NAME READY STATUS RESTARTS AGE -nginx-sample-6fd7c98d8-7dqdr 1/1 Running 0 1m -nginx-sample-6fd7c98d8-g5k7v 1/1 Running 0 1m -nginx-sample-6fd7c98d8-m7vn7 1/1 Running 0 1m ----- -endif::[] -ifndef::helm[] ----- -NAME READY STATUS RESTARTS AGE -memcached-sample-6fd7c98d8-7dqdr 1/1 Running 0 1m -memcached-sample-6fd7c98d8-g5k7v 1/1 Running 0 1m -memcached-sample-6fd7c98d8-m7vn7 1/1 Running 0 1m ----- -endif::[] - -.. Check the CR status: -+ -[source,terminal,subs="attributes+"] ----- -$ oc get {app}/{app}-sample -o yaml ----- -+ -.Example output -[source,yaml,subs="attributes+"] ----- -apiVersion: {group}.example.com/v1 -kind: {app-proper} -metadata: -... - name: {app}-sample -... 
-spec: -ifdef::helm[] - replicaCount: 3 -endif::[] -ifndef::helm[] - size: 3 -endif::[] -status: - nodes: - - {app}-sample-6fd7c98d8-7dqdr - - {app}-sample-6fd7c98d8-g5k7v - - {app}-sample-6fd7c98d8-m7vn7 ----- - -. Update the deployment size. - -.. Update `config/samples/{group}_v1_{app}.yaml` file to change the `spec.size` field in the `{app-proper}` CR from `3` to `5`: -+ -[source,terminal,subs="attributes+"] ----- -$ oc patch {app} {app}-sample \ -ifdef::helm[] - -p '{"spec":{"replicaCount": 5}}' \ -endif::[] -ifndef::helm[] - -p '{"spec":{"size": 5}}' \ -endif::[] - --type=merge ----- - -.. Confirm that the Operator changes the deployment size: -+ -[source,terminal] ----- -$ oc get deployments ----- -+ -.Example output -[source,terminal] -ifdef::helm[] ----- -NAME READY UP-TO-DATE AVAILABLE AGE -nginx-operator-controller-manager 1/1 1 1 10m -nginx-sample 5/5 5 5 3m ----- -endif::[] -ifndef::helm[] ----- -NAME READY UP-TO-DATE AVAILABLE AGE -memcached-operator-controller-manager 1/1 1 1 10m -memcached-sample 5/5 5 5 3m ----- -endif::[] - -. Delete the CR by running the following command: -+ -[source,terminal,subs="attributes+"] ----- -$ oc delete -f config/samples/{group}_v1_{app}.yaml ----- - -. Clean up the resources that have been created as part of this tutorial. 
- -* If you used the `make deploy` command to test the Operator, run the following command: -+ -[source,terminal] ----- -$ make undeploy ----- - -* If you used the `operator-sdk run bundle` command to test the Operator, run the following command: -+ -[source,terminal] ----- -$ operator-sdk cleanup ----- - - -ifeval::["{context}" == "osdk-golang-tutorial"] -:!golang: -:!app-proper: -:!app: -:!group: -endif::[] -ifeval::["{context}" == "osdk-ansible-tutorial"] -:!ansible: -:!app-proper: -:!app: -:!group: -endif::[] -ifeval::["{context}" == "osdk-helm-tutorial"] -:!helm: -:!app-proper: -:!app: -:!group: -endif::[] diff --git a/modules/osdk-create-project.adoc b/modules/osdk-create-project.adoc deleted file mode 100644 index cbff19fe7a..0000000000 --- a/modules/osdk-create-project.adoc +++ /dev/null @@ -1,119 +0,0 @@ -// Module included in the following assemblies: -// -// * operators/operator_sdk/golang/osdk-golang-tutorial.adoc -// * operators/operator_sdk/ansible/osdk-ansible-tutorial.adoc -// * operators/operator_sdk/helm/osdk-helm-tutorial.adoc - -ifeval::["{context}" == "osdk-golang-tutorial"] -:golang: -:type: Go -:app: memcached -endif::[] -ifeval::["{context}" == "osdk-ansible-tutorial"] -:ansible: -:type: Ansible -:app: memcached -endif::[] -ifeval::["{context}" == "osdk-helm-tutorial"] -:helm: -:type: Helm -:app: nginx -endif::[] - -:_mod-docs-content-type: PROCEDURE -[id="osdk-create-project_{context}"] -= Creating a project - -Use the Operator SDK CLI to create a project called `{app}-operator`. - -.Procedure - -. Create a directory for the project: -+ -[source,terminal,subs="attributes+"] ----- -$ mkdir -p $HOME/projects/{app}-operator ----- - -. Change to the directory: -+ -[source,terminal,subs="attributes+"] ----- -$ cd $HOME/projects/{app}-operator ----- - -ifdef::golang[] -. Activate support for Go modules: -+ -[source,terminal] ----- -$ export GO111MODULE=on ----- -endif::[] - -. 
Run the `operator-sdk init` command -ifdef::ansible[] -with the `ansible` plugin -endif::[] -ifdef::helm[] -with the `helm` plugin -endif::[] -to initialize the project: -+ -[source,terminal,subs="attributes+"] -ifdef::golang[] ----- -$ operator-sdk init \ - --domain=example.com \ - --repo=github.com/example-inc/{app}-operator ----- -+ -[NOTE] -==== -The `operator-sdk init` command uses the Go plugin by default. -==== -+ -The `operator-sdk init` command generates a `go.mod` file to be used with link:https://golang.org/ref/mod[Go modules]. The `--repo` flag is required when creating a project outside of `$GOPATH/src/`, because generated files require a valid module path. -endif::[] -ifdef::ansible[] ----- -$ operator-sdk init \ - --plugins=ansible \ - --domain=example.com ----- -endif::[] -ifdef::helm[] ----- -$ operator-sdk init \ - --plugins=helm \ - --domain=example.com \ - --group=demo \ - --version=v1 \ - --kind=Nginx ----- -+ -[NOTE] -==== -By default, the `helm` plugin initializes a project using a boilerplate Helm chart. You can use additional flags, such as the `--helm-chart` flag, to initialize a project using an existing Helm chart. -==== -+ -The `init` command creates the `nginx-operator` project specifically for watching a resource with API version `example.com/v1` and kind `Nginx`. - -. For Helm-based projects, the `init` command generates the RBAC rules in the `config/rbac/role.yaml` file based on the resources that would be deployed by the default manifest for the chart. Verify that the rules generated in this file meet the permission requirements of the Operator. 
-endif::[] - -ifeval::["{context}" == "osdk-golang-tutorial"] -:!golang: -:!type: -:!app: -endif::[] -ifeval::["{context}" == "osdk-ansible-tutorial"] -:!ansible: -:!type: -:!app: -endif::[] -ifeval::["{context}" == "osdk-helm-tutorial"] -:!helm: -:!type: -:!app: -endif::[] diff --git a/modules/osdk-csv-annotations-dep.adoc b/modules/osdk-csv-annotations-dep.adoc deleted file mode 100644 index e06b5a3351..0000000000 --- a/modules/osdk-csv-annotations-dep.adoc +++ /dev/null @@ -1,49 +0,0 @@ -// Module included in the following assemblies: -// -// * operators/operator_sdk/osdk-generating-csvs.adoc - -[id="osdk-csv-manual-annotations-deprecated_{context}"] -= Deprecated infrastructure feature annotations - -Starting in {product-title} 4.14, the `operators.openshift.io/infrastructure-features` group of annotations are deprecated by the group of annotations with the `features.operators.openshift.io` namespace. While you are encouraged to use the newer annotations, both groups are currently accepted when used in parallel. - -These annotations detail the infrastructure features that an Operator supports. Users can view and filter by these features when discovering Operators through OperatorHub in the web console or on the link:https://catalog.redhat.com/software/search?deployed_as=Operator[Red Hat Ecosystem Catalog]. - -.Deprecated `operators.openshift.io/infrastructure-features` annotations -[cols="2a,4a",options="header"] -|=== -|Valid annotation values |Description - -|`disconnected` -|Operator supports being mirrored into disconnected catalogs, including all dependencies, and does not require internet access. All related images required for mirroring are listed by the Operator. - -|`cnf` -|Operator provides a Cloud-native Network Functions (CNF) Kubernetes plugin. - -|`cni` -|Operator provides a Container Network Interface (CNI) Kubernetes plugin. - -|`csi` -|Operator provides a Container Storage Interface (CSI) Kubernetes plugin. 
- -|`fips` -|Operator accepts the FIPS mode of the underlying platform and works on nodes that are booted into FIPS mode. - -[IMPORTANT] -==== -When running {op-system-base-full} or {op-system-first} booted in FIPS mode, {product-title} core components use the {op-system-base} cryptographic libraries that have been submitted to NIST for FIPS 140-2/140-3 Validation on only the x86_64, ppc64le, and s390x architectures. -==== - -|`proxy-aware` -|Operator supports running on a cluster behind a proxy. Operator accepts the standard proxy environment variables `HTTP_PROXY` and `HTTPS_PROXY`, which Operator Lifecycle Manager (OLM) provides to the Operator automatically when the cluster is configured to use a proxy. Required environment variables are passed down to Operands for managed workloads. -|=== - -.Example CSV with `disconnected` and `proxy-aware` support -[source,yaml] ----- -apiVersion: operators.coreos.com/v1alpha1 -kind: ClusterServiceVersion -metadata: - annotations: - operators.openshift.io/infrastructure-features: '["disconnected", "proxy-aware"]' ----- diff --git a/modules/osdk-csv-annotations-infra.adoc b/modules/osdk-csv-annotations-infra.adoc deleted file mode 100644 index 002a78ca84..0000000000 --- a/modules/osdk-csv-annotations-infra.adoc +++ /dev/null @@ -1,81 +0,0 @@ -// Module included in the following assemblies: -// -// * operators/operator_sdk/osdk-generating-csvs.adoc - -:_mod-docs-content-type: REFERENCE -[id="osdk-csv-annotations-infra_{context}"] -= Infrastructure features annotations - -Annotations in the `features.operators.openshift.io` group detail the infrastructure features that an Operator might support, specified by setting a `"true"` or `"false"` value. Users can view and filter by these features when discovering Operators through OperatorHub in the web console or on the link:https://catalog.redhat.com/software/search?deployed_as=Operator[Red Hat Ecosystem Catalog]. These annotations are supported in {product-title} 4.10 and later. 
- -[IMPORTANT] -==== -The `features.operators.openshift.io` infrastructure feature annotations deprecate the `operators.openshift.io/infrastructure-features` annotations used in earlier versions of {product-title}. See "Deprecated infrastructure feature annotations" for more information. -==== - -.Infrastructure features annotations -[cols="4a,5a,3a,options="header"] -|=== -|Annotation |Description |Valid values^[1]^ - -|`features.operators.openshift.io/disconnected` -|Specify whether an Operator supports being mirrored into disconnected catalogs, including all dependencies, and does not require internet access. The Operator leverages the `spec.relatedImages` CSV field to refer to any related image by its digest. -|`"true"` or `"false"` - -|`features.operators.openshift.io/fips-compliant` -|Specify whether an Operator accepts the FIPS-140 configuration of the underlying platform and works on nodes that are booted into FIPS mode. In this mode, the Operator and any workloads it manages (operands) are solely calling the {op-system-base-full} cryptographic library submitted for FIPS-140 validation. -|`"true"` or `"false"` - -|`features.operators.openshift.io/proxy-aware` -|Specify whether an Operator supports running on a cluster behind a proxy by accepting the standard `HTTP_PROXY` and `HTTPS_PROXY` proxy environment variables. If applicable, the Operator passes this information to the workload it manages (operands). -|`"true"` or `"false"` - -|`features.operators.openshift.io/tls-profiles` -|Specify whether an Operator implements well-known tunables to modify the TLS cipher suite used by the Operator and, if applicable, any of the workloads it manages (operands). -|`"true"` or `"false"` - -|`features.operators.openshift.io/token-auth-aws` -|Specify whether an Operator supports configuration for tokenized authentication with AWS APIs via AWS Secure Token Service (STS) by using the Cloud Credential Operator (CCO). 
-|`"true"` or `"false"` - -|`features.operators.openshift.io/token-auth-azure` -|Specify whether an Operator supports configuration for tokenized authentication with Azure APIs via Azure Managed Identity by using the Cloud Credential Operator (CCO). -|`"true"` or `"false"` - -|`features.operators.openshift.io/token-auth-gcp` -|Specify whether an Operator supports configuration for tokenized authentication with Google Cloud APIs via GCP Workload Identity Foundation (WIF) by using the Cloud Credential Operator (CCO). -|`"true"` or `"false"` - -|`features.operators.openshift.io/cnf` -|Specify whether an Operator provides a Cloud-Native Network Function (CNF) Kubernetes plugin. -|`"true"` or `"false"` - -|`features.operators.openshift.io/cni` -|Specify whether an Operator provides a Container Network Interface (CNI) Kubernetes plugin. -|`"true"` or `"false"` - -|`features.operators.openshift.io/csi` -|Specify whether an Operator provides a Container Storage Interface (CSI) Kubernetes plugin. -|`"true"` or `"false"` - -|=== -[.small] --- -1. Valid values are shown intentionally with double quotes, because Kubernetes annotations must be strings. 
--- - -.Example CSV with infrastructure feature annotations -[source,yaml] ----- -apiVersion: operators.coreos.com/v1alpha1 -kind: ClusterServiceVersion -metadata: - annotations: - features.operators.openshift.io/disconnected: "true" - features.operators.openshift.io/fips-compliant: "false" - features.operators.openshift.io/proxy-aware: "false" - features.operators.openshift.io/tls-profiles: "false" - features.operators.openshift.io/token-auth-aws: "false" - features.operators.openshift.io/token-auth-azure: "false" - features.operators.openshift.io/token-auth-gcp: "false" ----- diff --git a/modules/osdk-csv-annotations-other.adoc b/modules/osdk-csv-annotations-other.adoc deleted file mode 100644 index a2ed78b7cc..0000000000 --- a/modules/osdk-csv-annotations-other.adoc +++ /dev/null @@ -1,53 +0,0 @@ -// Module included in the following assemblies: -// -// * operators/operator_sdk/osdk-generating-csvs.adoc - -[id="osdk-csv-annotations-other_{context}"] -= Other optional annotations - -The following Operator annotations are optional. - -.Other optional annotations -[cols="5a,5a",options="header"] -|=== -|Annotation |Description - -|`alm-examples` -|Provide custom resource definition (CRD) templates with a minimum set of configuration. Compatible UIs pre-fill this template for users to further customize. - -|`operatorframework.io/initialization-resource` -|Specify a single required custom resource by adding `operatorframework.io/initialization-resource` annotation to the cluster service version (CSV) during Operator installation. The user is then prompted to create the custom resource through a template provided in the CSV. Must include a template that contains a complete YAML definition. - -|`operatorframework.io/suggested-namespace` -|Set a suggested namespace where the Operator should be deployed. - -|`operatorframework.io/suggested-namespace-template` -|Set a manifest for a `Namespace` object with the default node selector for the namespace specified. 
- -|`operators.openshift.io/valid-subscription` -|Free-form array for listing any specific subscriptions that are required to use the Operator. For example, `'["3Scale Commercial License", "Red Hat Managed Integration"]'`. - -|`operators.operatorframework.io/internal-objects` -|Hides CRDs in the UI that are not meant for user manipulation. - -|=== - -.Example CSV with an {product-title} license requirement -[source,yaml] ----- -apiVersion: operators.coreos.com/v1alpha1 -kind: ClusterServiceVersion -metadata: - annotations: - operators.openshift.io/valid-subscription: '["OpenShift Container Platform"]' ----- - -.Example CSV with a 3scale license requirement -[source,yaml] ----- -apiVersion: operators.coreos.com/v1alpha1 -kind: ClusterServiceVersion -metadata: - annotations: - operators.openshift.io/valid-subscription: '["3Scale Commercial License", "Red Hat Managed Integration"]' ----- \ No newline at end of file diff --git a/modules/osdk-csv-bundle-files.adoc b/modules/osdk-csv-bundle-files.adoc deleted file mode 100644 index b13790dc3e..0000000000 --- a/modules/osdk-csv-bundle-files.adoc +++ /dev/null @@ -1,21 +0,0 @@ -// Module included in the following assemblies: -// -// * operators/operator_sdk/osdk-generating-csvs.adoc - -[id="osdk-csv-bundle-files_{context}"] -= Generated files and resources - -The `make bundle` command creates the following files and directories in your Operator project: - -* A bundle manifests directory named `bundle/manifests` that contains a `ClusterServiceVersion` (CSV) object -* A bundle metadata directory named `bundle/metadata` -* All custom resource definitions (CRDs) in a `config/crd` directory -* A Dockerfile `bundle.Dockerfile` - -The following resources are typically included in a CSV: - -Role:: Defines Operator permissions within a namespace. -ClusterRole:: Defines cluster-wide Operator permissions. -Deployment:: Defines how an Operand of an Operator is run in pods. 
-CustomResourceDefinition (CRD):: Defines custom resources that your Operator reconciles. -Custom resource examples:: Examples of resources adhering to the spec of a particular CRD. diff --git a/modules/osdk-csv-composition-configuration.adoc b/modules/osdk-csv-composition-configuration.adoc deleted file mode 100644 index 8d8cf66ed2..0000000000 --- a/modules/osdk-csv-composition-configuration.adoc +++ /dev/null @@ -1,22 +0,0 @@ -// Module included in the following assemblies: -// -// * operators/operator_sdk/osdk-generating-csvs.adoc - -[id="osdk-configuring-csv-composition_{context}"] -= CSV composition configuration - -Operator authors can configure CSV composition by populating several fields in the `deploy/olm-catalog/csv-config.yaml` file: - -[cols="2a,8a",options="header"] -|=== -|Field |Description - -|`operator-path` (string) -|The Operator resource manifest file path. Default: `deploy/operator.yaml`. - -|`crd-cr-path-list` (string(, string)*) -|A list of CRD and CR manifest file paths. Default: `[deploy/crds/*_{crd,cr}.yaml]`. - -|`rbac-path-list` (string(, string)*) -|A list of RBAC role manifest file paths. Default: `[deploy/role.yaml]`. -|=== diff --git a/modules/osdk-csv-manual-annotations.adoc b/modules/osdk-csv-manual-annotations.adoc deleted file mode 100644 index 632b138170..0000000000 --- a/modules/osdk-csv-manual-annotations.adoc +++ /dev/null @@ -1,8 +0,0 @@ -// Module included in the following assemblies: -// -// * operators/operator_sdk/osdk-generating-csvs.adoc - -[id="osdk-csv-manual-annotations_{context}"] -= Operator metadata annotations - -Operator developers can set certain annotations in the metadata of a cluster service version (CSV) to enable features or highlight capabilities in user interfaces (UIs), such as OperatorHub or the link:https://catalog.redhat.com/software/search?deployed_as=Operator[Red Hat Ecosystem Catalog]. 
Operator metadata annotations are manually defined by setting the `metadata.annotations` field in the CSV YAML file. \ No newline at end of file diff --git a/modules/osdk-csv-ver.adoc b/modules/osdk-csv-ver.adoc deleted file mode 100644 index 1d297583e3..0000000000 --- a/modules/osdk-csv-ver.adoc +++ /dev/null @@ -1,10 +0,0 @@ -// Module included in the following assemblies: -// -// * operators/operator_sdk/osdk-generating-csvs.adoc - -[id="osdk-csv-ver_{context}"] -= Version management - -The `--version` flag for the `generate bundle` subcommand supplies a semantic version for your bundle when creating one for the first time and when upgrading an existing one. - -By setting the `VERSION` variable in your `Makefile`, the `--version` flag is automatically invoked using that value when the `generate bundle` subcommand is run by the `make bundle` command. The CSV version is the same as the Operator version, and a new CSV is generated when upgrading Operator versions. diff --git a/modules/osdk-deploy-olm.adoc b/modules/osdk-deploy-olm.adoc deleted file mode 100644 index 9fb528129f..0000000000 --- a/modules/osdk-deploy-olm.adoc +++ /dev/null @@ -1,70 +0,0 @@ -// Module included in the following assemblies: -// -// * operators/operator_sdk/golang/osdk-golang-tutorial.adoc -// * operators/operator_sdk/ansible/osdk-ansible-tutorial.adoc -// * operators/operator_sdk/helm/osdk-helm-tutorial.adoc -// * operators/operator_sdk/osdk-working-bundle-images.adoc - -ifeval::["{context}" == "osdk-golang-tutorial"] -:golang: -endif::[] -ifeval::["{context}" == "osdk-working-bundle-images"] -:golang: -endif::[] - -:_mod-docs-content-type: PROCEDURE -[id="osdk-deploy-olm_{context}"] -= Deploying an Operator with Operator Lifecycle Manager - -Operator Lifecycle Manager (OLM) helps you to install, update, and manage the lifecycle of Operators and their associated services on a Kubernetes cluster. 
OLM is installed by default on {product-title} and runs as a Kubernetes extension so that you can use the web console and the OpenShift CLI (`oc`) for all Operator lifecycle management functions without any additional tools. - -The Operator bundle format is the default packaging method for Operator SDK and OLM. You can use the Operator SDK to quickly run a bundle image on OLM to ensure that it runs properly. - -.Prerequisites - -- Operator SDK CLI installed on a development workstation -- Operator bundle image built and pushed to a registry -- OLM installed on a Kubernetes-based cluster (v1.16.0 or later if you use `apiextensions.k8s.io/v1` CRDs, for example {product-title} {product-version}) -ifndef::openshift-dedicated,openshift-rosa[] -- Logged in to the cluster with `oc` using an account with `cluster-admin` permissions -endif::openshift-dedicated,openshift-rosa[] -ifdef::openshift-dedicated,openshift-rosa[] -- Logged in to the cluster with `oc` using an account with `dedicated-admin` permissions -endif::openshift-dedicated,openshift-rosa[] -ifdef::golang[] -- If your Operator is Go-based, your project must be updated to use supported images for running on {product-title} -endif::[] - -.Procedure - -* Enter the following command to run the Operator on the cluster: -+ -[source,terminal] ----- -$ operator-sdk run bundle \//<1> - -n \//<2> - //: <3> ----- -<1> The `run bundle` command creates a valid file-based catalog and installs the Operator bundle on your cluster using OLM. -<2> Optional: By default, the command installs the Operator in the currently active project in your `~/.kube/config` file. You can add the `-n` flag to set a different namespace scope for the installation. -<3> If you do not specify an image, the command uses `quay.io/operator-framework/opm:latest` as the default index image. If you specify an image, the command uses the bundle image itself as the index image. 
-+ -[IMPORTANT] -==== -As of {product-title} 4.11, the `run bundle` command supports the file-based catalog format for Operator catalogs by default. The deprecated SQLite database format for Operator catalogs continues to be supported; however, it will be removed in a future release. It is recommended that Operator authors migrate their workflows to the file-based catalog format. -==== -+ -This command performs the following actions: -+ --- -* Create an index image referencing your bundle image. The index image is opaque and ephemeral, but accurately reflects how a bundle would be added to a catalog in production. -* Create a catalog source that points to your new index image, which enables OperatorHub to discover your Operator. -* Deploy your Operator to your cluster by creating an `OperatorGroup`, `Subscription`, `InstallPlan`, and all other required resources, including RBAC. --- - -ifeval::["{context}" == "osdk-golang-tutorial"] -:!golang: -endif::[] -ifeval::["{context}" == "osdk-working-bundle-images"] -:!golang: -endif::[] \ No newline at end of file diff --git a/modules/osdk-ensuring-operator-workloads-run-restricted-psa.adoc b/modules/osdk-ensuring-operator-workloads-run-restricted-psa.adoc deleted file mode 100644 index 48c02e9b68..0000000000 --- a/modules/osdk-ensuring-operator-workloads-run-restricted-psa.adoc +++ /dev/null @@ -1,65 +0,0 @@ -// Module included in the following assemblies: -// -// * operators/operator_sdk/osdk-complying-with-psa.adoc - -:_mod-docs-content-type: PROCEDURE -[id="osdk-ensuring-operator-workloads-run-restricted-psa_{context}"] -= Ensuring Operator workloads run in namespaces set to the restricted pod security level - -To ensure your Operator project can run on a wide variety of deployments and environments, configure the Operator's workloads to run in namespaces set to the `restricted` pod security level. - -[WARNING] -==== -You must leave the `runAsUser` field empty. 
If your image requires a specific user, it cannot be run under restricted security context constraints (SCC) and restricted pod security enforcement. -==== - -.Procedure - -* To configure Operator workloads to run in namespaces set to the `restricted` pod security level, edit your Operator's namespace definition similar to the following examples: -+ -[IMPORTANT] -==== -It is recommended that you set the seccomp profile in your Operator's namespace definition. However, setting the seccomp profile is not supported in {product-title} 4.10. -==== - -** For Operator projects that must run in only {product-title} 4.11 and later, edit your Operator's namespace definition similar to the following example: -+ -.Example `config/manager/manager.yaml` file -[source,yaml] ----- -... -spec: - securityContext: - seccompProfile: - type: RuntimeDefault <1> - runAsNonRoot: true - containers: - - name: - securityContext: - allowPrivilegeEscalation: false - capabilities: - drop: - - ALL -... ----- -<1> By setting the seccomp profile type to `RuntimeDefault`, the SCC defaults to the pod security profile of the namespace. - -** For Operator projects that must also run in {product-title} 4.10, edit your Operator's namespace definition similar to the following example: -+ -.Example `config/manager/manager.yaml` file -[source,yaml] ----- -... -spec: - securityContext: <1> - runAsNonRoot: true - containers: - - name: - securityContext: - allowPrivilegeEscalation: false - capabilities: - drop: - - ALL -... ----- -<1> Leaving the seccomp profile type unset ensures your Operator project can run in {product-title} 4.10. 
diff --git a/modules/osdk-generating-a-csv.adoc b/modules/osdk-generating-a-csv.adoc deleted file mode 100644 index 46c768618f..0000000000 --- a/modules/osdk-generating-a-csv.adoc +++ /dev/null @@ -1,23 +0,0 @@ -// Module included in the following assemblies: -// -// * operators/operator_sdk/osdk-generating-csvs.adoc - -[id="osdk-generating-a-csv_{context}"] -= Generating a CSV - -.Prerequisites - -- An Operator project generated using the Operator SDK - -.Procedure - -. In your Operator project, configure your CSV composition by modifying the `deploy/olm-catalog/csv-config.yaml` file, if desired. - -. Generate the CSV: -+ -[source,terminal] ----- -$ operator-sdk generate csv --csv-version ----- - -. In the new CSV generated in the `deploy/olm-catalog/` directory, ensure all required, manually-defined fields are set appropriately. diff --git a/modules/osdk-golang-controller-configs.adoc b/modules/osdk-golang-controller-configs.adoc deleted file mode 100644 index 5e05ae3f43..0000000000 --- a/modules/osdk-golang-controller-configs.adoc +++ /dev/null @@ -1,29 +0,0 @@ -// Module included in the following assemblies: -// -// * operators/operator_sdk/golang/osdk-golang-tutorial.adoc - -[id="osdk-golang-controller-configs_{context}"] -= Controller configurations - -You can initialize a controller by using many other useful configurations. For example: - -* Set the maximum number of concurrent reconciles for the controller by using the `MaxConcurrentReconciles` option, which defaults to `1`: -+ -[source,go] ----- -func (r *MemcachedReconciler) SetupWithManager(mgr ctrl.Manager) error { - return ctrl.NewControllerManagedBy(mgr). - For(&cachev1.Memcached{}). - Owns(&appsv1.Deployment{}). - WithOptions(controller.Options{ - MaxConcurrentReconciles: 2, - }). - Complete(r) -} ----- - -* Filter watch events using predicates. 
- -* Choose the type of link:https://pkg.go.dev/sigs.k8s.io/controller-runtime/pkg/handler#EventHandler[EventHandler] to change how a watch event translates to reconcile requests for the reconcile loop. For Operator relationships that are more complex than primary and secondary resources, you can use the `EnqueueRequestsFromMapFunc` handler to transform a watch event into an arbitrary set of reconcile requests. - -For more details on these and other configurations, see the upstream link:https://godoc.org/github.com/kubernetes-sigs/controller-runtime/pkg/builder#example-Builder[Builder] and link:https://godoc.org/github.com/kubernetes-sigs/controller-runtime/pkg/controller[Controller] GoDocs. diff --git a/modules/osdk-golang-controller-rbac-markers.adoc b/modules/osdk-golang-controller-rbac-markers.adoc deleted file mode 100644 index 5288edd8af..0000000000 --- a/modules/osdk-golang-controller-rbac-markers.adoc +++ /dev/null @@ -1,23 +0,0 @@ -// Module included in the following assemblies: -// -// * operators/operator_sdk/golang/osdk-golang-tutorial.adoc - -[id="osdk-golang-controller-rbac-markers_{context}"] -= Permissions and RBAC manifests - -The controller requires certain RBAC permissions to interact with the resources it manages. These are specified using RBAC markers, such as the following: - -[source,go] ----- -// +kubebuilder:rbac:groups=cache.example.com,resources=memcacheds,verbs=get;list;watch;create;update;patch;delete -// +kubebuilder:rbac:groups=cache.example.com,resources=memcacheds/status,verbs=get;update;patch -// +kubebuilder:rbac:groups=cache.example.com,resources=memcacheds/finalizers,verbs=update -// +kubebuilder:rbac:groups=apps,resources=deployments,verbs=get;list;watch;create;update;patch;delete -// +kubebuilder:rbac:groups=core,resources=pods,verbs=get;list; - -func (r *MemcachedReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { - ... 
-} ----- - -The `ClusterRole` object manifest at `config/rbac/role.yaml` is generated from the previous markers by using the `controller-gen` utility whenever the `make manifests` command is run. diff --git a/modules/osdk-golang-controller-reconcile-loop.adoc b/modules/osdk-golang-controller-reconcile-loop.adoc deleted file mode 100644 index 0dfed92cec..0000000000 --- a/modules/osdk-golang-controller-reconcile-loop.adoc +++ /dev/null @@ -1,54 +0,0 @@ -// Module included in the following assemblies: -// -// * operators/operator_sdk/golang/osdk-golang-tutorial.adoc - -[id="osdk-golang-controller-reconcile-loop_{context}"] -= Reconcile loop - -Every controller has a reconciler object with a `Reconcile()` method that implements the reconcile loop. The reconcile loop is passed the `Request` argument, which is a namespace and name key used to find the primary resource object, `Memcached`, from the cache: - -[source,go] ----- -import ( - ctrl "sigs.k8s.io/controller-runtime" - - cachev1 "github.com/example-inc/memcached-operator/api/v1" - ... -) - -func (r *MemcachedReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { - // Lookup the Memcached instance for this reconcile request - memcached := &cachev1.Memcached{} - err := r.Get(ctx, req.NamespacedName, memcached) - ... 
-} ----- - -Based on the return values, result, and error, the request might be requeued and the reconcile loop might be triggered again: - -[source,go] ----- -// Reconcile successful - don't requeue -return ctrl.Result{}, nil -// Reconcile failed due to error - requeue -return ctrl.Result{}, err -// Requeue for any reason other than an error -return ctrl.Result{Requeue: true}, nil ----- - -You can set the `Result.RequeueAfter` to requeue the request after a grace period as well: - -[source,go] ----- -import "time" - -// Reconcile for any reason other than an error after 5 seconds -return ctrl.Result{RequeueAfter: time.Second*5}, nil ----- - -[NOTE] -==== -You can return `Result` with `RequeueAfter` set to periodically reconcile a CR. -==== - -For more on reconcilers, clients, and interacting with resource events, see the link:https://sdk.operatorframework.io/docs/building-operators/golang/references/client/[Controller Runtime Client API] documentation. diff --git a/modules/osdk-golang-controller-resources.adoc b/modules/osdk-golang-controller-resources.adoc deleted file mode 100644 index 8843455cd9..0000000000 --- a/modules/osdk-golang-controller-resources.adoc +++ /dev/null @@ -1,30 +0,0 @@ -// Module included in the following assemblies: -// -// * operators/operator_sdk/golang/osdk-golang-tutorial.adoc - -[id="osdk-golang-controller-resources_{context}"] -= Resources watched by the controller - -The `SetupWithManager()` function in `controllers/memcached_controller.go` specifies how the controller is built to watch a CR and other resources that are owned and managed by that controller. - -[source,go] ----- -import ( - ... - appsv1 "k8s.io/api/apps/v1" - ... -) - -func (r *MemcachedReconciler) SetupWithManager(mgr ctrl.Manager) error { - return ctrl.NewControllerManagedBy(mgr). - For(&cachev1.Memcached{}). - Owns(&appsv1.Deployment{}). - Complete(r) -} ----- - -`NewControllerManagedBy()` provides a controller builder that allows various controller configurations. 
- -`For(&cachev1.Memcached{})` specifies the `Memcached` type as the primary resource to watch. For each Add, Update, or Delete event for a `Memcached` type, the reconcile loop is sent a reconcile `Request` argument, which consists of a namespace and name key, for that `Memcached` object. - -`Owns(&appsv1.Deployment{})` specifies the `Deployment` type as the secondary resource to watch. For each `Deployment` type Add, Update, or Delete event, the event handler maps each event to a reconcile request for the owner of the deployment. In this case, the owner is the `Memcached` object for which the deployment was created. diff --git a/modules/osdk-golang-create-api-controller.adoc b/modules/osdk-golang-create-api-controller.adoc deleted file mode 100644 index 9bc7dd5c86..0000000000 --- a/modules/osdk-golang-create-api-controller.adoc +++ /dev/null @@ -1,42 +0,0 @@ -// Module included in the following assemblies: -// -// * operators/operator_sdk/golang/osdk-golang-tutorial.adoc - -:_mod-docs-content-type: PROCEDURE -[id="osdk-golang-create-api-controller_{context}"] -= Creating an API and controller - -Use the Operator SDK CLI to create a custom resource definition (CRD) API and controller. - -.Procedure - -. Run the following command to create an API with group `cache`, version, `v1`, and kind `Memcached`: -+ -[source,terminal] ----- -$ operator-sdk create api \ - --group=cache \ - --version=v1 \ - --kind=Memcached ----- - -. When prompted, enter `y` for creating both the resource and controller: -+ -[source,terminal] ----- -Create Resource [y/n] -y -Create Controller [y/n] -y ----- -+ -.Example output -[source,terminal] ----- -Writing scaffold for you to edit... -api/v1/memcached_types.go -controllers/memcached_controller.go -... ----- - -This process generates the `Memcached` resource API at `api/v1/memcached_types.go` and the controller at `controllers/memcached_controller.go`. 
diff --git a/modules/osdk-golang-define-api.adoc b/modules/osdk-golang-define-api.adoc deleted file mode 100644 index 0d19958dea..0000000000 --- a/modules/osdk-golang-define-api.adoc +++ /dev/null @@ -1,43 +0,0 @@ -// Module included in the following assemblies: -// -// * operators/operator_sdk/golang/osdk-golang-tutorial.adoc - -:_mod-docs-content-type: PROCEDURE -[id="osdk-golang-define-api_{context}"] -= Defining the API - -Define the API for the `Memcached` custom resource (CR). - -.Procedure - -. Modify the Go type definitions at `api/v1/memcached_types.go` to have the following `spec` and `status`: -+ -[source,go] ----- -// MemcachedSpec defines the desired state of Memcached -type MemcachedSpec struct { - // +kubebuilder:validation:Minimum=0 - // Size is the size of the memcached deployment - Size int32 `json:"size"` -} - -// MemcachedStatus defines the observed state of Memcached -type MemcachedStatus struct { - // Nodes are the names of the memcached pods - Nodes []string `json:"nodes"` -} ----- - -. Update the generated code for the resource type: -+ -[source,terminal] ----- -$ make generate ----- -+ -[TIP] -==== -After you modify a `*_types.go` file, you must run the `make generate` command to update the generated code for that resource type. -==== -+ -The above Makefile target invokes the `controller-gen` utility to update the `api/v1/zz_generated.deepcopy.go` file. This ensures your API Go type definitions implement the `runtime.Object` interface that all Kind types must implement. 
diff --git a/modules/osdk-golang-generate-crd.adoc b/modules/osdk-golang-generate-crd.adoc deleted file mode 100644 index 81a1cebdba..0000000000 --- a/modules/osdk-golang-generate-crd.adoc +++ /dev/null @@ -1,20 +0,0 @@ -// Module included in the following assemblies: -// -// * operators/operator_sdk/golang/osdk-golang-tutorial.adoc - -:_mod-docs-content-type: PROCEDURE -[id="osdk-golang-generate-crd_{context}"] -= Generating CRD manifests - -After the API is defined with `spec` and `status` fields and custom resource definition (CRD) validation markers, you can generate CRD manifests. - -.Procedure - -* Run the following command to generate and update CRD manifests: -+ -[source,terminal] ----- -$ make manifests ----- -+ -This Makefile target invokes the `controller-gen` utility to generate the CRD manifests in the `config/crd/bases/cache.example.com_memcacheds.yaml` file. diff --git a/modules/osdk-golang-implement-controller.adoc b/modules/osdk-golang-implement-controller.adoc deleted file mode 100644 index 8a21c9f027..0000000000 --- a/modules/osdk-golang-implement-controller.adoc +++ /dev/null @@ -1,227 +0,0 @@ -// Module included in the following assemblies: -// -// * operators/operator_sdk/golang/osdk-golang-tutorial.adoc - -:_mod-docs-content-type: PROCEDURE -[id="osdk-golang-implement-controller_{context}"] -= Implementing the controller - -After creating a new API and controller, you can implement the controller logic. - -.Procedure - -* For this example, replace the generated controller file `controllers/memcached_controller.go` with following example implementation: -+ -.Example `memcached_controller.go` -[%collapsible] -==== -[source,golang] ----- -/* -Copyright 2020. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package controllers - -import ( - appsv1 "k8s.io/api/apps/v1" - corev1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/types" - "reflect" - - "context" - - "github.com/go-logr/logr" - "k8s.io/apimachinery/pkg/runtime" - ctrl "sigs.k8s.io/controller-runtime" - "sigs.k8s.io/controller-runtime/pkg/client" - ctrllog "sigs.k8s.io/controller-runtime/pkg/log" - - cachev1 "github.com/example-inc/memcached-operator/api/v1" -) - -// MemcachedReconciler reconciles a Memcached object -type MemcachedReconciler struct { - client.Client - Log logr.Logger - Scheme *runtime.Scheme -} - -// +kubebuilder:rbac:groups=cache.example.com,resources=memcacheds,verbs=get;list;watch;create;update;patch;delete -// +kubebuilder:rbac:groups=cache.example.com,resources=memcacheds/status,verbs=get;update;patch -// +kubebuilder:rbac:groups=cache.example.com,resources=memcacheds/finalizers,verbs=update -// +kubebuilder:rbac:groups=apps,resources=deployments,verbs=get;list;watch;create;update;patch;delete -// +kubebuilder:rbac:groups=core,resources=pods,verbs=get;list; - -// Reconcile is part of the main kubernetes reconciliation loop which aims to -// move the current state of the cluster closer to the desired state. -// TODO(user): Modify the Reconcile function to compare the state specified by -// the Memcached object against the actual cluster state, and then -// perform operations to make the cluster state reflect the state specified by -// the user. 
-// -// For more details, check Reconcile and its Result here: -// - https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.7.0/pkg/reconcile -func (r *MemcachedReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { - //log := r.Log.WithValues("memcached", req.NamespacedName) - log := ctrllog.FromContext(ctx) - // Fetch the Memcached instance - memcached := &cachev1.Memcached{} - err := r.Get(ctx, req.NamespacedName, memcached) - if err != nil { - if errors.IsNotFound(err) { - // Request object not found, could have been deleted after reconcile request. - // Owned objects are automatically garbage collected. For additional cleanup logic use finalizers. - // Return and don't requeue - log.Info("Memcached resource not found. Ignoring since object must be deleted") - return ctrl.Result{}, nil - } - // Error reading the object - requeue the request. - log.Error(err, "Failed to get Memcached") - return ctrl.Result{}, err - } - - // Check if the deployment already exists, if not create a new one - found := &appsv1.Deployment{} - err = r.Get(ctx, types.NamespacedName{Name: memcached.Name, Namespace: memcached.Namespace}, found) - if err != nil && errors.IsNotFound(err) { - // Define a new deployment - dep := r.deploymentForMemcached(memcached) - log.Info("Creating a new Deployment", "Deployment.Namespace", dep.Namespace, "Deployment.Name", dep.Name) - err = r.Create(ctx, dep) - if err != nil { - log.Error(err, "Failed to create new Deployment", "Deployment.Namespace", dep.Namespace, "Deployment.Name", dep.Name) - return ctrl.Result{}, err - } - // Deployment created successfully - return and requeue - return ctrl.Result{Requeue: true}, nil - } else if err != nil { - log.Error(err, "Failed to get Deployment") - return ctrl.Result{}, err - } - - // Ensure the deployment size is the same as the spec - size := memcached.Spec.Size - if *found.Spec.Replicas != size { - found.Spec.Replicas = &size - err = r.Update(ctx, found) - if err != nil { - 
log.Error(err, "Failed to update Deployment", "Deployment.Namespace", found.Namespace, "Deployment.Name", found.Name) - return ctrl.Result{}, err - } - // Spec updated - return and requeue - return ctrl.Result{Requeue: true}, nil - } - - // Update the Memcached status with the pod names - // List the pods for this memcached's deployment - podList := &corev1.PodList{} - listOpts := []client.ListOption{ - client.InNamespace(memcached.Namespace), - client.MatchingLabels(labelsForMemcached(memcached.Name)), - } - if err = r.List(ctx, podList, listOpts...); err != nil { - log.Error(err, "Failed to list pods", "Memcached.Namespace", memcached.Namespace, "Memcached.Name", memcached.Name) - return ctrl.Result{}, err - } - podNames := getPodNames(podList.Items) - - // Update status.Nodes if needed - if !reflect.DeepEqual(podNames, memcached.Status.Nodes) { - memcached.Status.Nodes = podNames - err := r.Status().Update(ctx, memcached) - if err != nil { - log.Error(err, "Failed to update Memcached status") - return ctrl.Result{}, err - } - } - - return ctrl.Result{}, nil -} - -// deploymentForMemcached returns a memcached Deployment object -func (r *MemcachedReconciler) deploymentForMemcached(m *cachev1.Memcached) *appsv1.Deployment { - ls := labelsForMemcached(m.Name) - replicas := m.Spec.Size - - dep := &appsv1.Deployment{ - ObjectMeta: metav1.ObjectMeta{ - Name: m.Name, - Namespace: m.Namespace, - }, - Spec: appsv1.DeploymentSpec{ - Replicas: &replicas, - Selector: &metav1.LabelSelector{ - MatchLabels: ls, - }, - Template: corev1.PodTemplateSpec{ - ObjectMeta: metav1.ObjectMeta{ - Labels: ls, - }, - Spec: corev1.PodSpec{ - Containers: []corev1.Container{{ - Image: "memcached:1.4.36-alpine", - Name: "memcached", - Command: []string{"memcached", "-m=64", "-o", "modern", "-v"}, - Ports: []corev1.ContainerPort{{ - ContainerPort: 11211, - Name: "memcached", - }}, - }}, - }, - }, - }, - } - // Set Memcached instance as the owner and controller - ctrl.SetControllerReference(m, 
dep, r.Scheme) - return dep -} - -// labelsForMemcached returns the labels for selecting the resources -// belonging to the given memcached CR name. -func labelsForMemcached(name string) map[string]string { - return map[string]string{"app": "memcached", "memcached_cr": name} -} - -// getPodNames returns the pod names of the array of pods passed in -func getPodNames(pods []corev1.Pod) []string { - var podNames []string - for _, pod := range pods { - podNames = append(podNames, pod.Name) - } - return podNames -} - -// SetupWithManager sets up the controller with the Manager. -func (r *MemcachedReconciler) SetupWithManager(mgr ctrl.Manager) error { - return ctrl.NewControllerManagedBy(mgr). - For(&cachev1.Memcached{}). - Owns(&appsv1.Deployment{}). - Complete(r) -} - - ----- -==== -+ -The example controller runs the following reconciliation logic for each `Memcached` custom resource (CR): -+ --- -* Create a Memcached deployment if it does not exist. -* Ensure that the deployment size is the same as specified by the `Memcached` CR spec. -* Update the `Memcached` CR status with the names of the `memcached` pods. --- diff --git a/modules/osdk-golang-manager.adoc b/modules/osdk-golang-manager.adoc deleted file mode 100644 index ab3290be17..0000000000 --- a/modules/osdk-golang-manager.adoc +++ /dev/null @@ -1,35 +0,0 @@ -// Module included in the following assemblies: -// -// * operators/operator_sdk/golang/osdk-golang-tutorial.adoc - -:_mod-docs-content-type: CONCEPT -[id="osdk-golang-manager_{context}"] -= About the Manager - -The main program for the Operator is the `main.go` file, which initializes and runs the link:https://godoc.org/github.com/kubernetes-sigs/controller-runtime/pkg/manager#Manager[Manager]. The Manager automatically registers the Scheme for all custom resource (CR) API definitions and sets up and runs controllers and webhooks. 
- -The Manager can restrict the namespace that all controllers watch for resources: - -[source,go] ----- -mgr, err := ctrl.NewManager(cfg, manager.Options{Namespace: namespace}) ----- - -By default, the Manager watches the namespace where the Operator runs. To watch all namespaces, you can leave the `namespace` option empty: - -[source,go] ----- -mgr, err := ctrl.NewManager(cfg, manager.Options{Namespace: ""}) ----- - -You can also use the link:https://pkg.go.dev/github.com/kubernetes-sigs/controller-runtime@v0.2.0-alpha.0/pkg/cache#MultiNamespacedCacheBuilder[`MultiNamespacedCacheBuilder`] function to watch a specific set of namespaces: - -[source,go] ----- -var namespaces []string <1> -mgr, err := ctrl.NewManager(cfg, manager.Options{ <2> - NewCache: cache.MultiNamespacedCacheBuilder(namespaces), -}) ----- -<1> List of namespaces. -<2> Creates a `Cmd` struct to provide shared dependencies and start components. diff --git a/modules/osdk-golang-multi-group-apis.adoc b/modules/osdk-golang-multi-group-apis.adoc deleted file mode 100644 index df4a8fb027..0000000000 --- a/modules/osdk-golang-multi-group-apis.adoc +++ /dev/null @@ -1,30 +0,0 @@ -// Module included in the following assemblies: -// -// * operators/operator_sdk/golang/osdk-golang-tutorial.adoc - -:_mod-docs-content-type: CONCEPT -[id="osdk-golang-multi-group-apis_{context}"] -= About multi-group APIs - -Before you create an API and controller, consider whether your Operator requires multiple API groups. This tutorial covers the default case of a single group API, but to change the layout of your project to support multi-group APIs, you can run the following command: - -[source,terminal] ----- -$ operator-sdk edit --multigroup=true ----- - -This command updates the `PROJECT` file, which should look like the following example: - -[source,yaml] ----- -domain: example.com -layout: go.kubebuilder.io/v3 -multigroup: true -... 
----- - -For multi-group projects, the API Go type files are created in the `apis///` directory, and the controllers are created in the `controllers//` directory. The Dockerfile is then updated accordingly. - -.Additional resource - -* For more details on migrating to a multi-group project, see the link:https://book.kubebuilder.io/migration/multi-group.html[Kubebuilder documentation]. diff --git a/modules/osdk-golang-project-layout.adoc b/modules/osdk-golang-project-layout.adoc deleted file mode 100644 index 6f3e25dbcd..0000000000 --- a/modules/osdk-golang-project-layout.adoc +++ /dev/null @@ -1,36 +0,0 @@ -// Module included in the following assemblies: -// -// * operators/operator_sdk/golang/osdk-golang-project-layout.adoc - -[id="osdk-golang-project-layout_{context}"] -= Go-based project layout - -Go-based Operator projects, the default type, generated using the `operator-sdk init` command contain the following files and directories: - -[options="header",cols="1,4"] -|=== - -|File or directory |Purpose - -|`main.go` -|Main program of the Operator. This instantiates a new manager that registers all custom resource definitions (CRDs) in the `apis/` directory and starts all controllers in the `controllers/` directory. - -|`apis/` -|Directory tree that defines the APIs of the CRDs. You must edit the `apis//_types.go` files to define the API for each resource type and import these packages in your controllers to watch for these resource types. - -|`controllers/` -|Controller implementations. Edit the `controller/_controller.go` files to define the reconcile logic of the controller for handling a resource type of the specified kind. - -|`config/` -|Kubernetes manifests used to deploy your controller on a cluster, including CRDs, RBAC, and certificates. - -|`Makefile` -|Targets used to build and deploy your controller. - -|`Dockerfile` -|Instructions used by a container engine to build your Operator. 
- -|`manifests/` -|Kubernetes manifests for registering CRDs, setting up RBAC, and deploying the Operator as a deployment. - -|=== diff --git a/modules/osdk-ha-sno-api-examples.adoc b/modules/osdk-ha-sno-api-examples.adoc deleted file mode 100644 index b39c06807c..0000000000 --- a/modules/osdk-ha-sno-api-examples.adoc +++ /dev/null @@ -1,38 +0,0 @@ -// Module included in the following assemblies: -// -// * operators/operator_sdk/osdk-ha-sno.adoc - -[id="osdk-ha-sno-api-examples_{context}"] -= Example API usage in Operator projects - -As an Operator author, you can update your Operator project to access the Infrastructure API by using normal Kubernetes constructs and the `controller-runtime` library, as shown in the following examples: - -.`controller-runtime` library example -[source,go] ----- -// Simple query - nn := types.NamespacedName{ - Name: "cluster", - } - infraConfig := &configv1.Infrastructure{} - err = crClient.Get(context.Background(), nn, infraConfig) - if err != nil { - return err - } - fmt.Printf("using crclient: %v\n", infraConfig.Status.ControlPlaneTopology) - fmt.Printf("using crclient: %v\n", infraConfig.Status.InfrastructureTopology) ----- - -.Kubernetes constructs example -[source,go] ----- -operatorConfigInformer := configinformer.NewSharedInformerFactoryWithOptions(configClient, 2*time.Second) - infrastructureLister = operatorConfigInformer.Config().V1().Infrastructures().Lister() - infraConfig, err := configClient.ConfigV1().Infrastructures().Get(context.Background(), "cluster", metav1.GetOptions{}) - if err != nil { - return err - } -// fmt.Printf("%v\n", infraConfig) - fmt.Printf("%v\n", infraConfig.Status.ControlPlaneTopology) - fmt.Printf("%v\n", infraConfig.Status.InfrastructureTopology) ----- diff --git a/modules/osdk-ha-sno-api.adoc b/modules/osdk-ha-sno-api.adoc deleted file mode 100644 index 5e03d3048d..0000000000 --- a/modules/osdk-ha-sno-api.adoc +++ /dev/null @@ -1,19 +0,0 @@ -// Module included in the following assemblies: -// 
-// * operators/operator_sdk/osdk-ha-sno.adoc - -:_mod-docs-content-type: CONCEPT -[id="osdk-ha-sno-api_{context}"] -= About the cluster high-availability mode API - -{product-title} provides a cluster high-availability mode API that can be used by Operators to help detect infrastructure topology. The Infrastructure API holds cluster-wide information regarding infrastructure. Operators managed by Operator Lifecycle Manager (OLM) can use the Infrastructure API if they need to configure an Operand or managed workload differently based on the high-availability mode. - -In the Infrastructure API, the `infrastructureTopology` status expresses the expectations for infrastructure services that do not run on control plane nodes, usually indicated by a node selector for a `role` value other than `master`. The `controlPlaneTopology` status expresses the expectations for Operands that normally run on control plane nodes. - -The default setting for either status is `HighlyAvailable`, which represents the behavior Operators have in multiple node clusters. The `SingleReplica` setting is used in single-node clusters, also known as {sno}, and indicates that Operators should not configure their Operands for high-availability operation. - -The {product-title} installer sets the `controlPlaneTopology` and `infrastructureTopology` status fields based on the replica counts for the cluster when it is created, according to the following rules: - -* When the control plane replica count is less than 3, the `controlPlaneTopology` status is set to `SingleReplica`. Otherwise, it is set to `HighlyAvailable`. -* When the worker replica count is 0, the control plane nodes are also configured as workers. Therefore, the `infrastructureTopology` status will be the same as the `controlPlaneTopology` status. -* When the worker replica count is 1, the `infrastructureTopology` is set to `SingleReplica`. Otherwise, it is set to `HighlyAvailable`. 
diff --git a/modules/osdk-helm-charts.adoc b/modules/osdk-helm-charts.adoc deleted file mode 100644 index ae752a6d16..0000000000 --- a/modules/osdk-helm-charts.adoc +++ /dev/null @@ -1,38 +0,0 @@ -// Module included in the following assemblies: -// -// * operators/operator_sdk/helm/osdk-helm-support.adoc - -[id="osdk-helm-charts_{context}"] -= Helm charts - -One of the Operator SDK options for generating an Operator project includes leveraging an existing Helm chart to deploy Kubernetes resources as a unified application, without having to write any Go code. Such Helm-based Operators are designed to excel at stateless applications that require very little logic when rolled out, because changes should be applied to the Kubernetes objects that are generated as part of the chart. This may sound limiting, but can be sufficient for a surprising amount of use-cases as shown by the proliferation of Helm charts built by the Kubernetes community. - -The main function of an Operator is to read from a custom object that represents your application instance and have its desired state match what is running. In the case of a Helm-based Operator, the `spec` field of the object is a list of configuration options that are typically described in the Helm `values.yaml` file. Instead of setting these values with flags using the Helm CLI (for example, `helm install -f values.yaml`), you can express them within a custom resource (CR), which, as a native Kubernetes object, enables the benefits of RBAC applied to it and an audit trail. 
- -For an example of a simple CR called `Tomcat`: - -[source,yaml] ----- -apiVersion: apache.org/v1alpha1 -kind: Tomcat -metadata: - name: example-app -spec: - replicaCount: 2 ----- - -The `replicaCount` value, `2` in this case, is propagated into the template of the chart where the following is used: - -[source,yaml] ----- -{{ .Values.replicaCount }} ----- - -After an Operator is built and deployed, you can deploy a new instance of an app by creating a new instance of a CR, or list the different instances running in all environments using the `oc` command: - -[source,terminal] ----- -$ oc get Tomcats --all-namespaces ----- - -There is no requirement use the Helm CLI or install Tiller; Helm-based Operators import code from the Helm project. All you have to do is have an instance of the Operator running and register the CR with a custom resource definition (CRD). Because it obeys RBAC, you can more easily prevent production changes. diff --git a/modules/osdk-helm-existing-chart.adoc b/modules/osdk-helm-existing-chart.adoc deleted file mode 100644 index 8e59d06a5f..0000000000 --- a/modules/osdk-helm-existing-chart.adoc +++ /dev/null @@ -1,65 +0,0 @@ -// Module included in the following assemblies: -// -// * operators/operator_sdk/helm/osdk-helm-tutorial.adoc - -[id="osdk-helm-existing-chart_{context}"] -= Existing Helm charts - -Instead of creating your project with a boilerplate Helm chart, you can alternatively use an existing chart, either from your local file system or a remote chart repository, by using the following flags: - -* `--helm-chart` -* `--helm-chart-repo` -* `--helm-chart-version` - -If the `--helm-chart` flag is specified, the `--group`, `--version`, and `--kind` flags become optional. 
If left unset, the following default values are used: - -[options="header"] -|=== -|Flag |Value - -|`--domain` -|`my.domain` - -|`--group` -|`charts` - -|`--version` -|`v1` - -|`--kind` -|Deduced from the specified chart -|=== - -If the `--helm-chart` flag specifies a local chart archive, for example `example-chart-1.2.0.tgz`, or directory, the chart is validated and unpacked or copied into the project. Otherwise, the Operator SDK attempts to fetch the chart from a remote repository. - -If a custom repository URL is not specified by the `--helm-chart-repo` flag, the following chart reference formats are supported: - -[cols="1,4",options="header"] -|=== -|Format |Description - -|`/` -|Fetch the Helm chart named `` from the helm chart repository named ``, as specified in the `$HELM_HOME/repositories/repositories.yaml` file. Use the `helm repo add` command to configure this file. - -|`` -|Fetch the Helm chart archive at the specified URL. -|=== - -If a custom repository URL is specified by `--helm-chart-repo`, the following chart reference format is supported: - -[cols="1,4",options="header"] -|=== -|Format |Description - -|`` -|Fetch the Helm chart named `` in the Helm chart repository specified by the `--helm-chart-repo` URL value. -|=== - -If the `--helm-chart-version` flag is unset, the Operator SDK fetches the latest available version of the Helm chart. Otherwise, it fetches the specified version. The optional `--helm-chart-version` flag is not used when the chart specified with the `--helm-chart` flag refers to a specific version, for example when it is a local path or a URL. 
- -For more details and examples, run: - -[source,terminal] ----- -$ operator-sdk init --plugins helm --help ----- diff --git a/modules/osdk-helm-logic.adoc b/modules/osdk-helm-logic.adoc deleted file mode 100644 index f0e00e41cc..0000000000 --- a/modules/osdk-helm-logic.adoc +++ /dev/null @@ -1,26 +0,0 @@ -// Module included in the following assemblies: -// -// * operators/operator_sdk/helm/osdk-helm-tutorial.adoc - -:_mod-docs-content-type: CONCEPT -[id="osdk-helm-logic_{context}"] -= Understanding the Operator logic - -For this example, the `nginx-operator` project executes the following reconciliation logic for each `Nginx` custom resource (CR): - -* Create an Nginx deployment if it does not exist. -* Create an Nginx service if it does not exist. -* Create an Nginx ingress if it is enabled and does not exist. -* Ensure that the deployment, service, and optional ingress match the desired configuration as specified by the `Nginx` CR, for example the replica count, image, and service type. - -By default, the `nginx-operator` project watches `Nginx` resource events as shown in the `watches.yaml` file and executes Helm releases using the specified chart: - -[source,yaml] ----- -# Use the 'create api' subcommand to add watches to this file. 
-- group: demo - version: v1 - kind: Nginx - chart: helm-charts/nginx -# +kubebuilder:scaffold:watch ----- diff --git a/modules/osdk-helm-modify-cr.adoc b/modules/osdk-helm-modify-cr.adoc deleted file mode 100644 index 402cbe01f9..0000000000 --- a/modules/osdk-helm-modify-cr.adoc +++ /dev/null @@ -1,45 +0,0 @@ -// Module included in the following assemblies: -// -// * operators/operator_sdk/helm/osdk-helm-tutorial.adoc - -:_mod-docs-content-type: PROCEDURE -[id="osdk-helm-modify-cr_{context}"] -= Modifying the custom resource spec - -Helm uses a concept called link:https://helm.sh/docs/intro/using_helm/#customizing-the-chart-before-installing[values] to provide customizations to the defaults of a Helm chart, which are defined in the `values.yaml` file. - -You can override these defaults by setting the desired values in the custom resource (CR) spec. You can use the number of replicas as an example. - -.Procedure - -. The `helm-charts/nginx/values.yaml` file has a value called `replicaCount` set to `1` by default. To have two Nginx instances in your deployment, your CR spec must contain `replicaCount: 2`. -+ -Edit the `config/samples/demo_v1_nginx.yaml` file to set `replicaCount: 2`: -+ -[source,yaml] ----- -apiVersion: demo.example.com/v1 -kind: Nginx -metadata: - name: nginx-sample -... -spec: -... - replicaCount: 2 ----- - -. Similarly, the default service port is set to `80`. To use `8080`, edit the `config/samples/demo_v1_nginx.yaml` file to set `spec.port: 8080`,which adds the service port override: -+ -[source,yaml] ----- -apiVersion: demo.example.com/v1 -kind: Nginx -metadata: - name: nginx-sample -spec: - replicaCount: 2 - service: - port: 8080 ----- - -The Helm Operator applies the entire spec as if it was the contents of a values file, just like the `helm install -f ./overrides.yaml` command. 
diff --git a/modules/osdk-helm-project-layout.adoc b/modules/osdk-helm-project-layout.adoc deleted file mode 100644 index 2dd5e79460..0000000000 --- a/modules/osdk-helm-project-layout.adoc +++ /dev/null @@ -1,33 +0,0 @@ -// Module included in the following assemblies: -// -// * operators/operator_sdk/helm/osdk-helm-project-layout.adoc - -[id="osdk-helm-project-layout_{context}"] -= Helm-based project layout - -Helm-based Operator projects generated using the `operator-sdk init --plugins helm` command contain the following directories and files: - -[options="header",cols="1,4"] -|=== - -|File/folders |Purpose - -|`config/` -|link:https://kustomize.io/[Kustomize] manifests for deploying the Operator on a Kubernetes cluster. - -|`helm-charts/` -|Helm chart initialized with the `operator-sdk create api` command. - -|`Dockerfile` -|Used to build the Operator image with the `make docker-build` command. - -|`watches.yaml` -|Group/version/kind (GVK) and Helm chart location. - -|`Makefile` -|Targets used to manage the project. - -|`PROJECT` -|YAML file containing metadata information for the Operator. - -|=== diff --git a/modules/osdk-helm-sample-chart.adoc b/modules/osdk-helm-sample-chart.adoc deleted file mode 100644 index b8b576e065..0000000000 --- a/modules/osdk-helm-sample-chart.adoc +++ /dev/null @@ -1,12 +0,0 @@ -// Module included in the following assemblies: -// -// * operators/operator_sdk/helm/osdk-helm-tutorial.adoc - -[id="osdk-helm-sample-chart_{context}"] -= Sample Helm chart - -When a Helm Operator project is created, the Operator SDK creates a sample Helm chart that contains a set of templates for a simple Nginx release. - -For this example, templates are available for deployment, service, and ingress resources, along with a `NOTES.txt` template, which Helm chart developers use to convey helpful information about a release. 
- -If you are not already familiar with Helm charts, review the link:https://docs.helm.sh/developing_charts/[Helm developer documentation]. diff --git a/modules/osdk-hiding-internal-objects.adoc b/modules/osdk-hiding-internal-objects.adoc deleted file mode 100644 index dcab32709b..0000000000 --- a/modules/osdk-hiding-internal-objects.adoc +++ /dev/null @@ -1,30 +0,0 @@ -// Module included in the following assemblies: -// -// * operators/operator_sdk/osdk-generating-csvs.adoc - -:_mod-docs-content-type: PROCEDURE -[id="osdk-hiding-internal-objects_{context}"] -= Hiding internal objects - -It is common practice for Operators to use custom resource definitions (CRDs) internally to accomplish a task. These objects are not meant for users to manipulate and can be confusing to users of the Operator. For example, a database Operator might have a `Replication` CRD that is created whenever a user creates a Database object with `replication: true`. - -As an Operator author, you can hide any CRDs in the user interface that are not meant for user manipulation by adding the `operators.operatorframework.io/internal-objects` annotation to the cluster service version (CSV) of your Operator. - -.Procedure - -. Before marking one of your CRDs as internal, ensure that any debugging information or configuration that might be required to manage the application is reflected on the status or `spec` block of your CR, if applicable to your Operator. - -. Add the `operators.operatorframework.io/internal-objects` annotation to the CSV of your Operator to specify any internal objects to hide in the user interface: -+ -.Internal object annotation -[source,yaml] ----- -apiVersion: operators.coreos.com/v1alpha1 -kind: ClusterServiceVersion -metadata: - name: my-operator-v1.2.3 - annotations: - operators.operatorframework.io/internal-objects: '["my.internal.crd1.io","my.internal.crd2.io"]' <1> -... ----- -<1> Set any internal CRDs as an array of strings. 
diff --git a/modules/osdk-how-csv-gen-works.adoc b/modules/osdk-how-csv-gen-works.adoc deleted file mode 100644 index 5871c86d1e..0000000000 --- a/modules/osdk-how-csv-gen-works.adoc +++ /dev/null @@ -1,14 +0,0 @@ -// Module included in the following assemblies: -// -// * operators/operator_sdk/osdk-generating-csvs.adoc - -[id="osdk-how-csv-gen-works_{context}"] -= How CSV generation works - -Operator bundle manifests, which include cluster service versions (CSVs), describe how to display, create, and manage an application with Operator Lifecycle Manager (OLM). The CSV generator in the Operator SDK, called by the `generate bundle` subcommand, is the first step towards publishing your Operator to a catalog and deploying it with OLM. The subcommand requires certain input manifests to construct a CSV manifest; all inputs are read when the command is invoked, along with a CSV base, to idempotently generate or regenerate a CSV. - -Typically, the `generate kustomize manifests` subcommand would be run first to generate the input link:https://kustomize.io/[Kustomize] bases that are consumed by the `generate bundle` subcommand. However, the Operator SDK provides the `make bundle` command, which automates several tasks, including running the following subcommands in order: - -. `generate kustomize manifests` -. `generate bundle` -. `bundle validate` diff --git a/modules/osdk-init-resource.adoc b/modules/osdk-init-resource.adoc deleted file mode 100644 index d3975b862b..0000000000 --- a/modules/osdk-init-resource.adoc +++ /dev/null @@ -1,76 +0,0 @@ -// Module included in the following assemblies: -// -// * operators/operator_sdk/osdk-generating-csvs.adoc - -:_mod-docs-content-type: PROCEDURE -[id="osdk-init-resource_{context}"] -= Initializing required custom resources - -An Operator might require the user to instantiate a custom resource before the Operator can be fully functional. 
However, it can be challenging for a user to determine what is required or how to define the resource. - -As an Operator developer, you can specify a single required custom resource by adding `operatorframework.io/initialization-resource` to the cluster service version (CSV) during Operator installation. You are then prompted to create the custom resource through a template that is provided in the CSV. -The annotation must include a template that contains a complete YAML definition that is required to initialize the resource during installation. - -If this annotation is defined, after installing the Operator from the {product-title} web console, the user is prompted to create the resource using the template provided in the CSV. - -.Procedure - -* Add the `operatorframework.io/initialization-resource` annotation to the CSV of your Operator to specify a required custom resource. For example, the following annotation requires the creation of a `StorageCluster` resource and provides a full YAML definition: -+ -.Initialization resource annotation -[source,yaml] ----- -apiVersion: operators.coreos.com/v1alpha1 -kind: ClusterServiceVersion -metadata: - name: my-operator-v1.2.3 - annotations: - operatorframework.io/initialization-resource: |- - { - "apiVersion": "ocs.openshift.io/v1", - "kind": "StorageCluster", - "metadata": { - "name": "example-storagecluster" - }, - "spec": { - "manageNodes": false, - "monPVCTemplate": { - "spec": { - "accessModes": [ - "ReadWriteOnce" - ], - "resources": { - "requests": { - "storage": "10Gi" - } - }, - "storageClassName": "gp2" - } - }, - "storageDeviceSets": [ - { - "count": 3, - "dataPVCTemplate": { - "spec": { - "accessModes": [ - "ReadWriteOnce" - ], - "resources": { - "requests": { - "storage": "1Ti" - } - }, - "storageClassName": "gp2", - "volumeMode": "Block" - } - }, - "name": "example-deviceset", - "placement": {}, - "portable": true, - "resources": {} - } - ] - } - } -... 
----- diff --git a/modules/osdk-installing-cli-linux-macos.adoc b/modules/osdk-installing-cli-linux-macos.adoc deleted file mode 100644 index 6ccaa26b52..0000000000 --- a/modules/osdk-installing-cli-linux-macos.adoc +++ /dev/null @@ -1,72 +0,0 @@ -// Module included in the following assemblies: -// -// * cli_reference/osdk/cli-osdk-install.adoc -// * operators/operator_sdk/osdk-installing-cli.adoc - -:_mod-docs-content-type: PROCEDURE -[id="osdk-installing-cli-linux-macos_{context}"] -= Installing the Operator SDK CLI on Linux - -You can install the OpenShift SDK CLI tool on Linux. - -.Prerequisites - -* link:https://golang.org/dl/[Go] v1.19+ -ifdef::openshift-origin[] -* link:https://docs.docker.com/install/[`docker`] v17.03+, link:https://github.com/containers/libpod/blob/master/install.md[`podman`] v1.2.0+, or link:https://github.com/containers/buildah/blob/master/install.md[`buildah`] v1.7+ -endif::[] -ifndef::openshift-origin[] -* `docker` v17.03+, `podman` v1.9.3+, or `buildah` v1.7+ -endif::[] - -.Procedure - -. Navigate to the link:https://mirror.openshift.com/pub/openshift-v4/x86_64/clients/operator-sdk/[OpenShift mirror site]. - -. From the latest {product-version} directory, download the latest version of the tarball for Linux. - -. Unpack the archive: -+ -[source,terminal,subs="attributes+"] ----- -$ tar xvf operator-sdk-v{osdk_ver}-ocp-linux-x86_64.tar.gz ----- - -. Make the file executable: -+ -[source,terminal] ----- -$ chmod +x operator-sdk ----- - -. Move the extracted `operator-sdk` binary to a directory that is on your `PATH`. 
-+ -[TIP] -==== -To check your `PATH`: - -[source,terminal] ----- -$ echo $PATH ----- -==== -+ -[source,terminal] ----- -$ sudo mv ./operator-sdk /usr/local/bin/operator-sdk ----- - -.Verification - -* After you install the Operator SDK CLI, verify that it is available: -+ -[source,terminal] ----- -$ operator-sdk version ----- -+ -.Example output -[source,terminal,subs="attributes+"] ----- -operator-sdk version: "v{osdk_ver}-ocp", ... ----- diff --git a/modules/osdk-installing-cli-macos.adoc b/modules/osdk-installing-cli-macos.adoc deleted file mode 100644 index 4d718ebb76..0000000000 --- a/modules/osdk-installing-cli-macos.adoc +++ /dev/null @@ -1,85 +0,0 @@ -// Module included in the following assemblies: -// -// * cli_reference/osdk/cli-osdk-install.adoc -// * operators/operator_sdk/osdk-installing-cli.adoc - -:_mod-docs-content-type: PROCEDURE -[id="osdk-installing-cli-macos_{context}"] -= Installing the Operator SDK CLI on macOS - -You can install the OpenShift SDK CLI tool on macOS. - -.Prerequisites - -* link:https://golang.org/dl/[Go] v1.19+ -ifdef::openshift-origin[] -* link:https://docs.docker.com/install/[`docker`] v17.03+, link:https://github.com/containers/libpod/blob/master/install.md[`podman`] v1.2.0+, or link:https://github.com/containers/buildah/blob/master/install.md[`buildah`] v1.7+ -endif::[] -ifndef::openshift-origin[] -* `docker` v17.03+, `podman` v1.9.3+, or `buildah` v1.7+ -endif::[] - -.Procedure -ifndef::openshift-rosa,openshift-dedicated[] -. For the `amd64` and `arm64` architectures, navigate to the link:https://mirror.openshift.com/pub/openshift-v4/x86_64/clients/operator-sdk/[OpenShift mirror site for the `amd64` architecture] and link:https://mirror.openshift.com/pub/openshift-v4/arm64/clients/operator-sdk/[OpenShift mirror site for the `arm64` architecture] respectively. -endif::openshift-rosa,openshift-dedicated[] - -ifdef::openshift-rosa,openshift-dedicated[] -. 
For the `amd64` architecture, navigate to the link:https://mirror.openshift.com/pub/openshift-v4/x86_64/clients/operator-sdk/[OpenShift mirror site for the `amd64` architecture]. -endif::openshift-rosa,openshift-dedicated[] - - -. From the latest {product-version} directory, download the latest version of the tarball for macOS. - -. Unpack the Operator SDK archive for `amd64` architecture by running the following command: -+ -[source,terminal,subs="attributes+"] ----- -$ tar xvf operator-sdk-v{osdk_ver}-ocp-darwin-x86_64.tar.gz ----- -ifndef::openshift-rosa,openshift-dedicated[] -. Unpack the Operator SDK archive for `arm64` architecture by running the following command: -+ -[source,terminal,subs="attributes+"] ----- -$ tar xvf operator-sdk-v{osdk_ver}-ocp-darwin-aarch64.tar.gz ----- -endif::openshift-rosa,openshift-dedicated[] -. Make the file executable by running the following command: -+ -[source,terminal] ----- -$ chmod +x operator-sdk ----- - -. Move the extracted `operator-sdk` binary to a directory that is on your `PATH` by running the following command: -+ -[TIP] -==== -Check your `PATH` by running the following command: - -[source,terminal] ----- -$ echo $PATH ----- -==== -+ -[source,terminal] ----- -$ sudo mv ./operator-sdk /usr/local/bin/operator-sdk ----- - -.Verification - -* After you install the Operator SDK CLI, verify that it is available by running the following command:: -+ -[source,terminal] ----- -$ operator-sdk version ----- -+ -.Example output -[source,terminal,subs="attributes+"] ----- -operator-sdk version: "v{osdk_ver}-ocp", ... 
----- diff --git a/modules/osdk-leader-election-types.adoc b/modules/osdk-leader-election-types.adoc deleted file mode 100644 index ab904e67f2..0000000000 --- a/modules/osdk-leader-election-types.adoc +++ /dev/null @@ -1,59 +0,0 @@ -// Module included in the following assemblies: -// -// * operators/operator_sdk/osdk-leader-election.adoc - -[id="osdk-leader-election-types_{context}"] -= Operator leader election examples - -The following examples illustrate how to use the two leader election options for an Operator, Leader-for-life and Leader-with-lease. - -[id="osdk-leader-for-life-election_{context}"] -== Leader-for-life election - -With the Leader-for-life election implementation, a call to `leader.Become()` blocks the Operator as it retries until it can become the leader by creating the config map named `memcached-operator-lock`: - -[source,go] ----- -import ( - ... - "github.com/operator-framework/operator-sdk/pkg/leader" -) - -func main() { - ... - err = leader.Become(context.TODO(), "memcached-operator-lock") - if err != nil { - log.Error(err, "Failed to retry for leader lock") - os.Exit(1) - } - ... -} ----- - -If the Operator is not running inside a cluster, `leader.Become()` simply returns without error to skip the leader election since it cannot detect the name of the Operator. - -[id="osdk-leader-with-lease-election_{context}"] -== Leader-with-lease election - -The Leader-with-lease implementation can be enabled using the link:https://godoc.org/github.com/kubernetes-sigs/controller-runtime/pkg/manager#Options[Manager Options] for leader election: - -[source,go] ----- -import ( - ... - "sigs.k8s.io/controller-runtime/pkg/manager" -) - -func main() { - ... - opts := manager.Options{ - ... - LeaderElection: true, - LeaderElectionID: "memcached-operator-lock" - } - mgr, err := manager.New(cfg, opts) - ... 
-} ----- - -When the Operator is not running in a cluster, the Manager returns an error when starting because it cannot detect the namespace of the Operator to create the config map for leader election. You can override this namespace by setting the `LeaderElectionNamespace` option for the Manager. diff --git a/modules/osdk-manager-file.adoc b/modules/osdk-manager-file.adoc deleted file mode 100644 index 47e21fd96d..0000000000 --- a/modules/osdk-manager-file.adoc +++ /dev/null @@ -1,26 +0,0 @@ -// Module included in the following assemblies: -// -// * operators/operator_sdk/golang/osdk-golang-tutorial.adoc - -[id="osdk-manager-file_{context}"] -= Manager file - -The main program for the Operator is the manager file at `cmd/manager/main.go`. The manager automatically registers the scheme for all custom resources (CRs) defined under `pkg/apis/` and runs all controllers under `pkg/controller/`. - -The manager can restrict the namespace that all controllers watch for resources: - -[source,go] ----- -mgr, err := manager.New(cfg, manager.Options{Namespace: namespace}) ----- - -By default, the controller watches the namespace that the Operator runs in. 
To watch all namespaces, you can leave the namespace option empty: - -[source,go] ----- -mgr, err := manager.New(cfg, manager.Options{Namespace: ""}) ----- - -//// -TODO: Doc on manager options(Sync period, leader election, registering 3rd party types) -//// diff --git a/modules/osdk-managing-psa-for-operators-with-escalated-permissions.adoc b/modules/osdk-managing-psa-for-operators-with-escalated-permissions.adoc deleted file mode 100644 index b52721025b..0000000000 --- a/modules/osdk-managing-psa-for-operators-with-escalated-permissions.adoc +++ /dev/null @@ -1,61 +0,0 @@ -// Module included in the following assemblies: -// -// * operators/operator_sdk/osdk-complying-with-psa.adoc - -:_mod-docs-content-type: PROCEDURE -[id="osdk-managing-psa-for-operators-with-escalated-permissions_{context}"] -= Managing pod security admission for Operator workloads that require escalated permissions - -If your Operator project requires escalated permissions to run, you must edit your Operator's cluster service version (CSV). - -.Procedure - -. Set the security context configuration to the required permission level in your Operator's CSV, similar to the following example: -+ -.Example `.clusterserviceversion.yaml` file with network administrator privileges -[source,yaml] ----- -... -containers: - - name: my-container - securityContext: - allowPrivilegeEscalation: false - capabilities: - add: - - "NET_ADMIN" -... ----- - -. Set the service account privileges that allow your Operator's workloads to use the required security context constraints (SCC), similar to the following example: -+ -.Example `.clusterserviceversion.yaml` file -[source,yaml] ----- -... - install: - spec: - clusterPermissions: - - rules: - - apiGroups: - - security.openshift.io - resourceNames: - - privileged - resources: - - securitycontextconstraints - verbs: - - use - serviceAccountName: default -... ----- - -. 
Edit your Operator's CSV description to explain why your Operator project requires escalated permissions similar to the following example: -+ -.Example `.clusterserviceversion.yaml` file -[source,yaml] ----- -... -spec: - apiservicedefinitions:{} - ... -description: The requires a privileged pod security admission label set on the Operator's namespace. The Operator's agents require escalated permissions to restart the node if the node needs remediation. ----- diff --git a/modules/osdk-manually-defined-csv-fields.adoc b/modules/osdk-manually-defined-csv-fields.adoc deleted file mode 100644 index 76ec4e9b06..0000000000 --- a/modules/osdk-manually-defined-csv-fields.adoc +++ /dev/null @@ -1,83 +0,0 @@ -// Module included in the following assemblies: -// -// * operators/operator_sdk/osdk-generating-csvs.adoc - -[id="osdk-manually-defined-csv-fields_{context}"] -= Manually-defined CSV fields - -Many CSV fields cannot be populated using generated, generic manifests that are not specific to Operator SDK. These fields are mostly human-written metadata about the Operator and various custom resource definitions (CRDs). - -Operator authors must directly modify their cluster service version (CSV) YAML file, adding personalized data to the following required fields. The Operator SDK gives a warning during CSV generation when a lack of data in any of the required fields is detected. - -The following tables detail which manually-defined CSV fields are required and which are optional. - -.Required CSV fields -[cols="2a,8a",options="header"] -|=== -|Field |Description - -|`metadata.name` -|A unique name for this CSV. Operator version should be included in the name to ensure uniqueness, for example `app-operator.v0.1.1`. - -|`metadata.capabilities` -|The capability level according to the Operator maturity model. Options include `Basic Install`, `Seamless Upgrades`, `Full Lifecycle`, `Deep Insights`, and `Auto Pilot`. - -|`spec.displayName` -|A public name to identify the Operator. 
- -|`spec.description` -|A short description of the functionality of the Operator. - -|`spec.keywords` -|Keywords describing the Operator. - -|`spec.maintainers` -|Human or organizational entities maintaining the Operator, with a `name` and `email`. - -|`spec.provider` -|The provider of the Operator (usually an organization), with a `name`. - -|`spec.labels` -|Key-value pairs to be used by Operator internals. - -|`spec.version` -|Semantic version of the Operator, for example `0.1.1`. - -|`spec.customresourcedefinitions` -|Any CRDs the Operator uses. This field is populated automatically by the Operator SDK if any CRD YAML files are present in `deploy/`. However, several fields not in the CRD manifest spec require user input: - -- `description`: description of the CRD. -- `resources`: any Kubernetes resources leveraged by the CRD, for example `Pod` and `StatefulSet` objects. -- `specDescriptors`: UI hints for inputs and outputs of the Operator. -|=== - - -.Optional CSV fields -[cols="2a,8a",options="header"] -|=== -|Field |Description - -|`spec.replaces` -|The name of the CSV being replaced by this CSV. - -|`spec.links` -|URLs (for example, websites and documentation) pertaining to the Operator or application being managed, each with a `name` and `url`. - -|`spec.selector` -|Selectors by which the Operator can pair resources in a cluster. - -|`spec.icon` -|A base64-encoded icon unique to the Operator, set in a `base64data` field with a `mediatype`. - -|`spec.maturity` -|The level of maturity the software has achieved at this version. Options include `planning`, `pre-alpha`, `alpha`, `beta`, `stable`, `mature`, `inactive`, and `deprecated`. - -|`metadata.annotations` -|=== - -Further details on what data each field above should hold are found in the link:https://github.com/operator-framework/operator-lifecycle-manager/blob/master/doc/design/building-your-csv.md[CSV spec]. 
- -[NOTE] -==== -Several YAML fields currently requiring user intervention can potentially be parsed from Operator code. -==== diff --git a/modules/osdk-migrating-pkgman.adoc b/modules/osdk-migrating-pkgman.adoc deleted file mode 100644 index df4f00eeea..0000000000 --- a/modules/osdk-migrating-pkgman.adoc +++ /dev/null @@ -1,61 +0,0 @@ -// Module included in the following assemblies: -// -// * operators/operator_sdk/osdk-pkgman-to-bundle.adoc - -:_mod-docs-content-type: PROCEDURE -[id="osdk-migrating-pkgman_{context}"] -= Migrating a package manifest project to bundle format - -Operator authors can use the Operator SDK to migrate a package manifest format Operator project to a bundle format project. - -.Prerequisites - -* Operator SDK CLI installed -* Operator project initially generated using the Operator SDK in package manifest format - -.Procedure - -* Use the Operator SDK to migrate your package manifest project to the bundle format and generate bundle images: -+ -[source,terminal] ----- -$ operator-sdk pkgman-to-bundle \ <1> - [--output-dir ] \ <2> - --image-tag-base <3> ----- -<1> Specify the location of the package manifests directory for the project, such as `packagemanifests/` or `manifests/`. -<2> Optional: By default, the generated bundles are written locally to disk to the `bundle/` directory. You can use the `--output-dir` flag to specify an alternative location. -<3> Set the `--image-tag-base` flag to provide the base of the image name, such as `quay.io/example/etcd`, that will be used for the bundles. Provide the name without a tag, because the tag for the images will be set according to the bundle version. For example, the full bundle image names are generated in the format `:`. - -//// -Reinsert in place after https://bugzilla.redhat.com/show_bug.cgi?id=1967369 is fixed: - - [--build-cmd ] \ <3> - -<3> Optional: Specify the build command for building container images using the `--build-cmd` flag. The default build command is `docker build`. 
The command must be in your `PATH`, otherwise you must provide a fully qualified path name. -//// - -.Verification - -* Verify that the generated bundle image runs successfully: -+ -[source,terminal] ----- -$ operator-sdk run bundle : ----- -+ -.Example output -[source,terminal] ----- -INFO[0025] Successfully created registry pod: quay-io-my-etcd-0-9-4 -INFO[0025] Created CatalogSource: etcd-catalog -INFO[0026] OperatorGroup "operator-sdk-og" created -INFO[0026] Created Subscription: etcdoperator-v0-9-4-sub -INFO[0031] Approved InstallPlan install-5t58z for the Subscription: etcdoperator-v0-9-4-sub -INFO[0031] Waiting for ClusterServiceVersion "default/etcdoperator.v0.9.4" to reach 'Succeeded' phase -INFO[0032] Waiting for ClusterServiceVersion "default/etcdoperator.v0.9.4" to appear -INFO[0048] Found ClusterServiceVersion "default/etcdoperator.v0.9.4" phase: Pending -INFO[0049] Found ClusterServiceVersion "default/etcdoperator.v0.9.4" phase: Installing -INFO[0064] Found ClusterServiceVersion "default/etcdoperator.v0.9.4" phase: Succeeded -INFO[0065] OLM has successfully installed "etcdoperator.v0.9.4" ----- diff --git a/modules/osdk-monitoring-custom-metrics.adoc b/modules/osdk-monitoring-custom-metrics.adoc deleted file mode 100644 index 6bfcd94a92..0000000000 --- a/modules/osdk-monitoring-custom-metrics.adoc +++ /dev/null @@ -1,168 +0,0 @@ -// Module included in the following assemblies: -// -// * operators/operator_sdk/osdk-monitoring-prometheus.adoc - -:_mod-docs-content-type: PROCEDURE -[id="osdk-monitoring-custom-metrics_{context}"] -= Exposing custom metrics for Go-based Operators - -As an Operator author, you can publish custom metrics by using the global Prometheus registry from the `controller-runtime/pkg/metrics` library. - -.Prerequisites - -* Go-based Operator generated using the Operator SDK -* Prometheus Operator, which is deployed by default on {product-title} clusters - -.Procedure - -. 
In your Operator SDK project, uncomment the following line in the `config/default/kustomization.yaml` file: -+ -[source,yaml] ----- -../prometheus ----- - -. Create a custom controller class to publish additional metrics from the Operator. The following example declares the `widgets` and `widgetFailures` collectors as global variables, and then registers them with the `init()` function in the controller's package: -+ -.`controllers/memcached_controller_test_metrics.go` file -[%collapsible] -==== -[source,go] ----- -package controllers - -import ( - "github.com/prometheus/client_golang/prometheus" - "sigs.k8s.io/controller-runtime/pkg/metrics" -) - - -var ( - widgets = prometheus.NewCounter( - prometheus.CounterOpts{ - Name: "widgets_total", - Help: "Number of widgets processed", - }, - ) - widgetFailures = prometheus.NewCounter( - prometheus.CounterOpts{ - Name: "widget_failures_total", - Help: "Number of failed widgets", - }, - ) -) - -func init() { - // Register custom metrics with the global prometheus registry - metrics.Registry.MustRegister(widgets, widgetFailures) -} ----- -==== - -. Record to these collectors from any part of the reconcile loop in the `main` controller class, which determines the business logic for the metric: -+ -.`controllers/memcached_controller.go` file -[%collapsible] -==== -[source,go] ----- -func (r *MemcachedReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { - ... - ... - // Add metrics - widgets.Inc() - widgetFailures.Inc() - - return ctrl.Result{}, nil -} ----- -==== - -. Build and push the Operator: -+ -[source,terminal] ----- -$ make docker-build docker-push IMG=//: ----- - -. Deploy the Operator: -+ -[source,terminal] ----- -$ make deploy IMG=//: ----- - -. Create role and role binding definitions to allow the service monitor of the Operator to be scraped by the Prometheus instance of the {product-title} cluster. 
-+ -Roles must be assigned so that service accounts have the permissions to scrape the metrics of the namespace: -+ -.`config/prometheus/role.yaml` role -[%collapsible] -==== -[source,yaml] ----- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - name: prometheus-k8s-role - namespace: memcached-operator-system -rules: - - apiGroups: - - "" - resources: - - endpoints - - pods - - services - - nodes - - secrets - verbs: - - get - - list - - watch ----- -==== -+ -.`config/prometheus/rolebinding.yaml` role binding -[%collapsible] -==== -[source,yaml] ----- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - name: prometheus-k8s-rolebinding - namespace: memcached-operator-system -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: prometheus-k8s-role -subjects: - - kind: ServiceAccount - name: prometheus-k8s - namespace: openshift-monitoring ----- -==== - -. Apply the roles and role bindings for the deployed Operator: -+ -[source,terminal] -+ ----- -$ oc apply -f config/prometheus/role.yaml ----- -+ -[source,terminal] ----- -$ oc apply -f config/prometheus/rolebinding.yaml ----- - -. Set the labels for the namespace that you want to scrape, which enables OpenShift cluster monitoring for that namespace: -+ -[source,terminal] ----- -$ oc label namespace openshift.io/cluster-monitoring="true" ----- - -.Verification - -* Query and view the metrics in the {product-title} web console. You can use the names that were set in the custom controller class, for example `widgets_total` and `widget_failures_total`. 
diff --git a/modules/osdk-monitoring-prometheus-operator-support.adoc b/modules/osdk-monitoring-prometheus-operator-support.adoc deleted file mode 100644 index 195c61b421..0000000000 --- a/modules/osdk-monitoring-prometheus-operator-support.adoc +++ /dev/null @@ -1,10 +0,0 @@ -// Module included in the following assemblies: -// -// * operators/operator_sdk/osdk-monitoring-prometheus.adoc - -[id="osdk-monitoring-prometheus-operator-support_{context}"] -= Prometheus Operator support - -link:https://prometheus.io/[Prometheus] is an open-source systems monitoring and alerting toolkit. The Prometheus Operator creates, configures, and manages Prometheus clusters running on Kubernetes-based clusters, such as {product-title}. - -Helper functions exist in the Operator SDK by default to automatically set up metrics in any generated Go-based Operator for use on clusters where the Prometheus Operator is deployed. diff --git a/modules/osdk-multi-arch-building-images.adoc b/modules/osdk-multi-arch-building-images.adoc deleted file mode 100644 index 1feb3eef9d..0000000000 --- a/modules/osdk-multi-arch-building-images.adoc +++ /dev/null @@ -1,107 +0,0 @@ -// Module included in the following assemblies: -// -// * operators/operator_sdk/osdk-multi-arch-support.adoc - -:_mod-docs-content-type: PROCEDURE -[id="osdk-multi-arch-building-images_{context}"] -= Building a manifest list of the platforms your Operator supports - -You can use the `make docker-buildx` command to build a manifest list of the platforms supported by your Operator and operands. A manifest list references specific image manifests for one or more architectures. An image manifest specifies the platforms that an image supports. - -For more information, see link:https://specs.opencontainers.org/image-spec/image-index[OpenContainers Image Index Spec] or link:https://docs.docker.com/registry/spec/manifest-v2-2/#manifest-list[Image Manifest v2, Schema 2]. 
- -[IMPORTANT] -==== -If your Operator project deploys an application or other workload resources, the following procedure assumes the application's multi-platform images are built during the application release process. -==== - -.Prerequisites - -* An Operator project built using the Operator SDK version {osdk_ver} or later -* Docker installed - -.Procedure - -. Inspect the image manifests of your Operator and operands to find which platforms your Operator project can support. Run the following command to inspect an image manifest: -+ -[source,terminal] ----- -$ docker manifest inspect <1> ----- -<1> Specifies an image manifest, such as `redhat/ubi9:latest`. -+ -The platforms that your Operator and operands mutually support determine the platform compatibility of your Operator project. -+ -.Example output -[source,json] ----- -{ - "manifests": [ - { - "digest": "sha256:c0669ef34cdc14332c0f1ab0c2c01acb91d96014b172f1a76f3a39e63d1f0bda", - "mediaType": "application/vnd.docker.distribution.manifest.v2+json", - "platform": { - "architecture": "amd64", - "os": "linux" - }, - "size": 528 - }, -... - { - "digest": "sha256:30e6d35703c578ee703230b9dc87ada2ba958c1928615ac8a674fcbbcbb0f281", - "mediaType": "application/vnd.docker.distribution.manifest.v2+json", - "platform": { - "architecture": "arm64", - "os": "linux", - "variant": "v8" - }, - "size": 528 - }, -... - ] -} ----- - -. If the previous command does not output platform information, then the specified base image might be a single image instead of an image manifest. You can find which architectures an image supports by running the following command: -+ -[source,terminal] ----- -$ docker inspect ----- - -. For Go-based Operator projects, the Operator SDK explicitly references the `amd64` architecture in your project's Dockerfile. 
Make the following change -to your Dockerfile to set an environment variable to the value specified by the platform flag: -+ -.Example Dockerfile -[source,docker] ----- -FROM golang:1.19 as builder -ARG TARGETOS -ARG TARGETARCH -... -RUN CGO_ENABLED=0 GOOS=${TARGETOS:-linux} GOARCH=${TARGETARCH} go build -a -o manager main.go <1> ----- -<1> Change the `GOARCH` field from `amd64` to `$TARGETARCH`. - -. Your Operator project's makefile defines the `PLATFORMS` environment variable. If your Operator's images do not support all of the platforms set by default, edit the variable to specify the supported platforms. The following example defines the supported platforms as `linux/arm64` and `linux/amd64`: -+ -.Example makefile -[source,make] ----- -# ... -PLATFORMS ?= linux/arm64,linux/amd64 <1> -.PHONY: docker-buildx -# ... ----- -+ -<1> The following `PLATFORMS` values are set by default: `linux/arm64`, `linux/amd64`, `linux/s390x`, and `linux/ppc64le`. -+ -When you run the `make docker buildx` command to generate a manifest list, the Operator SDK creates an image manifest for each of the platforms specified by the `PLATFORMS` variable. - -. Run the following command from your Operator project directory to build your manager image. Running the command builds a manager image with multi-platform support and pushes the manifest list to your registry. 
-+ -[source,terminal] ----- -$ make docker-buildx \ - IMG=//: ----- diff --git a/modules/osdk-multi-arch-node-affinity.adoc b/modules/osdk-multi-arch-node-affinity.adoc deleted file mode 100644 index 8f21b12629..0000000000 --- a/modules/osdk-multi-arch-node-affinity.adoc +++ /dev/null @@ -1,13 +0,0 @@ -// Module included in the following assemblies: -// -// * operators/operator_sdk/osdk-multi-arch-support.adoc - -:_mod-docs-content-type: CONCEPT -[id="osdk-multi-arch-node-affinity_{context}"] -= About node affinity rules for multi-architecture compute machines and Operator workloads - -You must set node affinity rules to ensure your Operator workloads can run on multi-architecture compute machines. Node affinity is a set of rules used by the scheduler to define a pod's placement. Setting node affinity rules ensures your Operator's workloads are scheduled to compute machines with compatible architectures. - -If your Operator performs better on particular architectures, you can set preferred node affinity rules to schedule pods to machines with the specified architectures. - -For more information, see "About clusters with multi-architecture compute machines" and "Controlling pod placement on nodes using node affinity rules". diff --git a/modules/osdk-multi-arch-node-preference.adoc b/modules/osdk-multi-arch-node-preference.adoc deleted file mode 100644 index 7210883df9..0000000000 --- a/modules/osdk-multi-arch-node-preference.adoc +++ /dev/null @@ -1,63 +0,0 @@ -// Module included in the following assemblies: -// -// * operators/operator_sdk/osdk-multi-arch-support.adoc - -:_mod-docs-content-type: PROCEDURE -[id="osdk-multi-arch-node-preference_{context}"] -= Using preferred node affinity rules to configure support for multi-architecture compute machines for Operator projects - -If your Operator performs better on particular architectures, you can configure preferred node affinity rules to schedule pods to nodes to the specified architectures. 
- -.Prerequisites - -* An Operator project created or maintained with Operator SDK {osdk_ver} or later. -* A manifest list defining the platforms your Operator supports. -* Required node affinity rules are set for your Operator project. - -.Procedure - -. Search your Operator project for Kubernetes manifests that define pod spec and pod template spec objects. -+ -.Example Kubernetes manifest -[source,yaml] ----- -apiVersion: v1 -kind: Pod -metadata: - name: s1 -spec: - containers: - - name: - image: docker.io// ----- - -. Set your Operator's preferred node affinity rules in the Kubernetes manifests that define pod spec and pod template spec objects, similar to the following example: -+ -.Example Kubernetes manifest -[source,yaml] ----- -apiVersion: v1 -kind: Pod -metadata: - name: s1 -spec: - containers: - - name: - image: docker.io// - affinity: - nodeAffinity: - preferredDuringSchedulingIgnoredDuringExecution: <1> - - preference: - matchExpressions: <2> - - key: kubernetes.io/arch <3> - operator: In <4> - values: - - amd64 - - arm64 - weight: 90 <5> ----- -<1> Defines a preferred rule. -<2> If you specify multiple `matchExpressions` associated with `nodeSelectorTerms`, then the pod can be scheduled onto a node only if all `matchExpressions` are satisfied. -<3> Specifies the architectures defined in the manifest list. -<4> Specifies an `operator`. The Operator can be `In`, `NotIn`, `Exists`, or `DoesNotExist`. For example, use the value of `In` to require the label to be in the node. -<5> Specifies a weight for the node, valid values are `1`-`100`. The node with highest weight is preferred. 
diff --git a/modules/osdk-multi-arch-node-reqs.adoc b/modules/osdk-multi-arch-node-reqs.adoc deleted file mode 100644 index f3d688c661..0000000000 --- a/modules/osdk-multi-arch-node-reqs.adoc +++ /dev/null @@ -1,130 +0,0 @@ -// Module included in the following assemblies: -// -// * operators/operator_sdk/osdk-multi-arch-support.adoc - -:_mod-docs-content-type: PROCEDURE -[id="osdk-multi-arch-node-reqs_{context}"] -= Using required node affinity rules to support multi-architecture compute machines for Operator projects - -If you want your Operator to support multi-architecture compute machines, you must define your Operator's required node affinity rules. - -.Prerequisites - -* An Operator project created or maintained with Operator SDK {osdk_ver} or later. -* A manifest list defining the platforms your Operator supports. - -.Procedure - -. Search your Operator project for Kubernetes manifests that define pod spec and pod template spec objects. -+ -[IMPORTANT] -==== -Because object type names are not declared in YAML files, look for the mandatory `containers` field in your Kubernetes manifests. The `containers` field is required when specifying both pod spec and pod template spec objects. - -You must set node affinity rules in all Kubernetes manifests that define a pod spec or pod template spec, including objects such as `Pod`, `Deployment`, `DaemonSet`, and `StatefulSet`. -==== -+ -.Example Kubernetes manifest -[source,yaml] ----- -apiVersion: v1 -kind: Pod -metadata: - name: s1 -spec: - containers: - - name: - image: docker.io// ----- - -. 
Set the required node affinity rules in the Kubernetes manifests that define pod spec and pod template spec objects, similar to the following example: -+ -.Example Kubernetes manifest -[source,yaml] ----- -apiVersion: v1 -kind: Pod -metadata: - name: s1 -spec: - containers: - - name: - image: docker.io// - affinity: - nodeAffinity: - requiredDuringSchedulingIgnoredDuringExecution: <1> - nodeSelectorTerms: <2> - - matchExpressions: <3> - - key: kubernetes.io/arch <4> - operator: In - values: - - amd64 - - arm64 - - ppc64le - - s390x - - key: kubernetes.io/os <5> - operator: In - values: - - linux ----- -<1> Defines a required rule. -<2> If you specify multiple `nodeSelectorTerms` associated with `nodeAffinity` types, then the pod can be scheduled onto a node if one of the `nodeSelectorTerms` is satisfied. -<3> If you specify multiple `matchExpressions` associated with `nodeSelectorTerms`, then the pod can be scheduled onto a node only if all `matchExpressions` are satisfied. -<4> Specifies the architectures defined in the manifest list. -<5> Specifies the operating systems defined in the manifest list. - -. Go-based Operator projects that use dynamically created workloads might embed pod spec and pod template spec objects in the Operator's logic. -+ -If your project embeds pod spec or pod template spec objects in the Operator's logic, edit your Operator's logic similar to the following example. The following example shows how to update a `PodSpec` object by using the Go API: -+ -[source,go] ----- -Template: corev1.PodTemplateSpec{ - ... 
- Spec: corev1.PodSpec{ - Affinity: &corev1.Affinity{ - NodeAffinity: &corev1.NodeAffinity{ - RequiredDuringSchedulingIgnoredDuringExecution: &corev1.NodeSelector{ - NodeSelectorTerms: []corev1.NodeSelectorTerm{ - { - MatchExpressions: []corev1.NodeSelectorRequirement{ - { - Key: "kubernetes.io/arch", - Operator: "In", - Values: []string{"amd64","arm64","ppc64le","s390x"}, - }, - { - Key: "kubernetes.io/os", - Operator: "In", - Values: []string{"linux"}, - }, - }, - }, - }, - }, - }, - }, - SecurityContext: &corev1.PodSecurityContext{ - ... - }, - Containers: []corev1.Container{{ - ... - }}, - }, ----- -+ -where: - -`RequiredDuringSchedulingIgnoredDuringExecution`:: Defines a required rule. -`NodeSelectorTerms`:: If you specify multiple `nodeSelectorTerms` associated with `nodeAffinity` types, then the pod can be scheduled onto a node if one of the `nodeSelectorTerms` is satisfied. -`MatchExpressions`:: If you specify multiple `matchExpressions` associated with `nodeSelectorTerms`, then the pod can be scheduled onto a node only if all `matchExpressions` are satisfied. -`kubernetes.io/arch`:: Specifies the architectures defined in the manifest list. -`kubernetes.io/os`:: Specifies the operating systems defined in the manifest list. - -[WARNING] -==== -If you do not set node affinity rules and a container is scheduled to a compute machine with an incompatible architecture, the pod fails and triggers one of the following events: - -`CrashLoopBackOff`:: Occurs when an image manifest's entry point fails to run and an `exec format error` message is printed in the logs. -`ImagePullBackOff`:: Occurs when a manifest list does not include a manifest for the architecture where a pod is scheduled or the node affinity terms are set to the wrong values. 
-==== diff --git a/modules/osdk-multi-arch-validate.adoc b/modules/osdk-multi-arch-validate.adoc deleted file mode 100644 index 7c757f8626..0000000000 --- a/modules/osdk-multi-arch-validate.adoc +++ /dev/null @@ -1,43 +0,0 @@ -// Module included in the following assemblies: -// -// * operators/operator_sdk/osdk-multi-arch-support.adoc - -:_mod-docs-content-type: PROCEDURE -[id="osdk-multi-arch-validate_{context}"] -= Validating your Operator's multi-platform readiness - -You can validate your Operator's multi-platform readiness by running the `bundle validate` command. The command verifies that your Operator project meets the following conditions: - -* Your Operator's manager image supports the platforms labeled in the cluster service version (CSV) file. -* Your Operator's CSV has labels for the supported platforms for Operator Lifecycle Manager (OLM) and OperatorHub. - -.Procedure - -* Run the following command to validate your Operator project for multiple architecture readiness: -+ -[source,terminal] ----- -$ operator-sdk bundle validate ./bundle \ - --select-optional name=multiarch ----- -+ -.Example validation message -[source,text] ----- -INFO[0020] All validation tests have completed successfully ----- -+ -.Example error message for missing CSV labels in the manager image -[source,text] ----- -ERRO[0016] Error: Value test-operator.v0.0.1: not all images specified are providing the support described via the CSV labels. Note that (SO.architecture): (linux.ppc64le) was not found for the image(s) [quay.io/example-org/test-operator:v1alpha1] -ERRO[0016] Error: Value test-operator.v0.0.1: not all images specified are providing the support described via the CSV labels. Note that (SO.architecture): (linux.s390x) was not found for the image(s) [quay.io/example-org/test-operator:v1alpha1] -ERRO[0016] Error: Value test-operator.v0.0.1: not all images specified are providing the support described via the CSV labels. 
Note that (SO.architecture): (linux.amd64) was not found for the image(s) [quay.io/example-org/test-operator:v1alpha1] -ERRO[0016] Error: Value test-operator.v0.0.1: not all images specified are providing the support described via the CSV labels. Note that (SO.architecture): (linux.arm64) was not found for the image(s) [quay.io/example-org/test-operator:v1alpha1] ----- -+ -.Example error message for missing OperatorHub flags -[source,text] ----- -WARN[0014] Warning: Value test-operator.v0.0.1: check if the CSV is missing the label (operatorframework.io/arch.) for the Arch(s): ["amd64" "arm64" "ppc64le" "s390x"]. Be aware that your Operator manager image ["quay.io/example-org/test-operator:v1alpha1"] provides this support. Thus, it is very likely that you want to provide it and if you support more than amd64 architectures, you MUST,use the required labels for all which are supported.Otherwise, your solution cannot be listed on the cluster for these architectures ----- diff --git a/modules/osdk-operatorconditions.adoc b/modules/osdk-operatorconditions.adoc deleted file mode 100644 index a41439fbd5..0000000000 --- a/modules/osdk-operatorconditions.adoc +++ /dev/null @@ -1,79 +0,0 @@ -// Module included in the following assemblies: -// -// * operators/operator_sdk/osdk-generating-csvs.adoc - -:_mod-docs-content-type: PROCEDURE -[id="osdk-operatorconditions_{context}"] -= Enabling Operator conditions - -Operator Lifecycle Manager (OLM) provides Operators with a channel to communicate complex states that influence OLM behavior while managing the Operator. By default, OLM creates an `OperatorCondition` custom resource definition (CRD) when it installs an Operator. Based on the conditions set in the `OperatorCondition` custom resource (CR), the behavior of OLM changes accordingly. 
- -To support Operator conditions, an Operator must be able to read the `OperatorCondition` CR created by OLM and have the ability to complete the following tasks: - -* Get the specific condition. -* Set the status of a specific condition. - -This can be accomplished by using the link:https://github.com/operator-framework/operator-lib/tree/v0.11.0[`operator-lib`] library. An Operator author can provide a link:https://github.com/kubernetes-sigs/controller-runtime/tree/master/pkg/client[`controller-runtime` client] in their Operator for the library to access the `OperatorCondition` CR owned by the Operator in the cluster. - -The library provides a generic `Conditions` interface, which has the following methods to `Get` and `Set` a `conditionType` in the `OperatorCondition` CR: - -`Get`:: To get the specific condition, the library uses the `client.Get` function from `controller-runtime`, which requires an `ObjectKey` of type `types.NamespacedName` present in `conditionAccessor`. - -`Set`:: To update the status of the specific condition, the library uses the `client.Update` function from `controller-runtime`. An error occurs if the `conditionType` is not present in the CRD. - -The Operator is allowed to modify only the `status` subresource of the CR. Operators can either delete or update the `status.conditions` array to include the condition. For more details on the format and description of the fields present in the conditions, see the upstream link:https://godoc.org/k8s.io/apimachinery/pkg/apis/meta/v1#Condition[Condition GoDocs]. - -[NOTE] -==== -Operator SDK {osdk_ver} supports `operator-lib` v0.11.0. -==== - -.Prerequisites - -* An Operator project generated using the Operator SDK. - -.Procedure - -To enable Operator conditions in your Operator project: - -. 
In the `go.mod` file of your Operator project, add `operator-framework/operator-lib` as a required library: -+ -[source,go] ----- -module github.com/example-inc/memcached-operator - -go 1.19 - -require ( - k8s.io/apimachinery v0.26.0 - k8s.io/client-go v0.26.0 - sigs.k8s.io/controller-runtime v0.14.1 - operator-framework/operator-lib v0.11.0 -) ----- - -. Write your own constructor in your Operator logic that will result in the following outcomes: -+ --- -* Accepts a `controller-runtime` client. -* Accepts a `conditionType`. -* Returns a `Condition` interface to update or add conditions. --- -+ -Because OLM currently supports the `Upgradeable` condition, you can create an interface that has methods to access the `Upgradeable` condition. For example: -+ -[source,go] ----- -import ( - ... - apiv1 "github.com/operator-framework/api/pkg/operators/v1" -) - -func NewUpgradeable(cl client.Client) (Condition, error) { - return NewCondition(cl, "apiv1.OperatorUpgradeable") -} - -cond, err := NewUpgradeable(cl); ----- -+ -In this example, the `NewUpgradeable` constructor is further used to create a variable `cond` of type `Condition`. The `cond` variable would in turn have `Get` and `Set` methods, which can be used for handling the OLM `Upgradeable` condition. diff --git a/modules/osdk-owned-crds.adoc b/modules/osdk-owned-crds.adoc deleted file mode 100644 index e1d7a67064..0000000000 --- a/modules/osdk-owned-crds.adoc +++ /dev/null @@ -1,117 +0,0 @@ -// Module included in the following assemblies: -// -// * operators/operator_sdk/osdk-generating-csvs.adoc - -[id="osdk-crds-owned_{context}"] -= Owned CRDs - -The custom resource definitions (CRDs) owned by your Operator are the most important part of your CSV. This establishes the link between your Operator and the required RBAC rules, dependency management, and other Kubernetes concepts. 
- -It is common for your Operator to use multiple CRDs to link together concepts, such as top-level database configuration in one object and a representation of replica sets in another. Each one should be listed out in the CSV file. - -.Owned CRD fields -[cols="2a,5a,2",options="header"] -|=== -|Field |Description |Required/optional - -|`Name` -|The full name of your CRD. -|Required - -|`Version` -|The version of that object API. -|Required - -|`Kind` -|The machine readable name of your CRD. -|Required - -|`DisplayName` -|A human readable version of your CRD name, for example `MongoDB Standalone`. -|Required - -|`Description` -|A short description of how this CRD is used by the Operator or a description of the functionality provided by the CRD. -|Required - -|`Group` -|The API group that this CRD belongs to, for example `database.example.com`. -|Optional - -|`Resources` -a|Your CRDs own one or more types of Kubernetes objects. These are listed in the `resources` section to inform your users of the objects they might need to troubleshoot or how to connect to the application, such as the service or ingress rule that exposes a database. - -It is recommended to only list out the objects that are important to a human, not an exhaustive list of everything you orchestrate. For example, do not list config maps that store internal state that are not meant to be modified by a user. -|Optional - -|`SpecDescriptors`, `StatusDescriptors`, and `ActionDescriptors` -a|These descriptors are a way to hint UIs with certain inputs or outputs of your Operator that are most important to an end user. If your CRD contains the name of a secret or config map that the user must provide, you can specify that here. These items are linked and highlighted in compatible UIs. - -There are three types of descriptors: - -* `SpecDescriptors`: A reference to fields in the `spec` block of an object. -* `StatusDescriptors`: A reference to fields in the `status` block of an object. 
-* `ActionDescriptors`: A reference to actions that can be performed on an object. - -All descriptors accept the following fields: - -* `DisplayName`: A human readable name for the `Spec`, `Status`, or `Action`. -* `Description`: A short description of the `Spec`, `Status`, or `Action` and how it is used by the Operator. -* `Path`: A dot-delimited path of the field on the object that this descriptor describes. -* `X-Descriptors`: Used to determine which "capabilities" this descriptor has and which UI component to use. See the *openshift/console* project for a canonical link:https://github.com/openshift/console/tree/release-4.3/frontend/packages/operator-lifecycle-manager/src/components/descriptors/types.ts[list of React UI X-Descriptors] for {product-title}. - -Also see the *openshift/console* project for more information on link:https://github.com/openshift/console/tree/release-4.3/frontend/packages/operator-lifecycle-manager/src/components/descriptors[Descriptors] in general. -|Optional - -|=== - -The following example depicts a `MongoDB Standalone` CRD that requires some user input in the form of a secret and config map, and orchestrates services, stateful sets, pods and config maps: - -[id="osdk-crds-owned-example_{context}"] -.Example owned CRD -[source,yaml] ----- - - displayName: MongoDB Standalone - group: mongodb.com - kind: MongoDbStandalone - name: mongodbstandalones.mongodb.com - resources: - - kind: Service - name: '' - version: v1 - - kind: StatefulSet - name: '' - version: v1beta2 - - kind: Pod - name: '' - version: v1 - - kind: ConfigMap - name: '' - version: v1 - specDescriptors: - - description: Credentials for Ops Manager or Cloud Manager. - displayName: Credentials - path: credentials - x-descriptors: - - 'urn:alm:descriptor:com.tectonic.ui:selector:core:v1:Secret' - - description: Project this deployment belongs to. 
- displayName: Project - path: project - x-descriptors: - - 'urn:alm:descriptor:com.tectonic.ui:selector:core:v1:ConfigMap' - - description: MongoDB version to be installed. - displayName: Version - path: version - x-descriptors: - - 'urn:alm:descriptor:com.tectonic.ui:label' - statusDescriptors: - - description: The status of each of the pods for the MongoDB cluster. - displayName: Pod Status - path: pods - x-descriptors: - - 'urn:alm:descriptor:com.tectonic.ui:podStatuses' - version: v1 - description: >- - MongoDB Deployment consisting of only one host. No replication of - data. ----- diff --git a/modules/osdk-project-file.adoc b/modules/osdk-project-file.adoc deleted file mode 100644 index 8f0f27d518..0000000000 --- a/modules/osdk-project-file.adoc +++ /dev/null @@ -1,92 +0,0 @@ -// Module included in the following assemblies: -// -// * operators/operator_sdk/golang/osdk-golang-tutorial.adoc -// * operators/operator_sdk/ansible/osdk-ansible-tutorial.adoc -// * operators/operator_sdk/helm/osdk-helm-tutorial.adoc - -ifeval::["{context}" == "osdk-golang-tutorial"] -:golang: -:type: Go -:app: memcached -endif::[] -ifeval::["{context}" == "osdk-ansible-tutorial"] -:ansible: -:type: Ansible -:app: memcached -endif::[] -ifeval::["{context}" == "osdk-helm-tutorial"] -:helm: -:type: Helm -:app: nginx -endif::[] - -[id="osdk-project-file_{context}"] -= PROJECT file - -Among the files generated by the `operator-sdk init` command is a Kubebuilder `PROJECT` file. Subsequent `operator-sdk` commands, as well as `help` output, that are run from the project root read this file and are aware that the project type is {type}. 
For example: - -[source,yaml] -ifdef::golang[] ----- -domain: example.com -layout: -- go.kubebuilder.io/v3 -projectName: memcached-operator -repo: github.com/example-inc/memcached-operator -version: "3" -plugins: - manifests.sdk.operatorframework.io/v2: {} - scorecard.sdk.operatorframework.io/v2: {} - sdk.x-openshift.io/v1: {} ----- -endif::[] -ifdef::ansible[] ----- -domain: example.com -layout: -- ansible.sdk.operatorframework.io/v1 -plugins: - manifests.sdk.operatorframework.io/v2: {} - scorecard.sdk.operatorframework.io/v2: {} - sdk.x-openshift.io/v1: {} -projectName: memcached-operator -version: "3" ----- -endif::[] -ifdef::helm[] ----- -domain: example.com -layout: -- helm.sdk.operatorframework.io/v1 -plugins: - manifests.sdk.operatorframework.io/v2: {} - scorecard.sdk.operatorframework.io/v2: {} - sdk.x-openshift.io/v1: {} -projectName: nginx-operator -resources: -- api: - crdVersion: v1 - namespaced: true - domain: example.com - group: demo - kind: Nginx - version: v1 -version: "3" ----- -endif::[] - -ifeval::["{context}" == "osdk-golang-tutorial"] -:!golang: -:!type: -:!app: -endif::[] -ifeval::["{context}" == "osdk-ansible-tutorial"] -:!ansible: -:!type: -:!app: -endif::[] -ifeval::["{context}" == "osdk-helm-tutorial"] -:!helm: -:!type: -:!app: -endif::[] \ No newline at end of file diff --git a/modules/osdk-pruning-utility-about.adoc b/modules/osdk-pruning-utility-about.adoc deleted file mode 100644 index 49833aa0e7..0000000000 --- a/modules/osdk-pruning-utility-about.adoc +++ /dev/null @@ -1,23 +0,0 @@ -// Module included in the following assemblies: -// -// * operators/operator_sdk/osdk-pruning-utility.adoc - -:_mod-docs-content-type: CONCEPT -[id="osdk-about-pruning-utility_{context}"] -= About the operator-lib pruning utility - -Objects, such as jobs or pods, are created as a normal part of the Operator life cycle. 
If -ifndef::openshift-dedicated,openshift-rosa[] -the cluster administrator -endif::openshift-dedicated,openshift-rosa[] -ifdef::openshift-dedicated,openshift-rosa[] -an administrator with the `dedicated-admin` role -endif::openshift-dedicated,openshift-rosa[] -or the Operator does not remove these object, they can stay in the cluster and consume resources. - -Previously, the following options were available for pruning unnecessary objects: - -* Operator authors had to create a unique pruning solution for their Operators. -* Cluster administrators had to clean up objects on their own. - -The `operator-lib` link:https://github.com/operator-framework/operator-lib/tree/main/prune[pruning utility] removes objects from a Kubernetes cluster for a given namespace. The library was added in version `0.9.0` of the link:https://github.com/operator-framework/operator-lib/releases/tag/v0.9.0[`operator-lib` library] as part of the Operator Framework. diff --git a/modules/osdk-pruning-utility-config.adoc b/modules/osdk-pruning-utility-config.adoc deleted file mode 100644 index f422c71e84..0000000000 --- a/modules/osdk-pruning-utility-config.adoc +++ /dev/null @@ -1,86 +0,0 @@ -// Module included in the following assemblies: -// -// * operators/operator_sdk/osdk-pruning-utility.adoc - -:_mod-docs-content-type: REFERENCE -[id="osdk-pruning-utility-config_{context}"] -= Pruning utility configuration - -The `operator-lib` pruning utility is written in Go and includes common pruning strategies for Go-based Operators. 
- -.Example configuration -[source,go] ----- -cfg = Config{ - log: logf.Log.WithName("prune"), - DryRun: false, - Clientset: client, - LabelSelector: "app=", - Resources: []schema.GroupVersionKind{ - {Group: "", Version: "", Kind: PodKind}, - }, - Namespaces: []string{""}, - Strategy: StrategyConfig{ - Mode: MaxCountStrategy, - MaxCountSetting: 1, - }, - PreDeleteHook: myhook, -} ----- - -The pruning utility configuration file defines pruning actions by using the following fields: - -[cols="3,7",options="header"] -|=== -|Configuration field |Description - -|`log` -|Logger used to handle library log messages. - -|`DryRun` -|Boolean that determines whether resources should be removed. If set to `true`, the utility runs but does not to remove resources. - -|`Clientset` -|Client-go Kubernetes ClientSet used for Kubernetes API calls. - -|`LabelSelector` -|Kubernetes label selector expression used to find resources to prune. - -|`Resources` -|Kubernetes resource kinds. `PodKind` and `JobKind` are currently supported. - -|`Namespaces` -|List of Kubernetes namespaces to search for resources. - -|`Strategy` -|Pruning strategy to run. - -|`Strategy.Mode` -|`MaxCountStrategy`, `MaxAgeStrategy`, or `CustomStrategy` are currently supported. - -|`Strategy.MaxCountSetting` -|Integer value for `MaxCountStrategy` that specifies how many resources should remain after the pruning utility runs. - -|`Strategy.MaxAgeSetting` -|Go `time.Duration` string value, such as `48h`, that specifies the age of resources to prune. - -|`Strategy.CustomSettings` -|Go map of values that can be passed into a custom strategy function. - -|`PreDeleteHook` -|Optional: Go function to call before pruning a resource. - -|`CustomStrategy` -|Optional: Go function that implements a custom pruning strategy. -|=== - -.Pruning execution - -You can call the pruning action by running the execute function on the pruning configuration. 
- -[source,go] ----- -err := cfg.Execute(ctx) ----- - -You can also call a pruning action by using a cron package or by calling the pruning utility with a triggering event. diff --git a/modules/osdk-publish-catalog.adoc b/modules/osdk-publish-catalog.adoc deleted file mode 100644 index e181894ee6..0000000000 --- a/modules/osdk-publish-catalog.adoc +++ /dev/null @@ -1,192 +0,0 @@ -// Module included in the following assemblies: -// -// * operators/operator_sdk/osdk-working-bundle-images.adoc - -:_mod-docs-content-type: PROCEDURE -[id="osdk-publish-catalog_{context}"] -= Publishing a catalog containing a bundled Operator - -To install and manage Operators, Operator Lifecycle Manager (OLM) requires that Operator bundles are listed in an index image, which is referenced by a catalog on the cluster. As an Operator author, you can use the Operator SDK to create an index containing the bundle for your Operator and all of its dependencies. This is useful for testing on remote clusters and publishing to container registries. - -[NOTE] -==== -The Operator SDK uses the `opm` CLI to facilitate index image creation. Experience with the `opm` command is not required. For advanced use cases, the `opm` command can be used directly instead of the Operator SDK. -==== - -.Prerequisites - -- Operator SDK CLI installed on a development workstation -- Operator bundle image built and pushed to a registry -- OLM installed on a Kubernetes-based cluster (v1.16.0 or later if you use `apiextensions.k8s.io/v1` CRDs, for example {product-title} {product-version}) -ifndef::openshift-dedicated,openshift-rosa[] -- Logged in to the cluster with `oc` using an account with `cluster-admin` permissions -endif::openshift-dedicated,openshift-rosa[] -ifdef::openshift-dedicated,openshift-rosa[] -- Logged in to the cluster with `oc` using an account with `dedicated-admin` permissions -endif::openshift-dedicated,openshift-rosa[] - -.Procedure - -. 
Run the following `make` command in your Operator project directory to build an index image containing your Operator bundle: -+ -[source,terminal] ----- -$ make catalog-build CATALOG_IMG=//: ----- -+ -where the `CATALOG_IMG` argument references a repository that you have access to. You can obtain an account for storing containers at repository sites such as Quay.io. - -. Push the built index image to a repository: -+ -[source,terminal] ----- -$ make catalog-push CATALOG_IMG=//: ----- -+ -[TIP] -==== -You can use Operator SDK `make` commands together if you would rather perform multiple actions in sequence at once. For example, if you had not yet built a bundle image for your Operator project, you can build and push both a bundle image and an index image with the following syntax: - -[source,terminal] ----- -$ make bundle-build bundle-push catalog-build catalog-push \ - BUNDLE_IMG= \ - CATALOG_IMG= ----- - -Alternatively, you can set the `IMAGE_TAG_BASE` field in your `Makefile` to an existing repository: - -[source,terminal] ----- -IMAGE_TAG_BASE=quay.io/example/my-operator ----- - -You can then use the following syntax to build and push images with automatically-generated names, such as `quay.io/example/my-operator-bundle:v0.0.1` for the bundle image and `quay.io/example/my-operator-catalog:v0.0.1` for the index image: - -[source,terminal] ----- -$ make bundle-build bundle-push catalog-build catalog-push ----- -==== - -. 
Define a `CatalogSource` object that references the index image you just generated, and then create the object by using the `oc apply` command or web console: -+ -.Example `CatalogSource` YAML -[source,yaml] ----- -apiVersion: operators.coreos.com/v1alpha1 -kind: CatalogSource -metadata: - name: cs-memcached - namespace: -spec: - displayName: My Test - publisher: Company - sourceType: grpc - grpcPodConfig: - securityContextConfig: <1> - image: quay.io/example/memcached-catalog:v0.0.1 <2> - updateStrategy: - registryPoll: - interval: 10m ----- -<1> Specify the value of `legacy` or `restricted`. If the field is not set, the default value is `legacy`. In a future {product-title} release, it is planned that the default value will be `restricted`. If your catalog cannot run with `restricted` permissions, it is recommended that you manually set this field to `legacy`. -<2> Set `image` to the image pull spec you used previously with the `CATALOG_IMG` argument. - -. Check the catalog source: -+ -[source,terminal] ----- -$ oc get catalogsource ----- -+ -.Example output -[source,terminal] ----- -NAME DISPLAY TYPE PUBLISHER AGE -cs-memcached My Test grpc Company 4h31m ----- - -.Verification - -. Install the Operator using your catalog: - -.. Define an `OperatorGroup` object and create it by using the `oc apply` command or web console: -+ -.Example `OperatorGroup` YAML -[source,yaml] ----- -apiVersion: operators.coreos.com/v1 -kind: OperatorGroup -metadata: - name: my-test - namespace: -spec: - targetNamespaces: - - ----- - -.. Define a `Subscription` object and create it by using the `oc apply` command or web console: -+ -.Example `Subscription` YAML -[source,yaml] ----- -apiVersion: operators.coreos.com/v1alpha1 -kind: Subscription -metadata: - name: catalogtest - namespace: -spec: - channel: "alpha" - installPlanApproval: Manual - name: catalog - source: cs-memcached - sourceNamespace: - startingCSV: memcached-operator.v0.0.1 ----- - -. 
Verify the installed Operator is running: - -.. Check the Operator group: -+ -[source,terminal] ----- -$ oc get og ----- -+ -.Example output -[source,terminal] ----- -NAME AGE -my-test 4h40m ----- - -.. Check the cluster service version (CSV): -+ -[source,terminal] ----- -$ oc get csv ----- -+ -.Example output -[source,terminal] ----- -NAME DISPLAY VERSION REPLACES PHASE -memcached-operator.v0.0.1 Test 0.0.1 Succeeded ----- - -.. Check the pods for the Operator: -+ -[source,terminal] ----- -$ oc get pods ----- -+ -.Example output -[source,terminal] ----- -NAME READY STATUS RESTARTS AGE -9098d908802769fbde8bd45255e69710a9f8420a8f3d814abe88b68f8ervdj6 0/1 Completed 0 4h33m -catalog-controller-manager-7fd5b7b987-69s4n 2/2 Running 0 4h32m -cs-memcached-7622r 1/1 Running 0 4h33m ----- diff --git a/modules/osdk-quickstart.adoc b/modules/osdk-quickstart.adoc deleted file mode 100644 index d0df298e44..0000000000 --- a/modules/osdk-quickstart.adoc +++ /dev/null @@ -1,226 +0,0 @@ -// Module included in the following assemblies: -// -// * operators/operator_sdk/golang/osdk-golang-quickstart.adoc -// * operators/operator_sdk/ansible/osdk-ansible-quickstart.adoc -// * operators/operator_sdk/helm/osdk-helm-quickstart.adoc - -ifeval::["{context}" == "osdk-golang-quickstart"] -:golang: -:type: Go -:app-proper: Memcached -:app: memcached -:group: cache -endif::[] -ifeval::["{context}" == "osdk-ansible-quickstart"] -:ansible: -:type: Ansible -:app-proper: Memcached -:app: memcached -:group: cache -endif::[] -ifeval::["{context}" == "osdk-helm-quickstart"] -:helm: -:type: Helm -:app-proper: Nginx -:app: nginx -:group: demo -endif::[] - -:_mod-docs-content-type: PROCEDURE -[id="osdk-quickstart_{context}"] -= Creating and deploying {type}-based Operators - -You can build and deploy a simple {type}-based Operator for {app-proper} by using the Operator SDK. - -.Procedure - -. *Create a project.* - -.. 
Create your project directory: -+ -[source,terminal,subs="attributes+"] ----- -$ mkdir {app}-operator ----- - -.. Change into the project directory: -+ -[source,terminal,subs="attributes+"] ----- -$ cd {app}-operator ----- - -.. Run the `operator-sdk init` command -ifdef::ansible[] -with the `ansible` plugin -endif::[] -ifdef::helm[] -with the `helm` plugin -endif::[] -to initialize the project: -+ -[source,terminal,subs="attributes+"] -ifdef::golang[] ----- -$ operator-sdk init \ - --domain=example.com \ - --repo=github.com/example-inc/{app}-operator ----- -+ -The command uses the Go plugin by default. -endif::[] -ifdef::ansible[] ----- -$ operator-sdk init \ - --plugins=ansible \ - --domain=example.com ----- -endif::[] -ifdef::helm[] ----- -$ operator-sdk init \ - --plugins=helm ----- -endif::[] - -. *Create an API.* -+ -Create a simple {app-proper} API: -+ -[source,terminal,subs="attributes+"] -ifdef::golang[] ----- -$ operator-sdk create api \ - --resource=true \ - --controller=true \ - --group {group} \ - --version v1 \ - --kind {app-proper} ----- -endif::[] -ifdef::ansible[] ----- -$ operator-sdk create api \ - --group {group} \ - --version v1 \ - --kind {app-proper} \ - --generate-role <1> ----- -<1> Generates an Ansible role for the API. -endif::[] -ifdef::helm[] ----- -$ operator-sdk create api \ - --group {group} \ - --version v1 \ - --kind {app-proper} ----- -+ -This API uses the built-in Helm chart boilerplate from the `helm create` command. -endif::[] - -. *Build and push the Operator image.* -+ -Use the default `Makefile` targets to build and push your Operator. Set `IMG` with a pull spec for your image that uses a registry you can push to: -+ -[source,terminal] ----- -$ make docker-build docker-push IMG=//: ----- - -. *Run the Operator.* - -.. Install the CRD: -+ -[source,terminal] ----- -$ make install ----- - -.. Deploy the project to the cluster. 
Set `IMG` to the image that you pushed: -+ -[source,terminal] ----- -$ make deploy IMG=//: ----- - -ifdef::helm[] -. *Add a security context constraint (SCC).* -+ -The {app-proper} service account requires privileged access to run in {product-title}. Add the following SCC to the service account for the `{app}-sample` pod: -+ -[source,terminal,subs="attributes+"] ----- -$ oc adm policy add-scc-to-user \ - anyuid system:serviceaccount:{app}-operator-system:{app}-sample ----- -endif::[] - -. *Create a sample custom resource (CR).* - -.. Create a sample CR: -+ -[source,terminal,subs="attributes+"] ----- -$ oc apply -f config/samples/{group}_v1_{app}.yaml \ - -n {app}-operator-system ----- - -.. Watch for the CR to reconcile the Operator: -+ -[source,terminal,subs="attributes+"] ----- -$ oc logs deployment.apps/{app}-operator-controller-manager \ - -c manager \ - -n {app}-operator-system ----- -ifdef::ansible[] -+ -.Example output -[source,terminal] ----- -... -I0205 17:48:45.881666 7 leaderelection.go:253] successfully acquired lease memcached-operator-system/memcached-operator -{"level":"info","ts":1612547325.8819902,"logger":"controller-runtime.manager.controller.memcached-controller","msg":"Starting EventSource","source":"kind source: cache.example.com/v1, Kind=Memcached"} -{"level":"info","ts":1612547325.98242,"logger":"controller-runtime.manager.controller.memcached-controller","msg":"Starting Controller"} -{"level":"info","ts":1612547325.9824686,"logger":"controller-runtime.manager.controller.memcached-controller","msg":"Starting workers","worker count":4} -{"level":"info","ts":1612547348.8311093,"logger":"runner","msg":"Ansible-runner exited successfully","job":"4037200794235010051","name":"memcached-sample","namespace":"memcached-operator-system"} ----- -endif::[] - -. 
*Delete a CR.* -+ -Delete a CR by running the following command: -+ -[source,terminal,subs="attributes+"] ----- -$ oc delete -f config/samples/{group}_v1_{app}.yaml -n {app}-operator-system ----- - -. *Clean up.* -+ -Run the following command to clean up the resources that have been created as part of this procedure: -+ -[source,terminal] ----- -$ make undeploy ----- - -ifeval::["{context}" == "osdk-golang-quickstart"] -:!golang: -:!type: -:!app-proper: -:!app: -endif::[] -ifeval::["{context}" == "osdk-ansible-quickstart"] -:!ansible: -:!type: -:!app-proper: -:!app: -endif::[] -ifeval::["{context}" == "osdk-helm-quickstart"] -:!helm: -:!type: -:!app-proper: -:!app: -endif::[] diff --git a/modules/osdk-required-crds.adoc b/modules/osdk-required-crds.adoc deleted file mode 100644 index 8de26ca9e2..0000000000 --- a/modules/osdk-required-crds.adoc +++ /dev/null @@ -1,49 +0,0 @@ -// Module included in the following assemblies: -// -// * operators/operator_sdk/osdk-generating-csvs.adoc - -[id="osdk-crds-required_{context}"] -= Required CRDs - -Relying on other required CRDs is completely optional and only exists to reduce the scope of individual Operators and provide a way to compose multiple Operators together to solve an end-to-end use case. - -An example of this is an Operator that might set up an application and install an etcd cluster (from an etcd Operator) to use for distributed locking and a Postgres database (from a Postgres Operator) for data storage. - -Operator Lifecycle Manager (OLM) checks against the available CRDs and Operators in the cluster to fulfill these requirements. If suitable versions are found, the Operators are started within the desired namespace and a service account created for each Operator to create, watch, and modify the Kubernetes resources required. - -.Required CRD fields -[cols="2a,5a,2",options="header"] -|=== -|Field |Description |Required/optional - -|`Name` -|The full name of the CRD you require. 
-|Required - -|`Version` -|The version of that object API. -|Required - -|`Kind` -|The Kubernetes object kind. -|Required - -|`DisplayName` -|A human readable version of the CRD. -|Required - -|`Description` -|A summary of how the component fits in your larger architecture. -|Required -|=== - -.Example required CRD -[source,yaml] ----- - required: - - name: etcdclusters.etcd.database.coreos.com - version: v1beta2 - kind: EtcdCluster - displayName: etcd Cluster - description: Represents a cluster of etcd nodes. ----- diff --git a/modules/osdk-run-deployment.adoc b/modules/osdk-run-deployment.adoc deleted file mode 100644 index a3585d25da..0000000000 --- a/modules/osdk-run-deployment.adoc +++ /dev/null @@ -1,84 +0,0 @@ -// Module included in the following assemblies: -// -// * operators/operator_sdk/golang/osdk-golang-tutorial.adoc -// * operators/operator_sdk/ansible/osdk-ansible-tutorial.adoc -// * operators/operator_sdk/ansible/osdk-ansible-inside-operator.adoc -// * operators/operator_sdk/helm/osdk-helm-tutorial.adoc - -ifeval::["{context}" == "osdk-golang-tutorial"] -:golang: -endif::[] - -:_mod-docs-content-type: PROCEDURE -[id="osdk-run-deployment_{context}"] -ifeval::["{context}" != "osdk-ansible-inside-operator"] -= Running as a deployment on the cluster -endif::[] -ifeval::["{context}" == "osdk-ansible-inside-operator"] -= Testing an Ansible-based Operator on the cluster - -After you have tested your custom Ansible logic locally inside of an Operator, you can test the Operator inside of a pod on an {product-title} cluster, which is preferred for production use. -endif::[] - -You can run your Operator project as a deployment on your cluster. - -ifdef::golang[] -.Prerequisites - -* Prepared your Go-based Operator to run on {product-title} by updating the project to use supported images -endif::[] - -.Procedure - -. Run the following `make` commands to build and push the Operator image. 
Modify the `IMG` argument in the following steps to reference a repository that you have access to. You can obtain an account for storing containers at repository sites such as Quay.io. - -.. Build the image: -+ -[source,terminal] ----- -$ make docker-build IMG=//: ----- -+ -[NOTE] -==== -The Dockerfile generated by the SDK for the Operator explicitly references `GOARCH=amd64` for `go build`. This can be amended to `GOARCH=$TARGETARCH` for non-AMD64 architectures. Docker will automatically set the environment variable to the value specified by `--platform`. With Buildah, the `--build-arg` will need to be used for the purpose. For more information, see link:https://sdk.operatorframework.io/docs/advanced-topics/multi-arch/#supporting-multiple-architectures[Multiple Architectures]. -==== - -.. Push the image to a repository: -+ -[source,terminal] ----- -$ make docker-push IMG=//: ----- -+ -[NOTE] -==== -The name and tag of the image, for example `IMG=//:`, in both the commands can also be set in your Makefile. Modify the `IMG ?= controller:latest` value to set your default image name. -==== - -. Run the following command to deploy the Operator: -+ -[source,terminal] ----- -$ make deploy IMG=//: ----- -+ -By default, this command creates a namespace with the name of your Operator project in the form `-system` and is used for the deployment. This command also installs the RBAC manifests from `config/rbac`. 
Run the following command to verify that the Operator is running: -+ -[source,terminal] ----- -$ oc get deployment -n -system ----- -+ -.Example output -[source,terminal] ----- -NAME READY UP-TO-DATE AVAILABLE AGE --controller-manager 1/1 1 1 8m ----- - -ifeval::["{context}" == "osdk-golang-tutorial"] -:!golang: -endif::[] diff --git a/modules/osdk-run-locally.adoc b/modules/osdk-run-locally.adoc deleted file mode 100644 index db45a25e71..0000000000 --- a/modules/osdk-run-locally.adoc +++ /dev/null @@ -1,77 +0,0 @@ -// Module included in the following assemblies: -// -// * operators/operator_sdk/golang/osdk-golang-tutorial.adoc -// * operators/operator_sdk/ansible/osdk-ansible-tutorial.adoc -// * operators/operator_sdk/helm/osdk-helm-tutorial.adoc - -ifeval::["{context}" == "osdk-golang-tutorial"] -:golang: -endif::[] -ifeval::["{context}" == "osdk-ansible-tutorial"] -:ansible: -endif::[] -ifeval::["{context}" == "osdk-helm-tutorial"] -:helm: -endif::[] - - -:_mod-docs-content-type: PROCEDURE -[id="osdk-run-locally_{context}"] -= Running locally outside the cluster - -You can run your Operator project as a Go program outside of the cluster. This is useful for development purposes to speed up deployment and testing. - -.Procedure - -* Run the following command to install the custom resource definitions (CRDs) in the cluster configured in your `~/.kube/config` file and run the Operator locally: -+ -[source,terminal] ----- -$ make install run ----- -+ -.Example output -[source,terminal] -ifdef::golang[] ----- -... 
-2021-01-10T21:09:29.016-0700 INFO controller-runtime.metrics metrics server is starting to listen {"addr": ":8080"} -2021-01-10T21:09:29.017-0700 INFO setup starting manager -2021-01-10T21:09:29.017-0700 INFO controller-runtime.manager starting metrics server {"path": "/metrics"} -2021-01-10T21:09:29.018-0700 INFO controller-runtime.manager.controller.memcached Starting EventSource {"reconciler group": "cache.example.com", "reconciler kind": "Memcached", "source": "kind source: /, Kind="} -2021-01-10T21:09:29.218-0700 INFO controller-runtime.manager.controller.memcached Starting Controller {"reconciler group": "cache.example.com", "reconciler kind": "Memcached"} -2021-01-10T21:09:29.218-0700 INFO controller-runtime.manager.controller.memcached Starting workers {"reconciler group": "cache.example.com", "reconciler kind": "Memcached", "worker count": 1} ----- -endif::[] -ifdef::ansible[] ----- -... -{"level":"info","ts":1612589622.7888272,"logger":"ansible-controller","msg":"Watching resource","Options.Group":"cache.example.com","Options.Version":"v1","Options.Kind":"Memcached"} -{"level":"info","ts":1612589622.7897573,"logger":"proxy","msg":"Starting to serve","Address":"127.0.0.1:8888"} -{"level":"info","ts":1612589622.789971,"logger":"controller-runtime.manager","msg":"starting metrics server","path":"/metrics"} -{"level":"info","ts":1612589622.7899997,"logger":"controller-runtime.manager.controller.memcached-controller","msg":"Starting EventSource","source":"kind source: cache.example.com/v1, Kind=Memcached"} -{"level":"info","ts":1612589622.8904517,"logger":"controller-runtime.manager.controller.memcached-controller","msg":"Starting Controller"} -{"level":"info","ts":1612589622.8905244,"logger":"controller-runtime.manager.controller.memcached-controller","msg":"Starting workers","worker count":8} ----- -endif::[] -ifdef::helm[] ----- -... 
-{"level":"info","ts":1612652419.9289865,"logger":"controller-runtime.metrics","msg":"metrics server is starting to listen","addr":":8080"} -{"level":"info","ts":1612652419.9296563,"logger":"helm.controller","msg":"Watching resource","apiVersion":"demo.example.com/v1","kind":"Nginx","namespace":"","reconcilePeriod":"1m0s"} -{"level":"info","ts":1612652419.929983,"logger":"controller-runtime.manager","msg":"starting metrics server","path":"/metrics"} -{"level":"info","ts":1612652419.930015,"logger":"controller-runtime.manager.controller.nginx-controller","msg":"Starting EventSource","source":"kind source: demo.example.com/v1, Kind=Nginx"} -{"level":"info","ts":1612652420.2307851,"logger":"controller-runtime.manager.controller.nginx-controller","msg":"Starting Controller"} -{"level":"info","ts":1612652420.2309358,"logger":"controller-runtime.manager.controller.nginx-controller","msg":"Starting workers","worker count":8} ----- -endif::[] - -ifeval::["{context}" == "osdk-golang-tutorial"] -:!golang: -endif::[] -ifeval::["{context}" == "osdk-ansible-tutorial"] -:!ansible: -endif::[] -ifeval::["{context}" == "osdk-helm-tutorial"] -:!helm: -endif::[] \ No newline at end of file diff --git a/modules/osdk-run-operator.adoc b/modules/osdk-run-operator.adoc deleted file mode 100644 index c18c25ce29..0000000000 --- a/modules/osdk-run-operator.adoc +++ /dev/null @@ -1,66 +0,0 @@ -// Module included in the following assemblies: -// -// * operators/operator_sdk/golang/osdk-golang-tutorial.adoc -// * operators/operator_sdk/ansible/osdk-ansible-tutorial.adoc -// * operators/operator_sdk/helm/osdk-helm-tutorial.adoc - -ifeval::["{context}" == "osdk-golang-tutorial"] -:golang: -endif::[] -ifeval::["{context}" == "osdk-ansible-tutorial"] -:ansible: -endif::[] -ifeval::["{context}" == "osdk-helm-tutorial"] -:helm: -endif::[] - -[id="osdk-run-operator_{context}"] -= Running the Operator - -// The "run locally" and "run as a deployment" options require cluster-admin. 
Therefore, these options are not available for OSD/ROSA. - -// Deployment options for OCP -ifndef::openshift-dedicated,openshift-rosa[] -There are three ways you can use the Operator SDK CLI to build and run your Operator: - -* Run locally outside the cluster as a Go program. -* Run as a deployment on the cluster. -* Bundle your Operator and use Operator Lifecycle Manager (OLM) to deploy on the cluster. - -ifdef::golang[] -[NOTE] -==== -Before running your Go-based Operator as either a deployment on {product-title} or as a bundle that uses OLM, ensure that your project has been updated to use supported images. -==== -endif::[] -endif::openshift-dedicated,openshift-rosa[] - -// Deployment options for OSD/ROSA -ifdef::openshift-dedicated,openshift-rosa[] -To build and run your Operator, use the Operator SDK CLI to bundle your Operator, and then use Operator Lifecycle Manager (OLM) to deploy on the cluster. - -[NOTE] -==== -If you wish to deploy your Operator on an OpenShift Container Platform cluster instead of a {product-title} cluster, two additional deployment options are available: - -* Run locally outside the cluster as a Go program. -* Run as a deployment on the cluster. -==== - -ifdef::golang[] -[NOTE] -==== -Before running your Go-based Operator as a bundle that uses OLM, ensure that your project has been updated to use supported images. 
-==== -endif::[] -endif::openshift-dedicated,openshift-rosa[] - -ifeval::["{context}" == "osdk-golang-tutorial"] -:!golang: -endif::[] -ifeval::["{context}" == "osdk-ansible-tutorial"] -:!ansible: -endif::[] -ifeval::["{context}" == "osdk-helm-tutorial"] -:!helm: -endif::[] diff --git a/modules/osdk-run-proxy.adoc b/modules/osdk-run-proxy.adoc deleted file mode 100644 index 217b0435f5..0000000000 --- a/modules/osdk-run-proxy.adoc +++ /dev/null @@ -1,150 +0,0 @@ -// Module included in the following assemblies: -// -// * operators/operator_sdk/golang/osdk-golang-tutorial.adoc -// * operators/operator_sdk/ansible/osdk-ansible-tutorial.adoc -// * operators/operator_sdk/helm/osdk-helm-tutorial.adoc - -ifeval::["{context}" == "osdk-golang-tutorial"] -:golang: -endif::[] -ifeval::["{context}" == "osdk-ansible-tutorial"] -:ansible: -endif::[] -ifeval::["{context}" == "osdk-helm-tutorial"] -:helm: -endif::[] - -:_mod-docs-content-type: PROCEDURE -[id="osdk-run-proxy_{context}"] -= Enabling proxy support - -Operator authors can develop Operators that support network proxies. -ifndef::openshift-dedicated,openshift-rosa[] -Cluster administrators -endif::openshift-dedicated,openshift-rosa[] -ifdef::openshift-dedicated,openshift-rosa[] -Administrators with the `dedicated-admin` role -endif::openshift-dedicated,openshift-rosa[] -configure proxy support for the environment variables that are handled by Operator Lifecycle Manager (OLM). To support proxied clusters, your Operator must inspect the environment for the following standard proxy variables and pass the values to Operands: - -* `HTTP_PROXY` -* `HTTPS_PROXY` -* `NO_PROXY` - -[NOTE] -==== -This tutorial uses `HTTP_PROXY` as an example environment variable. -==== - -.Prerequisites -* A cluster with cluster-wide egress proxy enabled. - -.Procedure -ifdef::golang[] -. Edit the `controllers/memcached_controller.go` file to include the following: -.. 
Import the `proxy` package from the link:https://github.com/operator-framework/operator-lib[`operator-lib`] library: -+ -[source,golang] ----- -import ( - ... - "github.com/operator-framework/operator-lib/proxy" -) ----- - -.. Add the `proxy.ReadProxyVarsFromEnv` helper function to the reconcile loop and append the results to the Operand environments: -+ -[source,golang] ----- -for i, container := range dep.Spec.Template.Spec.Containers { - dep.Spec.Template.Spec.Containers[i].Env = append(container.Env, proxy.ReadProxyVarsFromEnv()...) -} -... ----- - -endif::[] - -ifdef::ansible[] -. Add the environment variables to the deployment by updating the `roles/memcached/tasks/main.yml` file with the following: -+ -[source,yaml] ----- -... -env: - - name: HTTP_PROXY - value: '{{ lookup("env", "HTTP_PROXY") | default("", True) }}' - - name: http_proxy - value: '{{ lookup("env", "HTTP_PROXY") | default("", True) }}' -... ----- - -endif::[] - -ifdef::helm[] -. Edit the `watches.yaml` file to include overrides based on an environment variable by adding the `overrideValues` field: -+ -[source,yaml] ----- -... -- group: demo.example.com - version: v1alpha1 - kind: Nginx - chart: helm-charts/nginx - overrideValues: - proxy.http: $HTTP_PROXY -... ----- - -. Add the `proxy.http` value in the `helm-charts/nginx/values.yaml` file: -+ -[source,yaml] ----- -... -proxy: - http: "" - https: "" - no_proxy: "" ----- - -. To make sure the chart template supports using the variables, edit the chart template in the `helm-charts/nginx/templates/deployment.yaml` file to contain the following: -+ -[source,yaml] ----- -containers: - - name: {{ .Chart.Name }} - securityContext: - - toYaml {{ .Values.securityContext | nindent 12 }} - image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}" - imagePullPolicy: {{ .Values.image.pullPolicy }} - env: - - name: http_proxy - value: "{{ .Values.proxy.http }}" ----- - -endif::[] - -. 
Set the environment variable on the Operator deployment by adding the following to the `config/manager/manager.yaml` file: -+ -[source,yaml] ----- -containers: - - args: - - --leader-elect - - --leader-election-id=ansible-proxy-demo - image: controller:latest - name: manager - env: - - name: "HTTP_PROXY" - value: "http_proxy_test" ----- - - -ifeval::["{context}" == "osdk-golang-tutorial"] -:!golang: -endif::[] -ifeval::["{context}" == "osdk-ansible-tutorial"] -:!ansible: -endif::[] -ifeval::["{context}" == "osdk-helm-tutorial"] -:!helm: -endif::[] diff --git a/modules/osdk-scorecard-about.adoc b/modules/osdk-scorecard-about.adoc deleted file mode 100644 index aef514f204..0000000000 --- a/modules/osdk-scorecard-about.adoc +++ /dev/null @@ -1,34 +0,0 @@ -// Module included in the following assemblies: -// -// * operators/operator_sdk/osdk-scorecard.adoc - -:_mod-docs-content-type: CONCEPT -[id="osdk-about-scorecard_{context}"] -= About the scorecard tool - -While the Operator SDK `bundle validate` subcommand can validate local bundle directories and remote bundle images for content and structure, you can use the `scorecard` command to run tests on your Operator based on a configuration file and test images. These tests are implemented within test images that are configured and constructed to be executed by the scorecard. - -The scorecard assumes it is run with access to a configured Kubernetes cluster, such as {product-title}. The scorecard runs each test within a pod, from which pod logs are aggregated and test results are sent to the console. The scorecard has built-in basic and Operator Lifecycle Manager (OLM) tests and also provides a means to execute custom test definitions. - -.Scorecard workflow -. Create all resources required by any related custom resources (CRs) and the Operator -. Create a proxy container in the deployment of the Operator to record calls to the API server and run tests -. 
Examine parameters in the CRs - -The scorecard tests make no assumptions as to the state of the Operator being tested. Creating Operators and CRs for an Operator is beyond the scope of the scorecard itself. Scorecard tests can, however, create whatever resources they require if the tests are designed for resource creation. - -.`scorecard` command syntax -[source,terminal] ----- -$ operator-sdk scorecard [flags] ----- - -The scorecard requires a positional argument for either the on-disk path to -your Operator bundle or the name of a bundle image. - -For further information about the flags, run: - -[source,terminal] ----- -$ operator-sdk scorecard -h ----- diff --git a/modules/osdk-scorecard-config.adoc b/modules/osdk-scorecard-config.adoc deleted file mode 100644 index 071544e15f..0000000000 --- a/modules/osdk-scorecard-config.adoc +++ /dev/null @@ -1,61 +0,0 @@ -// Module included in the following assemblies: -// -// * operators/operator_sdk/osdk-scorecard.adoc - -[id="osdk-scorecard-config_{context}"] -= Scorecard configuration - -The scorecard tool uses a configuration that allows you to configure internal plugins, as well as several global configuration options. Tests are driven by a configuration file named `config.yaml`, which is generated by the `make bundle` command, located in your `bundle/` directory: - -[source,terminal] ----- -./bundle -... 
-└── tests - └── scorecard - └── config.yaml ----- - -.Example scorecard configuration file -[source,yaml,subs="attributes+"] ----- -kind: Configuration -apiversion: scorecard.operatorframework.io/v1alpha3 -metadata: - name: config -stages: -- parallel: true - tests: - - image: quay.io/operator-framework/scorecard-test:v{osdk_ver} - entrypoint: - - scorecard-test - - basic-check-spec - labels: - suite: basic - test: basic-check-spec-test - - image: quay.io/operator-framework/scorecard-test:v{osdk_ver} - entrypoint: - - scorecard-test - - olm-bundle-validation - labels: - suite: olm - test: olm-bundle-validation-test ----- - -The configuration file defines each test that scorecard can execute. The -following fields of the scorecard configuration file define the test as follows: - -[cols="3,7",options="header"] -|=== -|Configuration field |Description - -|`image` -|Test container image name that implements a test - -|`entrypoint` -|Command and arguments that are invoked in the test image to execute a test - -|`labels` -|Scorecard-defined or custom labels that select which tests to run -|=== - diff --git a/modules/osdk-scorecard-custom-tests.adoc b/modules/osdk-scorecard-custom-tests.adoc deleted file mode 100644 index f28cb0a976..0000000000 --- a/modules/osdk-scorecard-custom-tests.adoc +++ /dev/null @@ -1,155 +0,0 @@ -// Module included in the following assemblies: -// -// * operators/operator_sdk/osdk-scorecard.adoc - -[id="osdk-scorecard-custom-tests_{context}"] -= Custom scorecard tests - -The scorecard tool can run custom tests that follow these mandated conventions: - -* Tests are implemented within a container image -* Tests accept an entrypoint which include a command and arguments -* Tests produce `v1alpha3` scorecard output in JSON format with no extraneous logging in the test output -* Tests can obtain the bundle contents at a shared mount point of `/bundle` -* Tests can access the Kubernetes API using an in-cluster client connection - -Writing custom 
tests in other programming languages is possible if the test -image follows the above guidelines. - -The following example shows of a custom test image written in Go: - -.Example custom scorecard test -[%collapsible] -==== -[source,go] ----- -// Copyright 2020 The Operator-SDK Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package main - -import ( - "encoding/json" - "fmt" - "log" - "os" - - scapiv1alpha3 "github.com/operator-framework/api/pkg/apis/scorecard/v1alpha3" - apimanifests "github.com/operator-framework/api/pkg/manifests" -) - -// This is the custom scorecard test example binary -// As with the Redhat scorecard test image, the bundle that is under -// test is expected to be mounted so that tests can inspect the -// bundle contents as part of their test implementations. -// The actual test is to be run is named and that name is passed -// as an argument to this binary. This argument mechanism allows -// this binary to run various tests all from within a single -// test image. - -const PodBundleRoot = "/bundle" - -func main() { - entrypoint := os.Args[1:] - if len(entrypoint) == 0 { - log.Fatal("Test name argument is required") - } - - // Read the pod's untar'd bundle from a well-known path. - cfg, err := apimanifests.GetBundleFromDir(PodBundleRoot) - if err != nil { - log.Fatal(err.Error()) - } - - var result scapiv1alpha3.TestStatus - - // Names of the custom tests which would be passed in the - // `operator-sdk` command. 
- switch entrypoint[0] { - case CustomTest1Name: - result = CustomTest1(cfg) - case CustomTest2Name: - result = CustomTest2(cfg) - default: - result = printValidTests() - } - - // Convert scapiv1alpha3.TestResult to json. - prettyJSON, err := json.MarshalIndent(result, "", " ") - if err != nil { - log.Fatal("Failed to generate json", err) - } - fmt.Printf("%s\n", string(prettyJSON)) - -} - -// printValidTests will print out full list of test names to give a hint to the end user on what the valid tests are. -func printValidTests() scapiv1alpha3.TestStatus { - result := scapiv1alpha3.TestResult{} - result.State = scapiv1alpha3.FailState - result.Errors = make([]string, 0) - result.Suggestions = make([]string, 0) - - str := fmt.Sprintf("Valid tests for this image include: %s %s", - CustomTest1Name, - CustomTest2Name) - result.Errors = append(result.Errors, str) - return scapiv1alpha3.TestStatus{ - Results: []scapiv1alpha3.TestResult{result}, - } -} - -const ( - CustomTest1Name = "customtest1" - CustomTest2Name = "customtest2" -) - -// Define any operator specific custom tests here. -// CustomTest1 and CustomTest2 are example test functions. Relevant operator specific -// test logic is to be implemented in similarly. 
- -func CustomTest1(bundle *apimanifests.Bundle) scapiv1alpha3.TestStatus { - r := scapiv1alpha3.TestResult{} - r.Name = CustomTest1Name - r.State = scapiv1alpha3.PassState - r.Errors = make([]string, 0) - r.Suggestions = make([]string, 0) - almExamples := bundle.CSV.GetAnnotations()["alm-examples"] - if almExamples == "" { - fmt.Println("no alm-examples in the bundle CSV") - } - - return wrapResult(r) -} - -func CustomTest2(bundle *apimanifests.Bundle) scapiv1alpha3.TestStatus { - r := scapiv1alpha3.TestResult{} - r.Name = CustomTest2Name - r.State = scapiv1alpha3.PassState - r.Errors = make([]string, 0) - r.Suggestions = make([]string, 0) - almExamples := bundle.CSV.GetAnnotations()["alm-examples"] - if almExamples == "" { - fmt.Println("no alm-examples in the bundle CSV") - } - return wrapResult(r) -} - -func wrapResult(r scapiv1alpha3.TestResult) scapiv1alpha3.TestStatus { - return scapiv1alpha3.TestStatus{ - Results: []scapiv1alpha3.TestResult{r}, - } -} ----- -==== diff --git a/modules/osdk-scorecard-output.adoc b/modules/osdk-scorecard-output.adoc deleted file mode 100644 index 54ab3b7c91..0000000000 --- a/modules/osdk-scorecard-output.adoc +++ /dev/null @@ -1,74 +0,0 @@ -// Module included in the following assemblies: -// -// * operators/operator_sdk/osdk-scorecard.adoc - -[id="osdk-scorecard-output_{context}"] -= Scorecard output - -The `--output` flag for the `scorecard` command specifies the scorecard results output format: either `text` or `json`. 
- -.Example JSON output snippet -[%collapsible] -==== -[source,json,subs="attributes+"] ----- -{ - "apiVersion": "scorecard.operatorframework.io/v1alpha3", - "kind": "TestList", - "items": [ - { - "kind": "Test", - "apiVersion": "scorecard.operatorframework.io/v1alpha3", - "spec": { - "image": "quay.io/operator-framework/scorecard-test:v{osdk_ver}", - "entrypoint": [ - "scorecard-test", - "olm-bundle-validation" - ], - "labels": { - "suite": "olm", - "test": "olm-bundle-validation-test" - } - }, - "status": { - "results": [ - { - "name": "olm-bundle-validation", - "log": "time=\"2020-06-10T19:02:49Z\" level=debug msg=\"Found manifests directory\" name=bundle-test\ntime=\"2020-06-10T19:02:49Z\" level=debug msg=\"Found metadata directory\" name=bundle-test\ntime=\"2020-06-10T19:02:49Z\" level=debug msg=\"Getting mediaType info from manifests directory\" name=bundle-test\ntime=\"2020-06-10T19:02:49Z\" level=info msg=\"Found annotations file\" name=bundle-test\ntime=\"2020-06-10T19:02:49Z\" level=info msg=\"Could not find optional dependencies file\" name=bundle-test\n", - "state": "pass" - } - ] - } - } - ] -} ----- -==== - -.Example text output snippet -[%collapsible] -==== -[source,text,subs="attributes+"] ----- --------------------------------------------------------------------------------- -Image: quay.io/operator-framework/scorecard-test:v{osdk_ver} -Entrypoint: [scorecard-test olm-bundle-validation] -Labels: - "suite":"olm" - "test":"olm-bundle-validation-test" -Results: - Name: olm-bundle-validation - State: pass - Log: - time="2020-07-15T03:19:02Z" level=debug msg="Found manifests directory" name=bundle-test - time="2020-07-15T03:19:02Z" level=debug msg="Found metadata directory" name=bundle-test - time="2020-07-15T03:19:02Z" level=debug msg="Getting mediaType info from manifests directory" name=bundle-test - time="2020-07-15T03:19:02Z" level=info msg="Found annotations file" name=bundle-test - time="2020-07-15T03:19:02Z" level=info msg="Could not find 
optional dependencies file" name=bundle-test ----- -==== - -[NOTE] -==== -The output format spec matches the link:https://pkg.go.dev/github.com/operator-framework/api/pkg/apis/scorecard/v1alpha3#Test[`Test`] type layout. -==== diff --git a/modules/osdk-scorecard-parallel.adoc b/modules/osdk-scorecard-parallel.adoc deleted file mode 100644 index a6bb1be76e..0000000000 --- a/modules/osdk-scorecard-parallel.adoc +++ /dev/null @@ -1,46 +0,0 @@ -// Module included in the following assemblies: -// -// * operators/operator_sdk/osdk-scorecard.adoc - -:_mod-docs-content-type: PROCEDURE -[id="osdk-scorecard-parallel_{context}"] -= Enabling parallel testing - -As an Operator author, you can define separate stages for your tests using the scorecard configuration file. Stages run sequentially in the order they are defined in the configuration file. A stage contains a list of tests and a configurable `parallel` setting. - -By default, or when a stage explicitly sets `parallel` to `false`, tests in a stage are run sequentially in the order they are defined in the configuration file. Running tests one at a time is helpful to guarantee that no two tests interact and conflict with each other. - -However, if tests are designed to be fully isolated, they can be parallelized. 
- -.Procedure - -* To run a set of isolated tests in parallel, include them in the same stage and set `parallel` to `true`: -+ -[source,terminal,subs="attributes+"] ----- -apiVersion: scorecard.operatorframework.io/v1alpha3 -kind: Configuration -metadata: - name: config -stages: -- parallel: true <1> - tests: - - entrypoint: - - scorecard-test - - basic-check-spec - image: quay.io/operator-framework/scorecard-test:v{osdk_ver} - labels: - suite: basic - test: basic-check-spec-test - - entrypoint: - - scorecard-test - - olm-bundle-validation - image: quay.io/operator-framework/scorecard-test:v{osdk_ver} - labels: - suite: olm - test: olm-bundle-validation-test ----- -<1> Enables parallel testing -+ -All tests in a parallel stage are executed simultaneously, and scorecard waits for all of them to finish before proceding to the next stage. This can make your tests run much faster. - diff --git a/modules/osdk-scorecard-run.adoc b/modules/osdk-scorecard-run.adoc deleted file mode 100644 index 31d340f947..0000000000 --- a/modules/osdk-scorecard-run.adoc +++ /dev/null @@ -1,31 +0,0 @@ -// Module included in the following assemblies: -// -// * operators/operator_sdk/osdk-scorecard.adoc - -:_mod-docs-content-type: PROCEDURE -[id="osdk-scorecard-run_{context}"] -= Running the scorecard tool - -A default set of Kustomize files are generated by the Operator SDK after running the `init` command. The default `bundle/tests/scorecard/config.yaml` file that is generated can be immediately used to run the scorecard tool against your Operator, or you can modify this file to your test specifications. - -.Prerequisites - -* Operator project generated by using the Operator SDK - -.Procedure - -. Generate or regenerate your bundle manifests and metadata for your Operator: -+ -[source,terminal] ----- -$ make bundle ----- -+ -This command automatically adds scorecard annotations to your bundle metadata, which is used by the `scorecard` command to run tests. - -. 
Run the scorecard against the on-disk path to your Operator bundle or the name of a bundle image: -+ -[source,terminal] ----- -$ operator-sdk scorecard ----- diff --git a/modules/osdk-scorecard-select-tests.adoc b/modules/osdk-scorecard-select-tests.adoc deleted file mode 100644 index aa29a3236c..0000000000 --- a/modules/osdk-scorecard-select-tests.adoc +++ /dev/null @@ -1,40 +0,0 @@ -// Module included in the following assemblies: -// -// * operators/operator_sdk/osdk-scorecard.adoc - -:_mod-docs-content-type: PROCEDURE -[id="osdk-scorecard-select-tests_{context}"] -= Selecting tests - -Scorecard tests are selected by setting the `--selector` CLI flag to a set of label strings. If a selector flag is not supplied, then all of the tests within the scorecard configuration file are run. - -Tests are run serially with test results being aggregated by the scorecard and written to standard output, or _stdout_. - -.Procedure - -. To select a single test, for example `basic-check-spec-test`, specify the test by using the `--selector` flag: -+ -[source,terminal] ----- -$ operator-sdk scorecard \ - -o text \ - --selector=test=basic-check-spec-test ----- - -. To select a suite of tests, for example `olm`, specify a label that is used by all of the OLM tests: -+ -[source,terminal] ----- -$ operator-sdk scorecard \ - -o text \ - --selector=suite=olm ----- - -. 
To select multiple tests, specify the test names by using the `selector` flag using the following syntax: -+ -[source,terminal] ----- -$ operator-sdk scorecard \ - -o text \ - --selector='test in (basic-check-spec-test,olm-bundle-validation-test)' ----- diff --git a/modules/osdk-scorecard-tests.adoc b/modules/osdk-scorecard-tests.adoc deleted file mode 100644 index b3d3a3cec3..0000000000 --- a/modules/osdk-scorecard-tests.adoc +++ /dev/null @@ -1,47 +0,0 @@ -// Module included in the following assemblies: -// -// * operators/operator_sdk/osdk-scorecard.adoc - -[id="osdk-scorecard-tests_{context}"] -= Built-in scorecard tests - -The scorecard ships with pre-defined tests that are arranged into suites: the basic test suite and the Operator Lifecycle Manager (OLM) suite. - -[id="osdk-scorecard-basic-tests_{context}"] -.Basic test suite -[cols="3,7,3",options="header"] -|=== -|Test |Description |Short name - -|Spec Block Exists -|This test checks the custom resource (CR) created in the cluster to make sure that all CRs have a `spec` block. -|`basic-check-spec-test` -|=== - -[id="osdk-scorecard-olm-tests_{context}"] -.OLM test suite - -[cols="3,7,3",options="header"] -|=== -|Test |Description |Short name - -|Bundle Validation -|This test validates the bundle manifests found in the bundle that is passed into scorecard. If the bundle contents contain errors, then the test result output includes the validator log as well as error messages from the validation library. -|`olm-bundle-validation-test` - -|Provided APIs Have Validation -|This test verifies that the custom resource definitions (CRDs) for the provided CRs contain a validation section and that there is validation for each `spec` and `status` field detected in the CR. -|`olm-crds-have-validation-test` - -|Owned CRDs Have Resources Listed -|This test makes sure that the CRDs for each CR provided via the `cr-manifest` option have a `resources` subsection in the `owned` CRDs section of the ClusterServiceVersion (CSV). 
If the test detects used resources that are not listed in the resources section, it lists them in the suggestions at the end of the test. Users are required to fill out the resources section after initial code generation for this test to pass. -|`olm-crds-have-resources-test` - -|Spec Fields With Descriptors -|This test verifies that every field in the CRs `spec` sections has a corresponding descriptor listed in the CSV. -|`olm-spec-descriptors-test` - -|Status Fields With Descriptors -|This test verifies that every field in the CRs `status` sections have a corresponding descriptor listed in the CSV. -|`olm-status-descriptors-test` -|=== diff --git a/modules/osdk-suggested-namespace-node-selector.adoc b/modules/osdk-suggested-namespace-node-selector.adoc deleted file mode 100644 index 7ff008ae01..0000000000 --- a/modules/osdk-suggested-namespace-node-selector.adoc +++ /dev/null @@ -1,47 +0,0 @@ -// Module included in the following assemblies: -// -// * operators/operator_sdk/osdk-generating-csvs.adoc - -:_mod-docs-content-type: PROCEDURE -[id="osdk-suggested-namespace-default-node_{context}"] -= Setting a suggested namespace with default node selector - -Some Operators expect to run only on control plane nodes, which can be done by setting a `nodeSelector` in the `Pod` spec by the Operator itself. - -To avoid getting duplicated and potentially conflicting cluster-wide default `nodeSelector`, you can set a default node selector on the namespace where the Operator runs. The default node selector will take precedence over the cluster default so the cluster default will not be applied to the pods in the Operators namespace. 
- -When adding the Operator to a cluster using OperatorHub, the web console auto-populates the suggested namespace for the -ifndef::openshift-dedicated,openshift-rosa[] -cluster administrator -endif::openshift-dedicated,openshift-rosa[] -ifdef::openshift-dedicated,openshift-rosa[] -installer -endif::openshift-dedicated,openshift-rosa[] -during the installation process. The suggested namespace is created using the namespace manifest in YAML which is included in the cluster service version (CSV). - -.Procedure - -* In your CSV, set the `operatorframework.io/suggested-namespace-template` with a manifest for a `Namespace` object. The following sample is a manifest for an example `Namespace` with the namespace default node selector specified: -+ -[source,yaml] ----- -metadata: - annotations: - operatorframework.io/suggested-namespace-template: <1> - { - "apiVersion": "v1", - "kind": "Namespace", - "metadata": { - "name": "vertical-pod-autoscaler-suggested-template", - "annotations": { - "openshift.io/node-selector": "" - } - } - } ----- -<1> Set your suggested namespace. -+ -[NOTE] -==== -If both `suggested-namespace` and `suggested-namespace-template` annotations are present in the CSV, `suggested-namespace-template` should take precedence. -==== \ No newline at end of file diff --git a/modules/osdk-suggested-namespace.adoc b/modules/osdk-suggested-namespace.adoc deleted file mode 100644 index 2af5c86e2c..0000000000 --- a/modules/osdk-suggested-namespace.adoc +++ /dev/null @@ -1,30 +0,0 @@ -// Module included in the following assemblies: -// -// * operators/operator_sdk/osdk-generating-csvs.adoc - -:_mod-docs-content-type: PROCEDURE -[id="osdk-suggested-namespace_{context}"] -= Setting a suggested namespace - -Some Operators must be deployed in a specific namespace, or with ancillary resources in specific namespaces, to work properly. 
If resolved from a subscription, Operator Lifecycle Manager (OLM) defaults the namespaced resources of an Operator to the namespace of its subscription. - -As an Operator author, you can instead express a desired target namespace as part of your cluster service version (CSV) to maintain control over the final namespaces of the resources installed for their Operators. When adding the Operator to a cluster using OperatorHub, this enables the web console to autopopulate the suggested namespace for the -ifndef::openshift-dedicated,openshift-rosa[] -cluster administrator -endif::openshift-dedicated,openshift-rosa[] -ifdef::openshift-dedicated,openshift-rosa[] -installer -endif::openshift-dedicated,openshift-rosa[] -during the installation process. - -.Procedure - -* In your CSV, set the `operatorframework.io/suggested-namespace` annotation to your suggested namespace: -+ -[source,yaml] ----- -metadata: - annotations: - operatorframework.io/suggested-namespace: <1> ----- -<1> Set your suggested namespace. 
diff --git a/modules/osdk-updating-128-to-131.adoc b/modules/osdk-updating-128-to-131.adoc deleted file mode 100644 index a599893acc..0000000000 --- a/modules/osdk-updating-128-to-131.adoc +++ /dev/null @@ -1,203 +0,0 @@ -// Module included in the following assemblies: -// -// * operators/operator_sdk/golang/osdk-golang-updating-projects.adoc -// * operators/operator_sdk/ansible/osdk-ansible-updating-projects.adoc -// * operators/operator_sdk/helm/osdk-helm-updating-projects.adoc - -ifeval::["{context}" == "osdk-golang-updating-projects"] -:golang: -:type: Go -endif::[] -ifeval::["{context}" == "osdk-ansible-updating-projects"] -:ansible: -:type: Ansible -endif::[] -ifeval::["{context}" == "osdk-helm-updating-projects"] -:helm: -:type: Helm -endif::[] - -:_mod-docs-content-type: PROCEDURE -[id="osdk-upgrading-projects_{context}"] -= Updating {type}-based Operator projects for Operator SDK {osdk_ver} - -The following procedure updates an existing {type}-based Operator project for compatibility with {osdk_ver}. - -.Prerequisites - -* Operator SDK {osdk_ver} installed -* An Operator project created or maintained with Operator SDK {osdk_ver_n1} - -.Procedure - -ifdef::golang[] -* Edit your Operator project's Makefile to update the Operator SDK version to {osdk_ver}, as shown in the following example: -+ -.Example Makefile -[source,make,subs="attributes+"] ----- -# Set the Operator SDK version to use. By default, what is installed on the system is used. -# This is useful for CI or a project to utilize a specific version of the operator-sdk toolkit. -OPERATOR_SDK_VERSION ?= v{osdk_ver} <1> ----- -<1> Change the version from `{osdk_ver_n1}` to `{osdk_ver}`. -endif::[] - -ifdef::helm[] -. 
Edit your Operator's Dockerfile to update the Helm Operator version to {osdk_ver}, as shown in the following example: -+ -.Example Dockerfile -[source,docker,subs="attributes+"] ----- -FROM quay.io/operator-framework/helm-operator:v{osdk_ver} <1> ----- -<1> Update the Helm Operator version from `{osdk_ver_n1}` to `{osdk_ver}` - -. Edit your Operator project's Makefile to update the Operator SDK to {osdk_ver}, as shown in the following example: -+ -.Example Makefile -[source,make,subs="attributes+"] ----- -# Set the Operator SDK version to use. By default, what is installed on the system is used. -# This is useful for CI or a project to utilize a specific version of the operator-sdk toolkit. -OPERATOR_SDK_VERSION ?= v{osdk_ver} <1> ----- -<1> Change the version from `{osdk_ver_n1}` to `{osdk_ver}`. - -. If you use a custom service account for deployment, define the following role to require a watch operation on your secrets resource, as shown in the following example: -+ -.Example `config/rbac/role.yaml` file -[source,yaml] ----- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - name: -admin -subjects: -- kind: ServiceAccount - name: - namespace: -roleRef: - kind: ClusterRole - name: cluster-admin - apiGroup: "" -rules: <1> - - apiGroups: - - "" - resources: - - secrets - verbs: - - watch ----- -<1> Add the `rules` stanza to create a watch operation for your secrets resource. -endif::[] - -ifdef::ansible[] - -. Make the following changes to your Operator's Dockerfile: - -.. Replace the `ansible-operator-2.11-preview` base image with the `ansible-operator` base image and update the version to {osdk_ver}, as shown in the following example: -+ -.Example Dockerfile -[source,docker,subs="attributes+"] ----- -FROM quay.io/operator-framework/ansible-operator:v{osdk_ver} ----- - -.. 
The update to Ansible 2.15.0 in version 1.30.0 of the Ansible Operator removed the following preinstalled Python modules: -+ --- -* `ipaddress` -* `openshift` -* `jmespath` -* `cryptography` -* `oauthlib` --- -+ -If your Operator depends on one of these removed Python modules, update your Dockerfile to install the required modules by using the `pip install` command. - -. Edit your Operator project's Makefile to update the Operator SDK version to {osdk_ver}, as shown in the following example: -+ -.Example Makefile -[source,make,subs="attributes+"] ----- -# Set the Operator SDK version to use. By default, what is installed on the system is used. -# This is useful for CI or a project to utilize a specific version of the operator-sdk toolkit. -OPERATOR_SDK_VERSION ?= v{osdk_ver} <1> ----- -<1> Change the version from `{osdk_ver_n1}` to `{osdk_ver}`. - -. Update your `requirements.yaml` and `requirements.go` files to remove the `community.kubernetes` collection and update the `operator_sdk.util` collection to version `0.5.0`, as shown in the following example: -+ -.Example `requirements.yaml` file -[source,diff] ----- - collections: -- - name: community.kubernetes <1> -- version: "2.0.1" - - name: operator_sdk.util -- version: "0.4.0" -+ version: "0.5.0" <2> - - name: kubernetes.core - version: "2.4.0" - - name: cloud.common ----- -<1> Remove the `community.kubernetes` collection -<2> Update the `operator_sdk.util` collection to version `0.5.0`. - -. Remove all instances of the `lint` field from your `molecule/kind/molecule.yml` and `molecule/default/molecule.yml` files, as shown in the following example: -+ -[source,diff] ----- - --- - dependency: - name: galaxy - driver: - name: delegated -- lint: | -- set -e -- yamllint -d "{extends: relaxed, rules: {line-length: {max: 120}}}" . 
- platforms: - - name: cluster - groups: - - k8s - provisioner: - name: ansible -- lint: | -- set -e - ansible-lint - inventory: - group_vars: - all: - namespace: ${TEST_OPERATOR_NAMESPACE:-osdk-test} - host_vars: - localhost: - ansible_python_interpreter: '{{ ansible_playbook_python }}' - config_dir: ${MOLECULE_PROJECT_DIRECTORY}/config - samples_dir: ${MOLECULE_PROJECT_DIRECTORY}/config/samples - operator_image: ${OPERATOR_IMAGE:-""} - operator_pull_policy: ${OPERATOR_PULL_POLICY:-"Always"} - kustomize: ${KUSTOMIZE_PATH:-kustomize} - env: - K8S_AUTH_KUBECONFIG: ${KUBECONFIG:-"~/.kube/config"} - verifier: - name: ansible -- lint: | -- set -e -- ansible-lint ----- -endif::[] - - -ifeval::["{context}" == "osdk-golang-updating-projects"] -:!golang: -:!type: -endif::[] -ifeval::["{context}" == "osdk-ansible-updating-projects"] -:!ansible: -:!type: -endif::[] -ifeval::["{context}" == "osdk-helm-updating-projects"] -:!helm: -:!type: -endif::[] diff --git a/modules/osdk-updating-131-to-1361.adoc b/modules/osdk-updating-131-to-1361.adoc deleted file mode 100644 index 5d0bafb61a..0000000000 --- a/modules/osdk-updating-131-to-1361.adoc +++ /dev/null @@ -1,260 +0,0 @@ -// Module included in the following assemblies: -// -// * operators/operator_sdk/golang/osdk-golang-updating-projects.adoc -// * operators/operator_sdk/ansible/osdk-ansible-updating-projects.adoc -// * operators/operator_sdk/helm/osdk-helm-updating-projects.adoc - -ifeval::["{context}" == "osdk-golang-updating-projects"] -:golang: -:type: Go -endif::[] -ifeval::["{context}" == "osdk-ansible-updating-projects"] -:ansible: -:type: Ansible -endif::[] -ifeval::["{context}" == "osdk-helm-updating-projects"] -:helm: -:type: Helm -endif::[] - -:_mod-docs-content-type: PROCEDURE -[id="osdk-upgrading-projects_{context}"] -= Updating {type}-based Operator projects for Operator SDK {osdk_ver} - -The following procedure updates an existing {type}-based Operator project for compatibility with {osdk_ver}. 
- -.Prerequisites - -* Operator SDK {osdk_ver} installed -* An Operator project created or maintained with Operator SDK {osdk_ver_n1} - -.Procedure - -// The following few steps should be retained/updated for each new migration procedure, as they're just bumping the OSDK version for each language type. - -. Edit your Operator project's Makefile to update the Operator SDK version to {osdk_ver}, as shown in the following example: -+ -.Example Makefile -[source,make,subs="attributes+"] ----- -# Set the Operator SDK version to use. By default, what is installed on the system is used. -# This is useful for CI or a project to utilize a specific version of the operator-sdk toolkit. -OPERATOR_SDK_VERSION ?= v{osdk_ver} <1> ----- -<1> Change the version from `{osdk_ver_n1}` to `{osdk_ver}`. -ifdef::helm[] -. Edit your Operator's Dockerfile to update the `ose-helm-rhel9-operator` image tag to `{product-version}`, as shown in the following example: -+ -.Example Dockerfile -[source,docker,subs="attributes+"] ----- -FROM registry.redhat.io/openshift4/ose-helm-rhel9-operator:v{product-version} ----- -endif::[] -ifdef::ansible[] -. Edit your Operator's Dockerfile to update the `ose-ansible-operator` image tag to `{product-version}`, as shown in the following example: -+ -.Example Dockerfile -[source,docker,subs="attributes+"] ----- -FROM registry.redhat.io/openshift4/ose-ansible-operator:v{product-version} ----- -endif::[] -ifdef::golang[] -. The `go/v4` plugin is now stable and is the default version used when scaffolding a Go-based Operator. The transition from Golang v2 and v3 plugins to the new Golang v4 plugin introduces significant changes. This migration is designed to enhance your project's functionality and compatibility, reflecting the evolving landscape of Golang development. -+ -For more information on the reasoning behind these changes, see link:https://book.kubebuilder.io/migration/v3vsv4#tldr-of-the-new-gov4-plugin[go/v3 vs go/v4] in the Kubebuilder documentation. 
-+ -For a comprehensive understanding of the migration process to the v4 plugin format and detailed migration steps, see link:https://book.kubebuilder.io/migration/manually_migration_guide_gov3_to_gov4[Migration from go/v3 to go/v4 by updating the files manually] in the Kubebuilder documentation. -endif::[] - -. The `kustomize/v2` plugin is now stable and is the default version used in the plugin chain when using `go/v4`, `ansible/v1`, `helm/v1`, and `hybrid/v1-alpha` plugins. For more information on this default scaffold, see link:https://book.kubebuilder.io/plugins/kustomize-v2[Kustomize v2] in the Kubebuilder documentation. - -. If your Operator project uses a multi-platform, or multi-archicture, build, replace the existing `docker-buildx` target with following definition in your project Makefile: -+ -.Example Makefile -[source,make] ----- -docker-buildx: -## Build and push the Docker image for the manager for multi-platform support - - docker buildx create --name project-v3-builder - docker buildx use project-v3-builder - - docker buildx build --push --platform=$(PLATFORMS) --tag ${IMG} -f Dockerfile . - - docker buildx rm project-v3-builder ----- - -. You must upgrade the Kubernetes versions in your Operator project to use 1.29. The following changes must be made in your project structure, Makefile, and `go.mod` files. -+ -[IMPORTANT] -==== -The `go/v3` plugin is being deprecated by Kubebuilder, therefore Operator SDK is also migrating to `go/v4` in a future release. -==== - -.. Update your `go.mod` file to upgrade your dependencies: -+ -[source,go] ----- -k8s.io/api v0.29.2 -k8s.io/apimachinery v0.29.2 -k8s.io/client-go v0.29.2 -sigs.k8s.io/controller-runtime v0.17.3 ----- - -.. Download the upgraded dependencies by running the following command: -+ -[source,terminal] ----- -$ go mod tidy ----- - -ifdef::golang[] -.. Projects are now scaffolded with `kube-rbac-proxy` version `0.16.0`. 
Modify the version of `kube-rbac-proxy` in the scaffolded `config/default/manager_auth_proxy_patch.yaml` file by making the following changes: -+ -[source,diff] ----- -- gcr.io/kubebuilder/kube-rbac-proxy:v0.15.0 -+ gcr.io/kubebuilder/kube-rbac-proxy:v0.16.0 ----- -endif::[] - -ifdef::golang[] -.. You can now generate a file that contains all the resources built with Kustomize, which are necessary to install this project without its dependencies. Update your Makefile by making the following changes: -+ -[source,diff] ----- -+ .PHONY: build-installer -+ build-installer: manifests generate kustomize ## Generate a consolidated YAML with CRDs and deployment. -+ mkdir -p dist -+ cd config/manager && $(KUSTOMIZE) edit set image controller=${IMG} -+ $(KUSTOMIZE) build config/default > dist/install.yaml ----- - -.. Update the `ENVTEST_K8S_VERSION` variable in your Makefile by making the following changes: -+ -[source,diff] ----- -- ENVTEST_K8S_VERSION = 1.28.3 -+ ENVTEST_K8S_VERSION = 1.29.0 ----- - -.. Remove the following section from your Makefile: -+ -[source,diff] ----- -- GOLANGCI_LINT = $(shell pwd)/bin/golangci-lint -- GOLANGCI_LINT_VERSION ?= v1.54.2 -- golangci-lint: -- @[ -f $(GOLANGCI_LINT) ] || { \ -- set -e ;\ -- curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(shell dirname $(GOLANGCI_LINT)) $(GOLANGCI_LINT_VERSION) ;\ -- } ----- - -.. Update your Makefile by making the following changes: -+ -.Makefile changes -[%collapsible] -==== -[source,diff] ----- -- ## Tool Binaries -- KUBECTL ?= kubectl -- KUSTOMIZE ?= $(LOCALBIN)/kustomize -- CONTROLLER_GEN ?= $(LOCALBIN)/controller-gen -- ENVTEST ?= $(LOCALBIN)/setup-envtest -- -- ## Tool Versions -- KUSTOMIZE_VERSION ?= v5.2.1 -- CONTROLLER_TOOLS_VERSION ?= v0.13.0 -- -- .PHONY: kustomize -- kustomize: $(KUSTOMIZE) ## Download kustomize locally if necessary. If wrong version is installed, it will be removed before downloading. 
-- $(KUSTOMIZE): $(LOCALBIN) -- @if test -x $(LOCALBIN)/kustomize && ! $(LOCALBIN)/kustomize version | grep -q $(KUSTOMIZE_VERSION); then \ -- echo "$(LOCALBIN)/kustomize version is not expected $(KUSTOMIZE_VERSION). Removing it before installing."; \ -- rm -rf $(LOCALBIN)/kustomize; \ -- fi -- test -s $(LOCALBIN)/kustomize || GOBIN=$(LOCALBIN) GO111MODULE=on go install sigs.k8s.io/kustomize/kustomize/v5@$(KUSTOMIZE_VERSION) -- -- .PHONY: controller-gen -- controller-gen: $(CONTROLLER_GEN) ## Download controller-gen locally if necessary. If wrong version is installed, it will be overwritten. -- $(CONTROLLER_GEN): $(LOCALBIN) -- test -s $(LOCALBIN)/controller-gen && $(LOCALBIN)/controller-gen --version | grep -q $(CONTROLLER_TOOLS_VERSION) || \ -- GOBIN=$(LOCALBIN) go install sigs.k8s.io/controller-tools/cmd/controller-gen@$(CONTROLLER_TOOLS_VERSION) -- -- .PHONY: envtest -- envtest: $(ENVTEST) ## Download envtest-setup locally if necessary. -- $(ENVTEST): $(LOCALBIN) -- test -s $(LOCALBIN)/setup-envtest || GOBIN=$(LOCALBIN) go install sigs.k8s.io/controller-runtime/tools/setup-envtest@latest -+ ## Tool Binaries -+ KUBECTL ?= kubectl -+ KUSTOMIZE ?= $(LOCALBIN)/kustomize-$(KUSTOMIZE_VERSION) -+ CONTROLLER_GEN ?= $(LOCALBIN)/controller-gen-$(CONTROLLER_TOOLS_VERSION) -+ ENVTEST ?= $(LOCALBIN)/setup-envtest-$(ENVTEST_VERSION) -+ GOLANGCI_LINT = $(LOCALBIN)/golangci-lint-$(GOLANGCI_LINT_VERSION) -+ -+ ## Tool Versions -+ KUSTOMIZE_VERSION ?= v5.3.0 -+ CONTROLLER_TOOLS_VERSION ?= v0.14.0 -+ ENVTEST_VERSION ?= release-0.17 -+ GOLANGCI_LINT_VERSION ?= v1.57.2 -+ -+ .PHONY: kustomize -+ kustomize: $(KUSTOMIZE) ## Download kustomize locally if necessary. -+ $(KUSTOMIZE): $(LOCALBIN) -+ $(call go-install-tool,$(KUSTOMIZE),sigs.k8s.io/kustomize/kustomize/v5,$(KUSTOMIZE_VERSION)) -+ -+ .PHONY: controller-gen -+ controller-gen: $(CONTROLLER_GEN) ## Download controller-gen locally if necessary. 
-+ $(CONTROLLER_GEN): $(LOCALBIN) -+ $(call go-install-tool,$(CONTROLLER_GEN),sigs.k8s.io/controller-tools/cmd/controller-gen,$(CONTROLLER_TOOLS_VERSION)) -+ -+ .PHONY: envtest -+ envtest: $(ENVTEST) ## Download setup-envtest locally if necessary. -+ $(ENVTEST): $(LOCALBIN) -+ $(call go-install-tool,$(ENVTEST),sigs.k8s.io/controller-runtime/tools/setup-envtest,$(ENVTEST_VERSION)) -+ -+ .PHONY: golangci-lint -+ golangci-lint: $(GOLANGCI_LINT) ## Download golangci-lint locally if necessary. -+ $(GOLANGCI_LINT): $(LOCALBIN) -+ $(call go-install-tool,$(GOLANGCI_LINT),github.com/golangci/golangci-lint/cmd/golangci-lint,${GOLANGCI_LINT_VERSION}) -+ -+ # go-install-tool will 'go install' any package with custom target and name of binary, if it doesn't exist -+ # $1 - target path with name of binary (ideally with version) -+ # $2 - package url which can be installed -+ # $3 - specific version of package -+ define go-install-tool -+ @[ -f $(1) ] || { \ -+ set -e; \ -+ package=$(2)@$(3) ;\ -+ echo "Downloading $${package}" ;\ -+ GOBIN=$(LOCALBIN) go install $${package} ;\ -+ mv "$$(echo "$(1)" | sed "s/-$(3)$$//")" $(1) ;\ -+ } -+ endef ----- -==== -endif::[] - -ifdef::helm[] -. 
Update the Kustomize version in your Makefile by making the following changes: -+ -[source,diff] ----- -- curl -sSLo - https://github.com/kubernetes-sigs/kustomize/releases/download/kustomize/v5.2.1/kustomize_v5.2.1_$(OS)_$(ARCH).tar.gz | \ -+ curl -sSLo - https://github.com/kubernetes-sigs/kustomize/releases/download/kustomize/v5.3.0/kustomize_v5.3.0_$(OS)_$(ARCH).tar.gz | \ ----- -endif::[] - -ifeval::["{context}" == "osdk-golang-updating-projects"] -:!golang: -:!type: -endif::[] -ifeval::["{context}" == "osdk-ansible-updating-projects"] -:!ansible: -:!type: -endif::[] -ifeval::["{context}" == "osdk-helm-updating-projects"] -:!helm: -:!type: -endif::[] diff --git a/modules/osdk-updating-1361-to-138.adoc b/modules/osdk-updating-1361-to-138.adoc deleted file mode 100644 index ea31057a34..0000000000 --- a/modules/osdk-updating-1361-to-138.adoc +++ /dev/null @@ -1,419 +0,0 @@ -// Module included in the following assemblies: -// -// * operators/operator_sdk/golang/osdk-golang-updating-projects.adoc -// * operators/operator_sdk/ansible/osdk-ansible-updating-projects.adoc -// * operators/operator_sdk/helm/osdk-helm-updating-projects.adoc - -ifeval::["{context}" == "osdk-golang-updating-projects"] -:golang: -:type: Go -endif::[] -ifeval::["{context}" == "osdk-ansible-updating-projects"] -:ansible: -:type: Ansible -endif::[] -ifeval::["{context}" == "osdk-helm-updating-projects"] -:helm: -:type: Helm -endif::[] - -:_mod-docs-content-type: PROCEDURE -[id="osdk-upgrading-projects_{context}"] -= Updating {type}-based Operator projects for Operator SDK {osdk_ver} - -The following procedure updates an existing {type}-based Operator project for compatibility with {osdk_ver}. - -.Prerequisites - -* Operator SDK {osdk_ver} installed -* An Operator project created or maintained with Operator SDK {osdk_ver_n1} - -.Procedure - -// The following few steps should be retained/updated for each new migration procedure, as they're just bumping the OSDK version for each language type. - -. 
Edit the Makefile of your Operator project to update the Operator SDK version to {osdk_ver}, as shown in the following example: -+ -.Example Makefile -[source,make,subs="attributes+"] ----- -# Set the Operator SDK version to use. By default, what is installed on the system is used. -# This is useful for CI or a project to utilize a specific version of the operator-sdk toolkit. -OPERATOR_SDK_VERSION ?= v{osdk_ver} <1> ----- -<1> Change the version from `{osdk_ver_n1}` to `{osdk_ver}`. -ifdef::helm[] -. Edit the Makefile of your Operator project to update the `ose-helm-rhel9-operator` image tag to `{product-version}`, as shown in the following example: -+ -.Example Dockerfile -[source,docker,subs="attributes+"] ----- -FROM registry.redhat.io/openshift4/ose-helm-rhel9-operator:v{product-version} ----- -endif::helm[] - -ifdef::ansible[] -. Edit the Dockerfile of your Operator project to update the `ose-ansible-operator` image tag to `{product-version}`, as shown in the following example: -+ -.Example Dockerfile -[source,docker,subs="attributes+"] ----- -FROM registry.redhat.io/openshift4/ose-ansible-operator:v{product-version} ----- -endif::ansible[] - -. You must upgrade the Kubernetes versions in your Operator project to use 1.30 and Kubebuilder v4. -+ -[TIP] -==== -This update include complex scaffolding changes due to the removal of link:https://github.com/brancz/kube-rbac-proxy[kube-rbac-proxy]. If these migrations become difficult to follow, scaffold a new sample project for comparison. -==== - -ifdef::helm,ansible[] -.. Update the Kustomize version in your Makefile by making the following changes: -+ -[source,diff] ----- -- curl -sSLo - https://github.com/kubernetes-sigs/kustomize/releases/download/kustomize/v5.3.0/kustomize_v5.3.0_$(OS)_$(ARCH).tar.gz | \ -+ curl -sSLo - https://github.com/kubernetes-sigs/kustomize/releases/download/kustomize/v5.4.2/kustomize_v5.4.2_$(OS)_$(ARCH).tar.gz | \ ----- -endif::helm,ansible[] - -ifdef::golang[] -.. 
Update your `go.mod` file with the following changes to upgrade your dependencies: -+ -[source,go] ----- -go 1.22.0 - -github.com/onsi/ginkgo/v2 v2.17.1 -github.com/onsi/gomega v1.32.0 -k8s.io/api v0.30.1 -k8s.io/apimachinery v0.30.1 -k8s.io/client-go v0.30.1 -sigs.k8s.io/controller-runtime v0.18.4 ----- - -.. Download the upgraded dependencies by running the following command: -+ -[source,terminal] ----- -$ go mod tidy ----- - -.. Update your Makefile with the following changes: -+ -[source,diff] ----- -- ENVTEST_K8S_VERSION = 1.29.0 -+ ENVTEST_K8S_VERSION = 1.30.0 ----- -+ -[source,diff] ----- -- KUSTOMIZE ?= $(LOCALBIN)/kustomize-$(KUSTOMIZE_VERSION) -- CONTROLLER_GEN ?= $(LOCALBIN)/controller-gen-$(CONTROLLER_TOOLS_VERSION) -- ENVTEST ?= $(LOCALBIN)/setup-envtest-$(ENVTEST_VERSION) -- GOLANGCI_LINT = $(LOCALBIN)/golangci-lint-$(GOLANGCI_LINT_VERSION) -+ KUSTOMIZE ?= $(LOCALBIN)/kustomize -+ CONTROLLER_GEN ?= $(LOCALBIN)/controller-gen -+ ENVTEST ?= $(LOCALBIN)/setup-envtest -+ GOLANGCI_LINT = $(LOCALBIN)/golangci-lint ----- -+ -[source,diff] ----- -- KUSTOMIZE_VERSION ?= v5.3.0 -- CONTROLLER_TOOLS_VERSION ?= v0.14.0 -- ENVTEST_VERSION ?= release-0.17 -- GOLANGCI_LINT_VERSION ?= v1.57.2 -+ KUSTOMIZE_VERSION ?= v5.4.2 -+ CONTROLLER_TOOLS_VERSION ?= v0.15.0 -+ ENVTEST_VERSION ?= release-0.18 -+ GOLANGCI_LINT_VERSION ?= v1.59.1 ----- -+ -[source,diff] ----- -- $(call go-install-tool,$(GOLANGCI_LINT),github.com/golangci/golangci-lint/cmd/golangci-lint,${GOLANGCI_LINT_VERSION}) -+ $(call go-install-tool,$(GOLANGCI_LINT),github.com/golangci/golangci-lint/cmd/golangci-lint,$(GOLANGCI_LINT_VERSION)) ----- -+ -[source,diff] ----- -- $(call go-install-tool,$(GOLANGCI_LINT),github.com/golangci/golangci-lint/cmd/golangci-lint,${GOLANGCI_LINT_VERSION}) -+ $(call go-install-tool,$(GOLANGCI_LINT),github.com/golangci/golangci-lint/cmd/golangci-lint,$(GOLANGCI_LINT_VERSION)) ----- -+ -[source,diff] ----- -- @[ -f $(1) ] || { \ -+ @[ -f "$(1)-$(3)" ] || { \ - echo "Downloading 
$${package}" ;\ -+ rm -f $(1) || true ;\ -- mv "$$(echo "$(1)" | sed "s/-$(3)$$//")" $(1) ;\ -- } -+ mv $(1) $(1)-$(3) ;\ -+ } ;\ -+ ln -sf $(1)-$(3) $(1) ----- - -.. Update your `.golangci.yml` file with the following changes: -+ -[source,diff] ----- -- exportloopref -+ - ginkgolinter - - prealloc -+ - revive -+ -+ linters-settings: -+ revive: -+ rules: -+ - name: comment-spacings ----- - -.. Update your Dockerfile with the following changes: -+ -[source,diff] ----- -- FROM golang:1.21 AS builder -+ FROM golang:1.22 AS builder ----- - -.. Update your `main.go` file with the following changes: -+ -[source,diff] ----- - "sigs.k8s.io/controller-runtime/pkg/log/zap" -+ "sigs.k8s.io/controller-runtime/pkg/metrics/filters" - - var enableHTTP2 bool -- flag.StringVar(&metricsAddr, "metrics-bind-address", ":8080", "The address the metric endpoint binds to.") -+ var tlsOpts []func(*tls.Config) -+ flag.StringVar(&metricsAddr, "metrics-bind-address", "0", "The address the metrics endpoint binds to. "+ -+ "Use :8443 for HTTPS or :8080 for HTTP, or leave as 0 to disable the metrics service.") - flag.StringVar(&probeAddr, "health-probe-bind-address", ":8081", "The address the probe endpoint binds to.") - flag.BoolVar(&enableLeaderElection, "leader-elect", false, - "Enable leader election for controller manager. "+ - "Enabling this will ensure there is only one active controller manager.") -- flag.BoolVar(&secureMetrics, "metrics-secure", false, -- "If set the metrics endpoint is served securely") -+ flag.BoolVar(&secureMetrics, "metrics-secure", true, -+ "If set, the metrics endpoint is served securely via HTTPS. Use --metrics-secure=false to use HTTP instead.") - -- tlsOpts := []func(*tls.Config){} - -+ // Metrics endpoint is enabled in 'config/default/kustomization.yaml'. The Metrics options configure the server. 
-+ // More info: -+ // - https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.18.4/pkg/metrics/server -+ // - https://book.kubebuilder.io/reference/metrics.html -+ metricsServerOptions := metricsserver.Options{ -+ BindAddress: metricsAddr, -+ SecureServing: secureMetrics, -+ // TODO(user): TLSOpts is used to allow configuring the TLS config used for the server. If certificates are -+ // not provided, self-signed certificates will be generated by default. This option is not recommended for -+ // production environments as self-signed certificates do not offer the same level of trust and security -+ // as certificates issued by a trusted Certificate Authority (CA). The primary risk is potentially allowing -+ // unauthorized access to sensitive metrics data. Consider replacing with CertDir, CertName, and KeyName -+ // to provide certificates, ensuring the server communicates using trusted and secure certificates. -+ TLSOpts: tlsOpts, -+ } -+ -+ if secureMetrics { -+ // FilterProvider is used to protect the metrics endpoint with authn/authz. -+ // These configurations ensure that only authorized users and service accounts -+ // can access the metrics endpoint. The RBAC are configured in 'config/rbac/kustomization.yaml'. More info: -+ // https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.18.4/pkg/metrics/filters#WithAuthenticationAndAuthorization -+ metricsServerOptions.FilterProvider = filters.WithAuthenticationAndAuthorization -+ } -+ - mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{ -- Scheme: scheme, -- Metrics: metricsserver.Options{ -- BindAddress: metricsAddr, -- SecureServing: secureMetrics, -- TLSOpts: tlsOpts, -- }, -+ Scheme: scheme, -+ Metrics: metricsServerOptions, ----- -endif::golang[] - -.. Update your `config/default/kustomization.yaml` file with the following changes: -+ -[source,diff] ----- - # [PROMETHEUS] To enable prometheus monitor, uncomment all sections with 'PROMETHEUS'. 
- #- ../prometheus -+ # [METRICS] Expose the controller manager metrics service. -+ - metrics_service.yaml - -+ # Uncomment the patches line if you enable Metrics, and/or are using webhooks and cert-manager - patches: -- # Protect the /metrics endpoint by putting it behind auth. -- # If you want your controller-manager to expose the /metrics -- # endpoint w/o any authn/z, please comment the following line. -- - path: manager_auth_proxy_patch.yaml -+ # [METRICS] The following patch will enable the metrics endpoint using HTTPS and the port :8443. -+ # More info: https://book.kubebuilder.io/reference/metrics -+ - path: manager_metrics_patch.yaml -+ target: -+ kind: Deployment ----- - -.. Remove the `config/default/manager_auth_proxy_patch.yaml` and `config/default/manager_config_patch.yaml` files. - -.. Create a `config/default/manager_metrics_patch.yaml` file with the following content: -+ -[source,text,subs="attributes+"] ----- -# This patch adds the args to allow exposing the metrics endpoint using HTTPS -- op: add - path: /spec/template/spec/containers/0/args/0 - value: --metrics-bind-address=:8443 -ifdef::helm,ansible[] -# This patch adds the args to allow securing the metrics endpoint -- op: add - path: /spec/template/spec/containers/0/args/0 - value: --metrics-secure -# This patch adds the args to allow RBAC-based authn/authz the metrics endpoint -- op: add - path: /spec/template/spec/containers/0/args/0 - value: --metrics-require-rbac -endif::helm,ansible[] ----- - -.. Create a `config/default/metrics_service.yaml` file with the following content: -+ -[source,yaml] ----- -apiVersion: v1 -kind: Service -metadata: - labels: - control-plane: controller-manager - app.kubernetes.io/name: - app.kubernetes.io/managed-by: kustomize - name: controller-manager-metrics-service - namespace: system -spec: - ports: - - name: https - port: 8443 - protocol: TCP - targetPort: 8443 - selector: - control-plane: controller-manager ----- - -.. 
Update your `config/manager/manager.yaml` file with the following changes: -+ -[source,diff,subs="attributes+"] ----- - - --leader-elect -ifdef::golang,helm[] -+ - --health-probe-bind-address=:8081 -endif::[] -ifdef::ansible[] -+ - --health-probe-bind-address=:6789 -endif::[] ----- - -.. Update your `config/prometheus/monitor.yaml` file with the following changes: -+ -[source,diff] ----- - - path: /metrics -- port: https -+ port: https # Ensure this is the name of the port that exposes HTTPS metrics - tlsConfig: -+ # TODO(user): The option insecureSkipVerify: true is not recommended for production since it disables -+ # certificate verification. This poses a significant security risk by making the system vulnerable to -+ # man-in-the-middle attacks, where an attacker could intercept and manipulate the communication between -+ # Prometheus and the monitored services. This could lead to unauthorized access to sensitive metrics data, -+ # compromising the integrity and confidentiality of the information. -+ # Please use the following options for secure configurations: -+ # caFile: /etc/metrics-certs/ca.crt -+ # certFile: /etc/metrics-certs/tls.crt -+ # keyFile: /etc/metrics-certs/tls.key - insecureSkipVerify: true ----- - -.. Remove the following files from the `config/rbac/` directory: -+ --- -* `auth_proxy_client_clusterrole.yaml` -* `auth_proxy_role.yaml` -* `auth_proxy_role_binding.yaml` -* `auth_proxy_service.yaml` --- - -.. Update your `config/rbac/kustomization.yaml` file with the following changes: -+ -[source,diff] ----- - - leader_election_role_binding.yaml -- # Comment the following 4 lines if you want to disable -- # the auth proxy (https://github.com/brancz/kube-rbac-proxy) -- # which protects your /metrics endpoint. -- - auth_proxy_service.yaml -- - auth_proxy_role.yaml -- - auth_proxy_role_binding.yaml -- - auth_proxy_client_clusterrole.yaml -+ # The following RBAC configurations are used to protect -+ # the metrics endpoint with authn/authz. 
These configurations -+ # ensure that only authorized users and service accounts -+ # can access the metrics endpoint. Comment the following -+ # permissions if you want to disable this protection. -+ # More info: https://book.kubebuilder.io/reference/metrics.html -+ - metrics_auth_role.yaml -+ - metrics_auth_role_binding.yaml -+ - metrics_reader_role.yaml ----- - -.. Create a `config/rbac/metrics_auth_role_binding.yaml` file with the following content: -+ -[source,yaml] ----- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - name: metrics-auth-rolebinding -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: metrics-auth-role -subjects: - - kind: ServiceAccount - name: controller-manager - namespace: system ----- - -.. Create a `config/rbac/metrics_reader_role.yaml` file with the following content: -+ -[source,yaml] ----- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - name: metrics-reader -rules: -- nonResourceURLs: - - "/metrics" - verbs: - - get ----- - -ifeval::["{context}" == "osdk-golang-updating-projects"] -:!golang: -:!type: -endif::[] -ifeval::["{context}" == "osdk-ansible-updating-projects"] -:!ansible: -:!type: -endif::[] -ifeval::["{context}" == "osdk-helm-updating-projects"] -:!helm: -:!type: -endif::[] \ No newline at end of file diff --git a/modules/osdk-workflow.adoc b/modules/osdk-workflow.adoc deleted file mode 100644 index efc7b0962f..0000000000 --- a/modules/osdk-workflow.adoc +++ /dev/null @@ -1,19 +0,0 @@ -// Module included in the following assemblies: -// -// * operators/operator_sdk/osdk-about.adoc - -[id="osdk-workflow_{context}"] -= Development workflow - -The Operator SDK provides the following workflow to develop a new Operator: - -. Create an Operator project by using the Operator SDK command-line interface (CLI). -. Define new resource APIs by adding custom resource definitions (CRDs). -. Specify resources to watch by using the Operator SDK API. -. 
Define the Operator reconciling logic in a designated handler and use the Operator SDK API to interact with resources. -. Use the Operator SDK CLI to build and generate the Operator deployment manifests. - -.Operator SDK workflow -image::osdk-workflow.png[] - -At a high level, an Operator that uses the Operator SDK processes events for watched resources in an Operator author-defined handler and takes actions to reconcile the state of the application. diff --git a/observability/monitoring/about-ocp-monitoring/monitoring-stack-architecture.adoc b/observability/monitoring/about-ocp-monitoring/monitoring-stack-architecture.adoc index 2f84137c38..744538e34b 100644 --- a/observability/monitoring/about-ocp-monitoring/monitoring-stack-architecture.adoc +++ b/observability/monitoring/about-ocp-monitoring/monitoring-stack-architecture.adoc @@ -31,7 +31,6 @@ include::modules/monitoring-targets-for-user-defined-projects.adoc[leveloffset=+ include::modules/monitoring-monitoring-stack-in-ha-clusters.adoc[leveloffset=+1] [role="_additional-resources"] .Additional resources -* xref:../../../operators/operator_sdk/osdk-ha-sno.adoc#osdk-ha-sno[High-availability or single-node cluster detection and support] * xref:../../../observability/monitoring/configuring-core-platform-monitoring/storing-and-recording-data.adoc#configuring-persistent-storage_storing-and-recording-data[Configuring persistent storage] * xref:../../../observability/monitoring/configuring-core-platform-monitoring/configuring-performance-and-scalability.adoc#configuring-performance-and-scalability[Configuring performance and scalability] @@ -46,9 +45,3 @@ ifndef::openshift-dedicated,openshift-rosa[] * xref:../../../observability/monitoring/configuring-user-workload-monitoring/preparing-to-configure-the-monitoring-stack-uwm.adoc#granting-users-permission-to-monitor-user-defined-projects_preparing-to-configure-the-monitoring-stack-uwm[Granting users permissions for monitoring for user-defined projects] * 
xref:../../../security/tls-security-profiles.adoc#tls-security-profiles[Configuring TLS security profiles] endif::openshift-dedicated,openshift-rosa[] - - - - - - diff --git a/observability/monitoring/monitoring-overview.adoc b/observability/monitoring/monitoring-overview.adoc index a4f61c7c56..7e670d02ef 100644 --- a/observability/monitoring/monitoring-overview.adoc +++ b/observability/monitoring/monitoring-overview.adoc @@ -56,7 +56,6 @@ include::modules/monitoring-targets-for-user-defined-projects.adoc[leveloffset=+ include::modules/monitoring-monitoring-stack-in-ha-clusters.adoc[leveloffset=+2] [role="_additional-resources"] .Additional resources -* xref:../../operators/operator_sdk/osdk-ha-sno.adoc#osdk-ha-sno[High-availability or single-node cluster detection and support] * xref:../../observability/monitoring/configuring-the-monitoring-stack.adoc#configuring-persistent-storage_configuring-the-monitoring-stack[Configuring persistent storage] * xref:../../observability/monitoring/configuring-the-monitoring-stack.adoc#configuring-the-monitoring-stack_configuring-the-monitoring-stack[Configuring the monitoring stack] diff --git a/operators/admin/olm-configuring-proxy-support.adoc b/operators/admin/olm-configuring-proxy-support.adoc index 6a597c3c9b..5d020a222a 100644 --- a/operators/admin/olm-configuring-proxy-support.adoc +++ b/operators/admin/olm-configuring-proxy-support.adoc @@ -24,9 +24,6 @@ ifndef::openshift-dedicated,openshift-rosa,openshift-rosa-hcp[] * xref:../../networking/configuring-a-custom-pki.adoc#configuring-a-custom-pki[Configuring a custom PKI] (custom CA certificate) endif::openshift-dedicated,openshift-rosa,openshift-rosa-hcp[] -* Developing Operators that support proxy settings for xref:../../operators/operator_sdk/golang/osdk-golang-tutorial.adoc#osdk-run-proxy_osdk-golang-tutorial[Go], xref:../../operators/operator_sdk/ansible/osdk-ansible-tutorial.adoc#osdk-run-proxy_osdk-ansible-tutorial[Ansible], and 
xref:../../operators/operator_sdk/helm/osdk-helm-tutorial.adoc#osdk-run-proxy_osdk-helm-tutorial[Helm] - - include::modules/olm-overriding-proxy-settings.adoc[leveloffset=+1] include::modules/olm-injecting-custom-ca.adoc[leveloffset=+1] diff --git a/operators/admin/olm-managing-custom-catalogs.adoc b/operators/admin/olm-managing-custom-catalogs.adoc index 290cde3bb8..a52c03ab15 100644 --- a/operators/admin/olm-managing-custom-catalogs.adoc +++ b/operators/admin/olm-managing-custom-catalogs.adoc @@ -17,8 +17,6 @@ and Operator catalog maintainers can create and manage custom catalogs packaged [IMPORTANT] ==== Kubernetes periodically deprecates certain APIs that are removed in subsequent releases. As a result, Operators are unable to use removed APIs starting with the version of {product-title} that uses the Kubernetes version that removed the API. - -If your cluster is using custom catalogs, see xref:../../operators/operator_sdk/osdk-working-bundle-images#osdk-control-compat_osdk-working-bundle-images[Controlling Operator compatibility with {product-title} versions] for more details about how Operator authors can update their projects to help avoid workload issues and prevent incompatible upgrades. 
==== [role="_additional-resources"] @@ -138,4 +136,4 @@ include::modules/olm-removing-catalogs.adoc[leveloffset=+1] endif::openshift-dedicated,openshift-rosa,openshift-rosa-hcp[] ifdef::openshift-dedicated,openshift-rosa,openshift-rosa-hcp[] include::modules/sd-olm-removing-catalogs.adoc[leveloffset=+1] -endif::openshift-dedicated,openshift-rosa,openshift-rosa-hcp[] \ No newline at end of file +endif::openshift-dedicated,openshift-rosa,openshift-rosa-hcp[] diff --git a/operators/index.adoc b/operators/index.adoc index af8ff45658..58003a7fa6 100644 --- a/operators/index.adoc +++ b/operators/index.adoc @@ -13,15 +13,6 @@ include::modules/operators-overview.adoc[leveloffset=+1] As an Operator author, you can perform the following development tasks for OLM-based Operators: -** xref:../operators/operator_sdk/osdk-installing-cli.adoc#osdk-installing-cli[Install Operator SDK CLI]. -// The Operator quickstarts aren't published for OSD/ROSA, so for OSD/ROSA, these xrefs point to the tutorials instead. -ifndef::openshift-dedicated,openshift-rosa,openshift-rosa-hcp[] -** Create xref:../operators/operator_sdk/golang/osdk-golang-quickstart.adoc#osdk-golang-quickstart[Go-based Operators], xref:../operators/operator_sdk/ansible/osdk-ansible-quickstart.adoc#osdk-ansible-quickstart[Ansible-based Operators], and xref:../operators/operator_sdk/helm/osdk-helm-quickstart.adoc#osdk-helm-quickstart[Helm-based Operators]. -endif::openshift-dedicated,openshift-rosa,openshift-rosa-hcp[] -ifdef::openshift-dedicated,openshift-rosa,openshift-rosa-hcp[] -** Create xref:../operators/operator_sdk/golang/osdk-golang-tutorial.adoc#osdk-golang-tutorial[Go-based Operators], xref:../operators/operator_sdk/ansible/osdk-ansible-tutorial.adoc#osdk-ansible-tutorial[Ansible-based Operators], and xref:../operators/operator_sdk/helm/osdk-helm-tutorial.adoc#osdk-helm-tutorial[Helm-based Operators]. 
-endif::openshift-dedicated,openshift-rosa,openshift-rosa-hcp[] -** xref:../operators/operator_sdk/osdk-about.adoc#osdk-about[Use Operator SDK to build, test, and deploy an Operator]. ifndef::openshift-dedicated,openshift-rosa,openshift-rosa-hcp[] ** xref:../operators/user/olm-installing-operators-in-namespace.adoc#olm-installing-operators-in-namespace[Install and subscribe an Operator to your namespace]. endif::openshift-dedicated,openshift-rosa,openshift-rosa-hcp[] @@ -71,4 +62,4 @@ endif::openshift-dedicated,openshift-rosa,openshift-rosa-hcp[] [id="operators-overview-next-steps"] == Next steps -To understand more about Operators, see xref:../operators/understanding/olm-what-operators-are.adoc#olm-what-operators-are[What are Operators?] +* xref:../operators/understanding/olm-what-operators-are.adoc#olm-what-operators-are[What are Operators?] diff --git a/operators/operator-reference.adoc b/operators/operator-reference.adoc index 004a94293f..a202f2f522 100644 --- a/operators/operator-reference.adoc +++ b/operators/operator-reference.adoc @@ -167,7 +167,6 @@ In {product-title}, OLM functionality is provided across a set of cluster Operat [id="cluster-operators-ref-olm-addtl-resources"] === Additional resources * xref:../operators/understanding/olm/olm-understanding-olm.adoc#olm-understanding-olm[Understanding Operator Lifecycle Manager (OLM)] -* xref:../operators/operator_sdk/osdk-working-bundle-images.adoc#osdk-control-compat_osdk-working-bundle-images[Controlling Operator compatibility with {product-title} versions] include::modules/olmv1-clusteroperator.adoc[leveloffset=+1] diff --git a/operators/operator_sdk/ansible/_attributes b/operators/operator_sdk/ansible/_attributes deleted file mode 120000 index bf7c2529fd..0000000000 --- a/operators/operator_sdk/ansible/_attributes +++ /dev/null @@ -1 +0,0 @@ -../../../_attributes/ \ No newline at end of file diff --git a/operators/operator_sdk/ansible/images b/operators/operator_sdk/ansible/images deleted file mode 
120000 index 5fa6987088..0000000000 --- a/operators/operator_sdk/ansible/images +++ /dev/null @@ -1 +0,0 @@ -../../images \ No newline at end of file diff --git a/operators/operator_sdk/ansible/modules b/operators/operator_sdk/ansible/modules deleted file mode 120000 index 8b0e854007..0000000000 --- a/operators/operator_sdk/ansible/modules +++ /dev/null @@ -1 +0,0 @@ -../../modules \ No newline at end of file diff --git a/operators/operator_sdk/ansible/osdk-ansible-cr-status.adoc b/operators/operator_sdk/ansible/osdk-ansible-cr-status.adoc deleted file mode 100644 index de952fad9e..0000000000 --- a/operators/operator_sdk/ansible/osdk-ansible-cr-status.adoc +++ /dev/null @@ -1,12 +0,0 @@ -:_mod-docs-content-type: ASSEMBLY -[id="osdk-ansible-cr-status"] -= Custom resource status management -include::_attributes/common-attributes.adoc[] -:context: osdk-ansible-cr-mgmt - -toc::[] - -include::snippets/osdk-deprecation.adoc[] - -include::modules/osdk-ansible-cr-status-about.adoc[leveloffset=+1] -include::modules/osdk-ansible-cr-status-manual.adoc[leveloffset=+1] diff --git a/operators/operator_sdk/ansible/osdk-ansible-inside-operator.adoc b/operators/operator_sdk/ansible/osdk-ansible-inside-operator.adoc deleted file mode 100644 index dff4c10ee1..0000000000 --- a/operators/operator_sdk/ansible/osdk-ansible-inside-operator.adoc +++ /dev/null @@ -1,19 +0,0 @@ -:_mod-docs-content-type: ASSEMBLY -[id="osdk-ansible-inside-operator"] -= Using Ansible inside an Operator -include::_attributes/common-attributes.adoc[] -:context: osdk-ansible-inside-operator - -toc::[] - -After you are familiar with xref:../../../operators/operator_sdk/ansible/osdk-ansible-k8s-collection.adoc#osdk-ansible-k8s-collection[using the Kubernetes Collection for Ansible locally], you can trigger the same Ansible logic inside of an Operator when a custom resource (CR) changes. This example maps an Ansible role to a specific Kubernetes resource that the Operator watches. 
This mapping is done in the `watches.yaml` file. - -include::snippets/osdk-deprecation.adoc[] - -include::modules/osdk-ansible-custom-resource-files.adoc[leveloffset=+1] -include::modules/osdk-ansible-inside-operator-local.adoc[leveloffset=+1] -include::modules/osdk-run-deployment.adoc[leveloffset=+1] -include::modules/osdk-ansible-inside-operator-logs.adoc[leveloffset=+1] -include::modules/osdk-ansible-inside-operator-logs-view.adoc[leveloffset=+2] -include::modules/osdk-ansible-inside-operator-logs-full-result.adoc[leveloffset=+2] -include::modules/osdk-ansible-inside-operator-logs-verbose.adoc[leveloffset=+2] diff --git a/operators/operator_sdk/ansible/osdk-ansible-k8s-collection.adoc b/operators/operator_sdk/ansible/osdk-ansible-k8s-collection.adoc deleted file mode 100644 index 98c112a9ad..0000000000 --- a/operators/operator_sdk/ansible/osdk-ansible-k8s-collection.adoc +++ /dev/null @@ -1,23 +0,0 @@ -:_mod-docs-content-type: ASSEMBLY -[id="osdk-ansible-k8s-collection"] -= Kubernetes Collection for Ansible -include::_attributes/common-attributes.adoc[] -:context: osdk-ansible-k8s-collection - -toc::[] - -To manage the lifecycle of your application on Kubernetes using Ansible, you can use the link:https://galaxy.ansible.com/community/kubernetes[Kubernetes Collection for Ansible]. This collection of Ansible modules allows a developer to either leverage their existing Kubernetes resource files written in YAML or express the lifecycle management in native Ansible. - -include::snippets/osdk-deprecation.adoc[] - -One of the biggest benefits of using Ansible in conjunction with existing Kubernetes resource files is the ability to use Jinja templating so that you can customize resources with the simplicity of a few variables in Ansible. - -This section goes into detail on usage of the Kubernetes Collection. To get started, install the collection on your local workstation and test it using a playbook before moving on to using it within an Operator. 
- -include::modules/osdk-ansible-k8s-install.adoc[leveloffset=+1] -include::modules/osdk-ansible-k8s-local.adoc[leveloffset=+1] - -[id="osdk-ansible-k8s-collection-next-steps"] -== Next steps - -* See xref:../../../operators/operator_sdk/ansible/osdk-ansible-inside-operator.adoc#osdk-ansible-inside-operator[Using Ansible inside an Operator] for details on triggering your custom Ansible logic inside of an Operator when a custom resource (CR) changes. diff --git a/operators/operator_sdk/ansible/osdk-ansible-project-layout.adoc b/operators/operator_sdk/ansible/osdk-ansible-project-layout.adoc deleted file mode 100644 index dcfa17ee5c..0000000000 --- a/operators/operator_sdk/ansible/osdk-ansible-project-layout.adoc +++ /dev/null @@ -1,13 +0,0 @@ -:_mod-docs-content-type: ASSEMBLY -[id="osdk-ansible-project-layout"] -= Project layout for Ansible-based Operators -include::_attributes/common-attributes.adoc[] -:context: osdk-ansible-project-layout - -toc::[] - -The `operator-sdk` CLI can generate, or _scaffold_, a number of packages and files for each Operator project. - -include::snippets/osdk-deprecation.adoc[] - -include::modules/osdk-ansible-project-layout.adoc[leveloffset=+1] diff --git a/operators/operator_sdk/ansible/osdk-ansible-quickstart.adoc b/operators/operator_sdk/ansible/osdk-ansible-quickstart.adoc deleted file mode 100644 index 1e65c2a2fc..0000000000 --- a/operators/operator_sdk/ansible/osdk-ansible-quickstart.adoc +++ /dev/null @@ -1,29 +0,0 @@ -:_mod-docs-content-type: ASSEMBLY -[id="osdk-ansible-quickstart"] -= Getting started with Operator SDK for Ansible-based Operators -include::_attributes/common-attributes.adoc[] -:context: osdk-ansible-quickstart - -toc::[] - -// This assembly is currently excluded from the OSD and ROSA docs, because it requires cluster-admin permissions. 
- -The Operator SDK includes options for generating an Operator project that leverages existing Ansible playbooks and modules to deploy Kubernetes resources as a unified application, without having to write any Go code. - -include::snippets/osdk-deprecation.adoc[] - -To demonstrate the basics of setting up and running an link:https://docs.ansible.com/ansible/latest/index.html[Ansible]-based Operator using tools and libraries provided by the Operator SDK, Operator developers can build an example Ansible-based Operator for Memcached, a distributed key-value store, and deploy it to a cluster. - -include::modules/osdk-common-prereqs.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources -* xref:../../../operators/operator_sdk/osdk-installing-cli.adoc#osdk-installing-cli[Installing the Operator SDK CLI] -* xref:../../../cli_reference/openshift_cli/getting-started-cli.adoc#getting-started-cli[Getting started with the OpenShift CLI] - -include::modules/osdk-quickstart.adoc[leveloffset=+1] - -[id="osdk-ansible-quickstart-next-steps"] -== Next steps - -* See xref:../../../operators/operator_sdk/ansible/osdk-ansible-tutorial.adoc#osdk-ansible-tutorial[Operator SDK tutorial for Ansible-based Operators] for a more in-depth walkthrough on building an Ansible-based Operator. 
diff --git a/operators/operator_sdk/ansible/osdk-ansible-support.adoc b/operators/operator_sdk/ansible/osdk-ansible-support.adoc deleted file mode 100644 index 80aa84d3cd..0000000000 --- a/operators/operator_sdk/ansible/osdk-ansible-support.adoc +++ /dev/null @@ -1,14 +0,0 @@ -:_mod-docs-content-type: ASSEMBLY -[id="osdk-ansible-support"] -= Ansible support in Operator SDK -include::_attributes/common-attributes.adoc[] -:context: osdk-ansible-support - -toc::[] - -include::snippets/osdk-deprecation.adoc[] - -include::modules/osdk-ansible-custom-resource-files.adoc[leveloffset=+1] -include::modules/osdk-ansible-watches-file.adoc[leveloffset=+1] -include::modules/osdk-ansible-extra-variables.adoc[leveloffset=+1] -include::modules/osdk-ansible-runner-directory.adoc[leveloffset=+1] diff --git a/operators/operator_sdk/ansible/osdk-ansible-tutorial.adoc b/operators/operator_sdk/ansible/osdk-ansible-tutorial.adoc deleted file mode 100644 index 700bdb3698..0000000000 --- a/operators/operator_sdk/ansible/osdk-ansible-tutorial.adoc +++ /dev/null @@ -1,93 +0,0 @@ -:_mod-docs-content-type: ASSEMBLY -[id="osdk-ansible-tutorial"] -= Operator SDK tutorial for Ansible-based Operators -include::_attributes/common-attributes.adoc[] -:context: osdk-ansible-tutorial - -toc::[] - -Operator developers can take advantage of link:https://docs.ansible.com/ansible/latest/index.html[Ansible] support in the Operator SDK to build an example Ansible-based Operator for Memcached, a distributed key-value store, and manage its lifecycle. 
This tutorial walks through the following process: - -* Create a Memcached deployment -* Ensure that the deployment size is the same as specified by the `Memcached` custom resource (CR) spec -* Update the `Memcached` CR status using the status writer with the names of the `memcached` pods - -include::snippets/osdk-deprecation.adoc[] - -This process is accomplished by using two centerpieces of the Operator Framework: - -Operator SDK:: The `operator-sdk` CLI tool and `controller-runtime` library API - -Operator Lifecycle Manager (OLM):: Installation, upgrade, and role-based access control (RBAC) of Operators on a cluster - -ifndef::openshift-dedicated,openshift-rosa,openshift-rosa-hcp[] -[NOTE] -==== -This tutorial goes into greater detail than xref:../../../operators/operator_sdk/ansible/osdk-ansible-quickstart.adoc#osdk-ansible-quickstart[Getting started with Operator SDK for Ansible-based Operators]. -==== -endif::openshift-dedicated,openshift-rosa,openshift-rosa-hcp[] - -// The "Getting started" quickstarts require cluster-admin and are therefore only available in OCP. -ifdef::openshift-dedicated,openshift-rosa,openshift-rosa-hcp[] -[NOTE] -==== -This tutorial goes into greater detail than link:https://access.redhat.com/documentation/en-us/openshift_container_platform/4.13/html-single/operators/index#osdk-ansible-quickstart[Getting started with Operator SDK for Ansible-based Operators] in the OpenShift Container Platform documentation. 
-==== -endif::openshift-dedicated,openshift-rosa,openshift-rosa-hcp[] - -include::modules/osdk-common-prereqs.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources -* xref:../../../operators/operator_sdk/osdk-installing-cli.adoc#osdk-installing-cli[Installing the Operator SDK CLI] -// TODO-HCP remove line 44 and 46 ifndef conditions for HCP after cli_tools book is migrated -ifndef::openshift-rosa-hcp[] -* xref:../../../cli_reference/openshift_cli/getting-started-cli.adoc#getting-started-cli[Getting started with the OpenShift CLI] -endif::openshift-rosa-hcp[] - -include::modules/osdk-create-project.adoc[leveloffset=+1] -include::modules/osdk-project-file.adoc[leveloffset=+2] - -include::modules/osdk-ansible-create-api.adoc[leveloffset=+1] -include::modules/osdk-ansible-modify-manager.adoc[leveloffset=+1] - -include::modules/osdk-run-proxy.adoc[leveloffset=+1] - -include::modules/osdk-run-operator.adoc[leveloffset=+1] - -ifdef::openshift-dedicated,openshift-rosa,openshift-rosa-hcp[] -[role="_additional-resources"] -.Additional resources -* link:https://access.redhat.com/documentation/en-us/openshift_container_platform/4.13/html-single/operators/index#osdk-run-locally_osdk-ansible-tutorial[Running locally outside the cluster] (OpenShift Container Platform documentation) -* link:https://access.redhat.com/documentation/en-us/openshift_container_platform/4.13/html-single/operators/index#osdk-run-deployment_osdk-ansible-tutorial[Running as a deployment on the cluster] (OpenShift Container Platform documentation) -endif::openshift-dedicated,openshift-rosa,openshift-rosa-hcp[] - -// In OSD/ROSA, the only applicable option for running the Operator is to bundle and deploy with OLM. 
-ifndef::openshift-dedicated,openshift-rosa,openshift-rosa-hcp[] -include::modules/osdk-run-locally.adoc[leveloffset=+2] -include::modules/osdk-run-deployment.adoc[leveloffset=+2] -endif::openshift-dedicated,openshift-rosa,openshift-rosa-hcp[] - -[id="osdk-bundle-deploy-olm_{context}"] -=== Bundling an Operator and deploying with Operator Lifecycle Manager - -include::modules/osdk-bundle-operator.adoc[leveloffset=+3] -include::modules/osdk-deploy-olm.adoc[leveloffset=+3] - -include::modules/osdk-create-cr.adoc[leveloffset=+1] - -[role="_additional-resources"] -[id="osdk-ansible-tutorial-addtl-resources"] -== Additional resources - -* See xref:../../../operators/operator_sdk/ansible/osdk-ansible-project-layout.adoc#osdk-ansible-project-layout[Project layout for Ansible-based Operators] to learn about the directory structures created by the Operator SDK. -ifndef::openshift-dedicated,openshift-rosa,openshift-rosa-hcp[] -* If a xref:../../../networking/enable-cluster-wide-proxy.adoc#enable-cluster-wide-proxy[cluster-wide egress proxy is configured], cluster administrators can xref:../../../operators/admin/olm-configuring-proxy-support.adoc#olm-configuring-proxy-support[override the proxy settings or inject a custom CA certificate] for specific Operators running on Operator Lifecycle Manager (OLM). 
-endif::openshift-dedicated,openshift-rosa,openshift-rosa-hcp[] -ifdef::openshift-dedicated,openshift-rosa,openshift-rosa-hcp[] -// TODO-HCP remove line 88 and 91 ifndef conditions for HCP after networking book is migrated -ifndef::openshift-rosa-hcp[] -* If a xref:../../../networking/configuring-cluster-wide-proxy.adoc#configuring-a-cluster-wide-proxy[cluster-wide egress proxy is configured] -endif::openshift-rosa-hcp[] -, administrators with the `dedicated-admin` role can xref:../../../operators/admin/olm-configuring-proxy-support.adoc#olm-configuring-proxy-support[override the proxy settings or inject a custom CA certificate] for specific Operators running on Operator Lifecycle Manager (OLM). -endif::openshift-dedicated,openshift-rosa,openshift-rosa-hcp[] diff --git a/operators/operator_sdk/ansible/osdk-ansible-updating-projects.adoc b/operators/operator_sdk/ansible/osdk-ansible-updating-projects.adoc deleted file mode 100644 index f0e5f17a51..0000000000 --- a/operators/operator_sdk/ansible/osdk-ansible-updating-projects.adoc +++ /dev/null @@ -1,22 +0,0 @@ -:_mod-docs-content-type: ASSEMBLY -[id="osdk-ansible-updating-projects"] -= Updating projects for newer Operator SDK versions -include::_attributes/common-attributes.adoc[] -:context: osdk-ansible-updating-projects - -toc::[] - -{product-title} {product-version} supports Operator SDK {osdk_ver}. If you already have the {osdk_ver_n1} CLI installed on your workstation, you can update the CLI to {osdk_ver} by xref:../../../operators/operator_sdk/osdk-installing-cli.adoc#osdk-installing-cli[installing the latest version]. - -include::snippets/osdk-deprecation.adoc[] - -However, to ensure your existing Operator projects maintain compatibility with Operator SDK {osdk_ver}, update steps are required for the associated breaking changes introduced since {osdk_ver_n1}. You must perform the update steps manually in any of your Operator projects that were previously created or maintained with {osdk_ver_n1}. 
- -include::modules/osdk-updating-1361-to-138.adoc[leveloffset=+1] - -[id="additional-resources_osdk-ansible-upgrading-projects"] -[role="_additional-resources"] -== Additional resources - -* link:https://docs.redhat.com/en/documentation/openshift_container_platform/4.17/html-single/operators/index#osdk-upgrading-projects_osdk-ansible-updating-projects[Updating Ansible-based Operator projects for Operator SDK 1.36.1] ({product-title} 4.17) -* xref:../../../operators/operator_sdk/osdk-pkgman-to-bundle.adoc#osdk-pkgman-to-bundle[Migrating package manifest projects to bundle format] diff --git a/operators/operator_sdk/ansible/snippets b/operators/operator_sdk/ansible/snippets deleted file mode 120000 index ce62fd7c41..0000000000 --- a/operators/operator_sdk/ansible/snippets +++ /dev/null @@ -1 +0,0 @@ -../../../snippets/ \ No newline at end of file diff --git a/operators/operator_sdk/golang/_attributes b/operators/operator_sdk/golang/_attributes deleted file mode 120000 index 20cc1dcb77..0000000000 --- a/operators/operator_sdk/golang/_attributes +++ /dev/null @@ -1 +0,0 @@ -../../_attributes/ \ No newline at end of file diff --git a/operators/operator_sdk/golang/images b/operators/operator_sdk/golang/images deleted file mode 120000 index 5e67573196..0000000000 --- a/operators/operator_sdk/golang/images +++ /dev/null @@ -1 +0,0 @@ -../images \ No newline at end of file diff --git a/operators/operator_sdk/golang/modules b/operators/operator_sdk/golang/modules deleted file mode 120000 index 464b823aca..0000000000 --- a/operators/operator_sdk/golang/modules +++ /dev/null @@ -1 +0,0 @@ -../modules \ No newline at end of file diff --git a/operators/operator_sdk/golang/osdk-golang-project-layout.adoc b/operators/operator_sdk/golang/osdk-golang-project-layout.adoc deleted file mode 100644 index d0cff91bce..0000000000 --- a/operators/operator_sdk/golang/osdk-golang-project-layout.adoc +++ /dev/null @@ -1,13 +0,0 @@ -:_mod-docs-content-type: ASSEMBLY 
-[id="osdk-golang-project-layout"] -= Project layout for Go-based Operators -include::_attributes/common-attributes.adoc[] -:context: osdk-golang-project-layout - -toc::[] - -The `operator-sdk` CLI can generate, or _scaffold_, a number of packages and files for each Operator project. - -include::snippets/osdk-deprecation.adoc[] - -include::modules/osdk-golang-project-layout.adoc[leveloffset=+1] diff --git a/operators/operator_sdk/golang/osdk-golang-quickstart.adoc b/operators/operator_sdk/golang/osdk-golang-quickstart.adoc deleted file mode 100644 index 04a0c32dc5..0000000000 --- a/operators/operator_sdk/golang/osdk-golang-quickstart.adoc +++ /dev/null @@ -1,27 +0,0 @@ -:_mod-docs-content-type: ASSEMBLY -[id="osdk-golang-quickstart"] -= Getting started with Operator SDK for Go-based Operators -include::_attributes/common-attributes.adoc[] -:context: osdk-golang-quickstart - -toc::[] - -// This assembly is currently excluded from the OSD and ROSA docs, because it requires cluster-admin permissions. - -To demonstrate the basics of setting up and running a Go-based Operator using tools and libraries provided by the Operator SDK, Operator developers can build an example Go-based Operator for Memcached, a distributed key-value store, and deploy it to a cluster. 
- -include::snippets/osdk-deprecation.adoc[] - -include::modules/osdk-common-prereqs.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources -* xref:../../../operators/operator_sdk/osdk-installing-cli.adoc#osdk-installing-cli[Installing the Operator SDK CLI] -* xref:../../../cli_reference/openshift_cli/getting-started-cli.adoc#getting-started-cli[Getting started with the OpenShift CLI] - -include::modules/osdk-quickstart.adoc[leveloffset=+1] - -[id="osdk-golang-quickstart-next-steps"] -== Next steps - -* See xref:../../../operators/operator_sdk/golang/osdk-golang-tutorial.adoc#osdk-golang-tutorial[Operator SDK tutorial for Go-based Operators] for a more in-depth walkthrough on building a Go-based Operator. diff --git a/operators/operator_sdk/golang/osdk-golang-tutorial.adoc b/operators/operator_sdk/golang/osdk-golang-tutorial.adoc deleted file mode 100644 index 51cb7b75d6..0000000000 --- a/operators/operator_sdk/golang/osdk-golang-tutorial.adoc +++ /dev/null @@ -1,103 +0,0 @@ -:_mod-docs-content-type: ASSEMBLY -[id="osdk-golang-tutorial"] -= Operator SDK tutorial for Go-based Operators -include::_attributes/common-attributes.adoc[] -:context: osdk-golang-tutorial - -toc::[] - -Operator developers can take advantage of Go programming language support in the Operator SDK to build an example Go-based Operator for Memcached, a distributed key-value store, and manage its lifecycle. 
- -include::snippets/osdk-deprecation.adoc[] - -This process is accomplished using two centerpieces of the Operator Framework: - -Operator SDK:: The `operator-sdk` CLI tool and `controller-runtime` library API - -Operator Lifecycle Manager (OLM):: Installation, upgrade, and role-based access control (RBAC) of Operators on a cluster - -ifndef::openshift-dedicated,openshift-rosa,openshift-rosa-hcp[] -[NOTE] -==== -This tutorial goes into greater detail than xref:../../../operators/operator_sdk/golang/osdk-golang-quickstart.adoc#osdk-golang-quickstart[Getting started with Operator SDK for Go-based Operators]. -==== -endif::openshift-dedicated,openshift-rosa,openshift-rosa-hcp[] - -// The "Getting started" quickstarts require cluster-admin and are therefore only available in OCP. -ifdef::openshift-dedicated,openshift-rosa,openshift-rosa-hcp[] -[NOTE] -==== -This tutorial goes into greater detail than link:https://access.redhat.com/documentation/en-us/openshift_container_platform/4.13/html-single/operators/index#osdk-golang-quickstart[Getting started with Operator SDK for Go-based Operators] in the OpenShift Container Platform documentation. 
-==== -endif::openshift-dedicated,openshift-rosa,openshift-rosa-hcp[] - -include::modules/osdk-common-prereqs.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources -* xref:../../../operators/operator_sdk/osdk-installing-cli.adoc#osdk-installing-cli[Installing the Operator SDK CLI] -// TODO-HCP remove conditions ifndef line 40 & 42 for HCP after cli_tools book is migrated -ifndef::openshift-rosa-hcp[] -* xref:../../../cli_reference/openshift_cli/getting-started-cli.adoc#getting-started-cli[Getting started with the OpenShift CLI] -endif::openshift-rosa-hcp[] - -include::modules/osdk-create-project.adoc[leveloffset=+1] -include::modules/osdk-project-file.adoc[leveloffset=+2] -include::modules/osdk-golang-manager.adoc[leveloffset=+2] -include::modules/osdk-golang-multi-group-apis.adoc[leveloffset=+2] - -include::modules/osdk-golang-create-api-controller.adoc[leveloffset=+1] -include::modules/osdk-golang-define-api.adoc[leveloffset=+2] -include::modules/osdk-golang-generate-crd.adoc[leveloffset=+2] -include::modules/osdk-about-openapi-validation.adoc[leveloffset=+3] - -include::modules/osdk-golang-implement-controller.adoc[leveloffset=+1] - -The next subsections explain how the controller in the example implementation watches resources and how the reconcile loop is triggered. You can skip these subsections to go directly to xref:../../../operators/operator_sdk/golang/osdk-golang-tutorial.adoc#osdk-run-operator_osdk-golang-tutorial[Running the Operator]. 
- -include::modules/osdk-golang-controller-resources.adoc[leveloffset=+2] -include::modules/osdk-golang-controller-configs.adoc[leveloffset=+2] -include::modules/osdk-golang-controller-reconcile-loop.adoc[leveloffset=+2] -include::modules/osdk-golang-controller-rbac-markers.adoc[leveloffset=+2] - -include::modules/osdk-run-proxy.adoc[leveloffset=+1] - -include::modules/osdk-run-operator.adoc[leveloffset=+1] -ifdef::openshift-dedicated,openshift-rosa,openshift-rosa-hcp[] -[role="_additional-resources"] -.Additional resources -* link:https://access.redhat.com/documentation/en-us/openshift_container_platform/4.13/html-single/operators/index#osdk-run-locally_osdk-golang-tutorial[Running locally outside the cluster] (OpenShift Container Platform documentation) -* link:https://access.redhat.com/documentation/en-us/openshift_container_platform/4.13/html-single/operators/index#osdk-run-deployment_osdk-golang-tutorial[Running as a deployment on the cluster] (OpenShift Container Platform documentation) -endif::openshift-dedicated,openshift-rosa,openshift-rosa-hcp[] - -// In OSD/ROSA, the only applicable option for running the Operator is to bundle and deploy with OLM. 
-ifndef::openshift-dedicated,openshift-rosa,openshift-rosa-hcp[] -include::modules/osdk-run-locally.adoc[leveloffset=+2] -include::modules/osdk-run-deployment.adoc[leveloffset=+2] -endif::openshift-dedicated,openshift-rosa,openshift-rosa-hcp[] - -[id="osdk-bundle-deploy-olm_{context}"] -=== Bundling an Operator and deploying with Operator Lifecycle Manager - -include::modules/osdk-bundle-operator.adoc[leveloffset=+3] -include::modules/osdk-deploy-olm.adoc[leveloffset=+3] - -include::modules/osdk-create-cr.adoc[leveloffset=+1] - -[id="osdk-golang-tutorial-addtl-resources"] -[role="_additional-resources"] -== Additional resources - -* See xref:../../../operators/operator_sdk/golang/osdk-golang-project-layout.adoc#osdk-golang-project-layout[Project layout for Go-based Operators] to learn about the directory structures created by the Operator SDK. -ifndef::openshift-dedicated,openshift-rosa,openshift-rosa-hcp[] -* If a xref:../../../networking/enable-cluster-wide-proxy.adoc#enable-cluster-wide-proxy[cluster-wide egress proxy is configured], cluster administrators can xref:../../../operators/admin/olm-configuring-proxy-support.adoc#olm-configuring-proxy-support[override the proxy settings or inject a custom CA certificate] for specific Operators running on Operator Lifecycle Manager (OLM). 
-endif::openshift-dedicated,openshift-rosa,openshift-rosa-hcp[] -ifdef::openshift-dedicated,openshift-rosa[] -// TODO-HCP remove line 97 and 99 conditions and add the HCP condition to line 92 and 98 for HCP after networking book is migrated -ifndef::openshift-rosa-hcp[] -* If a xref:../../../networking/configuring-cluster-wide-proxy.adoc#configuring-a-cluster-wide-proxy[cluster-wide egress proxy is configured], -endif::openshift-rosa-hcp[] -administrators with the `dedicated-admin` role can xref:../../../operators/admin/olm-configuring-proxy-support.adoc#olm-configuring-proxy-support[override the proxy settings or inject a custom CA certificate] for specific Operators running on Operator Lifecycle Manager (OLM). -endif::openshift-dedicated,openshift-rosa[] - - diff --git a/operators/operator_sdk/golang/osdk-golang-updating-projects.adoc b/operators/operator_sdk/golang/osdk-golang-updating-projects.adoc deleted file mode 100644 index 3f0b2e7d10..0000000000 --- a/operators/operator_sdk/golang/osdk-golang-updating-projects.adoc +++ /dev/null @@ -1,22 +0,0 @@ -:_mod-docs-content-type: ASSEMBLY -[id="osdk-golang-updating-projects"] -= Updating Go-based Operator projects for newer Operator SDK versions -include::_attributes/common-attributes.adoc[] -:context: osdk-golang-updating-projects - -toc::[] - -{product-title} {product-version} supports Operator SDK {osdk_ver}. If you already have the {osdk_ver_n1} CLI installed on your workstation, you can update the CLI to {osdk_ver} by xref:../../../operators/operator_sdk/osdk-installing-cli.adoc#osdk-installing-cli[installing the latest version]. - -include::snippets/osdk-deprecation.adoc[] - -However, to ensure your existing Operator projects maintain compatibility with Operator SDK {osdk_ver}, update steps are required for the associated breaking changes introduced since {osdk_ver_n1}. You must perform the update steps manually in any of your Operator projects that were previously created or maintained with {osdk_ver_n1}. 
- -include::modules/osdk-updating-1361-to-138.adoc[leveloffset=+1] - -[id="additional-resources_osdk-upgrading-projects-golang"] -[role="_additional-resources"] -== Additional resources - -* link:https://docs.redhat.com/en/documentation/openshift_container_platform/4.17/html-single/operators/index#osdk-upgrading-projects_osdk-golang-updating-projects[Updating Go-based projects for Operator SDK 1.36.1] ({product-title} 4.17) -* xref:../../../operators/operator_sdk/osdk-pkgman-to-bundle.adoc#osdk-pkgman-to-bundle[Migrating package manifest projects to bundle format] \ No newline at end of file diff --git a/operators/operator_sdk/golang/snippets b/operators/operator_sdk/golang/snippets deleted file mode 120000 index ce62fd7c41..0000000000 --- a/operators/operator_sdk/golang/snippets +++ /dev/null @@ -1 +0,0 @@ -../../../snippets/ \ No newline at end of file diff --git a/operators/operator_sdk/helm/_attributes b/operators/operator_sdk/helm/_attributes deleted file mode 120000 index 20cc1dcb77..0000000000 --- a/operators/operator_sdk/helm/_attributes +++ /dev/null @@ -1 +0,0 @@ -../../_attributes/ \ No newline at end of file diff --git a/operators/operator_sdk/helm/images b/operators/operator_sdk/helm/images deleted file mode 120000 index 5fa6987088..0000000000 --- a/operators/operator_sdk/helm/images +++ /dev/null @@ -1 +0,0 @@ -../../images \ No newline at end of file diff --git a/operators/operator_sdk/helm/modules b/operators/operator_sdk/helm/modules deleted file mode 120000 index 8b0e854007..0000000000 --- a/operators/operator_sdk/helm/modules +++ /dev/null @@ -1 +0,0 @@ -../../modules \ No newline at end of file diff --git a/operators/operator_sdk/helm/osdk-helm-project-layout.adoc b/operators/operator_sdk/helm/osdk-helm-project-layout.adoc deleted file mode 100644 index 4fe0b7d173..0000000000 --- a/operators/operator_sdk/helm/osdk-helm-project-layout.adoc +++ /dev/null @@ -1,13 +0,0 @@ -:_mod-docs-content-type: ASSEMBLY -[id="osdk-helm-project-layout"] -= 
Project layout for Helm-based Operators -include::_attributes/common-attributes.adoc[] -:context: osdk-helm-project-layout - -toc::[] - -The `operator-sdk` CLI can generate, or _scaffold_, a number of packages and files for each Operator project. - -include::snippets/osdk-deprecation.adoc[] - -include::modules/osdk-helm-project-layout.adoc[leveloffset=+1] diff --git a/operators/operator_sdk/helm/osdk-helm-quickstart.adoc b/operators/operator_sdk/helm/osdk-helm-quickstart.adoc deleted file mode 100644 index e00197d69b..0000000000 --- a/operators/operator_sdk/helm/osdk-helm-quickstart.adoc +++ /dev/null @@ -1,29 +0,0 @@ -:_mod-docs-content-type: ASSEMBLY -[id="osdk-helm-quickstart"] -= Getting started with Operator SDK for Helm-based Operators -include::_attributes/common-attributes.adoc[] -:context: osdk-helm-quickstart - -toc::[] - -// This assembly is currently excluded from the OSD and ROSA docs, because it requires cluster-admin permissions. - -The Operator SDK includes options for generating an Operator project that leverages existing link:https://helm.sh/docs/[Helm] charts to deploy Kubernetes resources as a unified application, without having to write any Go code. - -include::snippets/osdk-deprecation.adoc[] - -To demonstrate the basics of setting up and running an link:https://helm.sh/docs/[Helm]-based Operator using tools and libraries provided by the Operator SDK, Operator developers can build an example Helm-based Operator for Nginx and deploy it to a cluster. 
- -include::modules/osdk-common-prereqs.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources -* xref:../../../operators/operator_sdk/osdk-installing-cli.adoc#osdk-installing-cli[Installing the Operator SDK CLI] -* xref:../../../cli_reference/openshift_cli/getting-started-cli.adoc#getting-started-cli[Getting started with the OpenShift CLI] - -include::modules/osdk-quickstart.adoc[leveloffset=+1] - -[id="osdk-helm-quickstart-next-steps"] -== Next steps - -* See xref:../../../operators/operator_sdk/helm/osdk-helm-tutorial.adoc#osdk-helm-tutorial[Operator SDK tutorial for Helm-based Operators] for a more in-depth walkthrough on building a Helm-based Operator. diff --git a/operators/operator_sdk/helm/osdk-helm-support.adoc b/operators/operator_sdk/helm/osdk-helm-support.adoc deleted file mode 100644 index 2b3eacc0f3..0000000000 --- a/operators/operator_sdk/helm/osdk-helm-support.adoc +++ /dev/null @@ -1,11 +0,0 @@ -:_mod-docs-content-type: ASSEMBLY -[id="osdk-helm-support"] -= Helm support in Operator SDK -include::_attributes/common-attributes.adoc[] -:context: osdk-helm-support - -toc::[] - -include::snippets/osdk-deprecation.adoc[] - -include::modules/osdk-helm-charts.adoc[leveloffset=+1] diff --git a/operators/operator_sdk/helm/osdk-helm-tutorial.adoc b/operators/operator_sdk/helm/osdk-helm-tutorial.adoc deleted file mode 100644 index ec6d25f9b4..0000000000 --- a/operators/operator_sdk/helm/osdk-helm-tutorial.adoc +++ /dev/null @@ -1,96 +0,0 @@ -:_mod-docs-content-type: ASSEMBLY -[id="osdk-helm-tutorial"] -= Operator SDK tutorial for Helm-based Operators -include::_attributes/common-attributes.adoc[] -:context: osdk-helm-tutorial - -toc::[] - -Operator developers can take advantage of link:https://helm.sh/docs/[Helm] support in the Operator SDK to build an example Helm-based Operator for Nginx and manage its lifecycle. 
This tutorial walks through the following process: - -* Create a Nginx deployment -* Ensure that the deployment size is the same as specified by the `Nginx` custom resource (CR) spec -* Update the `Nginx` CR status using the status writer with the names of the `nginx` pods - -include::snippets/osdk-deprecation.adoc[] - -This process is accomplished using two centerpieces of the Operator Framework: - -Operator SDK:: The `operator-sdk` CLI tool and `controller-runtime` library API - -Operator Lifecycle Manager (OLM):: Installation, upgrade, and role-based access control (RBAC) of Operators on a cluster - -ifndef::openshift-dedicated,openshift-rosa,openshift-rosa-hcp[] -[NOTE] -==== -This tutorial goes into greater detail than xref:../../../operators/operator_sdk/helm/osdk-helm-quickstart.adoc#osdk-helm-quickstart[Getting started with Operator SDK for Helm-based Operators]. -==== -endif::openshift-dedicated,openshift-rosa,openshift-rosa-hcp[] - -// The "Getting started" quickstarts require cluster-admin and are therefore only available in OCP. -ifdef::openshift-dedicated,openshift-rosa,openshift-rosa-hcp[] -[NOTE] -==== -This tutorial goes into greater detail than link:https://access.redhat.com/documentation/en-us/openshift_container_platform/4.13/html-single/operators/index#osdk-helm-quickstart[Getting started with Operator SDK for Helm-based Operators] in the OpenShift Container Platform documentation. 
-==== -endif::openshift-dedicated,openshift-rosa,openshift-rosa-hcp[] - -include::modules/osdk-common-prereqs.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources -* xref:../../../operators/operator_sdk/osdk-installing-cli.adoc#osdk-installing-cli[Installing the Operator SDK CLI] -// TODO-HCP remove line 44 and 46 ifndef conditions for HCP after cli_tools book is migrated -ifndef::openshift-rosa-hcp[] -* xref:../../../cli_reference/openshift_cli/getting-started-cli.adoc#getting-started-cli[Getting started with the OpenShift CLI] -endif::openshift-rosa-hcp[] - -include::modules/osdk-create-project.adoc[leveloffset=+1] -include::modules/osdk-helm-existing-chart.adoc[leveloffset=+2] -include::modules/osdk-project-file.adoc[leveloffset=+2] - -include::modules/osdk-helm-logic.adoc[leveloffset=+1] -include::modules/osdk-helm-sample-chart.adoc[leveloffset=+2] -include::modules/osdk-helm-modify-cr.adoc[leveloffset=+2] - -include::modules/osdk-run-proxy.adoc[leveloffset=+1] - - -include::modules/osdk-run-operator.adoc[leveloffset=+1] - -ifdef::openshift-dedicated,openshift-rosa,openshift-rosa-hcp[] -[role="_additional-resources"] -.Additional resources -* link:https://access.redhat.com/documentation/en-us/openshift_container_platform/4.13/html-single/operators/index#osdk-run-locally_osdk-helm-tutorial[Running locally outside the cluster] (OpenShift Container Platform documentation) -* link:https://access.redhat.com/documentation/en-us/openshift_container_platform/4.13/html-single/operators/index#osdk-run-deployment_osdk-helm-tutorial[Running as a deployment on the cluster] (OpenShift Container Platform documentation) -endif::openshift-dedicated,openshift-rosa,openshift-rosa-hcp[] - -// In OSD/ROSA, the only applicable option for running the Operator is to bundle and deploy with OLM. 
-ifndef::openshift-dedicated,openshift-rosa,openshift-rosa-hcp[] -include::modules/osdk-run-locally.adoc[leveloffset=+2] -include::modules/osdk-run-deployment.adoc[leveloffset=+2] -endif::openshift-dedicated,openshift-rosa,openshift-rosa-hcp[] - -[id="osdk-bundle-deploy-olm_{context}"] -=== Bundling an Operator and deploying with Operator Lifecycle Manager - -include::modules/osdk-bundle-operator.adoc[leveloffset=+3] -include::modules/osdk-deploy-olm.adoc[leveloffset=+3] - -include::modules/osdk-create-cr.adoc[leveloffset=+1] - -[id="osdk-helm-tutorial-addtl-resources"] -[role="_additional-resources"] -== Additional resources - -* See xref:../../../operators/operator_sdk/helm/osdk-helm-project-layout.adoc#osdk-helm-project-layout[Project layout for Helm-based Operators] to learn about the directory structures created by the Operator SDK. -ifndef::openshift-dedicated,openshift-rosa,openshift-rosa-hcp[] -* If a xref:../../../networking/enable-cluster-wide-proxy.adoc#enable-cluster-wide-proxy[cluster-wide egress proxy is configured], cluster administrators can xref:../../../operators/admin/olm-configuring-proxy-support.adoc#olm-configuring-proxy-support[override the proxy settings or inject a custom CA certificate] for specific Operators running on Operator Lifecycle Manager (OLM). 
-endif::openshift-dedicated,openshift-rosa,openshift-rosa-hcp[] -ifdef::openshift-dedicated,openshift-rosa[] -// TODO-HCP remove line 92 and 94 ifndef conditions for HCP after networking book is migrated ad put the hcp condition back on line 90 and 95 -ifndef::openshift-rosa-hcp[] -* If a xref:../../../networking/configuring-cluster-wide-proxy.adoc#configuring-a-cluster-wide-proxy[cluster-wide egress proxy is configured], administrators with the `dedicated-admin` role can xref:../../../operators/admin/olm-configuring-proxy-support.adoc#olm-configuring-proxy-support[override the proxy settings or inject a custom CA certificate] for specific Operators running on Operator Lifecycle Manager (OLM). -endif::openshift-rosa-hcp[] -endif::openshift-dedicated,openshift-rosa[] - diff --git a/operators/operator_sdk/helm/osdk-helm-updating-projects.adoc b/operators/operator_sdk/helm/osdk-helm-updating-projects.adoc deleted file mode 100644 index ebe3067e47..0000000000 --- a/operators/operator_sdk/helm/osdk-helm-updating-projects.adoc +++ /dev/null @@ -1,22 +0,0 @@ -:_mod-docs-content-type: ASSEMBLY -[id="osdk-helm-updating-projects"] -= Updating Helm-based projects for newer Operator SDK versions -include::_attributes/common-attributes.adoc[] -:context: osdk-helm-updating-projects - -toc::[] - -{product-title} {product-version} supports Operator SDK {osdk_ver}. If you already have the {osdk_ver_n1} CLI installed on your workstation, you can update the CLI to {osdk_ver} by xref:../../../operators/operator_sdk/osdk-installing-cli.adoc#osdk-installing-cli[installing the latest version]. - -include::snippets/osdk-deprecation.adoc[] - -However, to ensure your existing Operator projects maintain compatibility with Operator SDK {osdk_ver}, update steps are required for the associated breaking changes introduced since {osdk_ver_n1}. You must perform the update steps manually in any of your Operator projects that were previously created or maintained with {osdk_ver_n1}. 
- -include::modules/osdk-updating-1361-to-138.adoc[leveloffset=+1] - -[id="additional-resources_osdk-helm-upgrading-projects"] -[role="_additional-resources"] -== Additional resources - -* link:https://docs.redhat.com/en/documentation/openshift_container_platform/4.17/html-single/operators/index#osdk-upgrading-projects_osdk-helm-updating-projects[Updating Helm-based Operator projects for Operator SDK 1.36.1] ({product-title} 4.17) -* xref:../../../operators/operator_sdk/osdk-pkgman-to-bundle.adoc#osdk-pkgman-to-bundle[Migrating package manifest projects to bundle format] \ No newline at end of file diff --git a/operators/operator_sdk/helm/snippets b/operators/operator_sdk/helm/snippets deleted file mode 120000 index ce62fd7c41..0000000000 --- a/operators/operator_sdk/helm/snippets +++ /dev/null @@ -1 +0,0 @@ -../../../snippets/ \ No newline at end of file diff --git a/operators/operator_sdk/osdk-about.adoc b/operators/operator_sdk/osdk-about.adoc deleted file mode 100644 index 65e45c5849..0000000000 --- a/operators/operator_sdk/osdk-about.adoc +++ /dev/null @@ -1,64 +0,0 @@ -:_mod-docs-content-type: ASSEMBLY -[id="osdk-about"] -= About the Operator SDK -include::_attributes/common-attributes.adoc[] -:context: osdk-about - -toc::[] - -The link:https://operatorframework.io/[Operator Framework] is an open source toolkit to manage Kubernetes native applications, called _Operators_, in an effective, automated, and scalable way. Operators take advantage of Kubernetes extensibility to deliver the automation advantages of cloud services, like provisioning, scaling, and backup and restore, while being able to run anywhere that Kubernetes can run. - -Operators make it easy to manage complex, stateful applications on top of Kubernetes. However, writing an Operator today can be difficult because of challenges such as using low-level APIs, writing boilerplate, and a lack of modularity, which leads to duplication. 
- -The Operator SDK, a component of the Operator Framework, provides a command-line interface (CLI) tool that Operator developers can use to build, test, and deploy an Operator. - -include::snippets/osdk-deprecation.adoc[] - -**Why use the Operator SDK?** - -The Operator SDK simplifies this process of building Kubernetes-native applications, which can require deep, application-specific operational knowledge. The Operator SDK not only lowers that barrier, but it also helps reduce the amount of boilerplate code required for many common management capabilities, such as metering or monitoring. - -The Operator SDK is a framework that uses the link:https://github.com/kubernetes-sigs/controller-runtime[controller-runtime] library to make writing Operators easier by providing the following features: - -- High-level APIs and abstractions to write the operational logic more intuitively -- Tools for scaffolding and code generation to quickly bootstrap a new project -- Integration with Operator Lifecycle Manager (OLM) to streamline packaging, installing, and running Operators on a cluster -- Extensions to cover common Operator use cases -- Metrics set up automatically in any generated Go-based Operator for use on clusters where the Prometheus Operator is deployed - -ifndef::openshift-dedicated,openshift-rosa,openshift-rosa-hcp[] -Operator authors with cluster administrator access to a Kubernetes-based cluster (such as {product-title}) -endif::openshift-dedicated,openshift-rosa,openshift-rosa-hcp[] -ifdef::openshift-dedicated,openshift-rosa,openshift-rosa-hcp[] -Operator authors with dedicated-admin access to {product-title} -endif::openshift-dedicated,openshift-rosa,openshift-rosa-hcp[] -can use the Operator SDK CLI to develop their own Operators based on Go, Ansible, Java, or Helm. 
link:https://kubebuilder.io/[Kubebuilder] is embedded into the Operator SDK as the scaffolding solution for Go-based Operators, which means existing Kubebuilder projects can be used as is with the Operator SDK and continue to work. - -[NOTE] -==== -{product-title} {product-version} supports Operator SDK {osdk_ver}. -==== - -[id="osdk-about-what-are-operators"] -== What are Operators? - -For an overview about basic Operator concepts and terminology, see xref:../../operators/understanding/olm-what-operators-are.adoc#olm-what-operators-are[Understanding Operators]. - -include::modules/osdk-workflow.adoc[leveloffset=+1] - -[id="osdk-about-addtl-resources"] -[role="_additional-resources"] -== Additional resources - -- link:https://redhat-connect.gitbook.io/certified-operator-guide/[Certified Operator Build Guide] - -ifdef::openshift-origin[] -[id="osdk-about-getting-involved"] -== Getting involved - -This guide provides an effective demonstration of the value of the Operator Framework for building and managing Operators, but it is limited in scope. The Operator Framework and its components are open source, so visit each project individually and learn what else you can do: - -link:https://github.com/operator-framework[*github.com/operator-framework*] - -If you want to discuss your experience, have questions, or want to get involved, join the link:https://groups.google.com/forum/#!forum/operator-framework[Operator Framework mailing list]. 
-endif::[] diff --git a/operators/operator_sdk/osdk-bundle-validate.adoc b/operators/operator_sdk/osdk-bundle-validate.adoc deleted file mode 100644 index 16773a53b3..0000000000 --- a/operators/operator_sdk/osdk-bundle-validate.adoc +++ /dev/null @@ -1,30 +0,0 @@ -:_mod-docs-content-type: ASSEMBLY -[id="osdk-bundle-validate"] -= Validating Operator bundles -include::_attributes/common-attributes.adoc[] -:context: osdk-bundle-validate - -toc::[] - -As an Operator author, you can run the `bundle validate` command in the Operator SDK to validate the content and format of an Operator bundle. You can run the command on a remote Operator bundle image or a local Operator bundle directory. - -include::snippets/osdk-deprecation.adoc[] - -include::modules/osdk-bundle-validate-about.adoc[leveloffset=+1] -include::modules/osdk-bundle-validate-tests.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* xref:../../operators/understanding/olm-packaging-format.adoc#olm-bundle-format_olm-packaging-format[Bundle format] - -include::modules/osdk-bundle-validate-run.adoc[leveloffset=+1] - -ifndef::openshift-dedicated,openshift-rosa,openshift-rosa-hcp[] -include::modules/osdk-multi-arch-validate.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* xref:../../operators/operator_sdk/osdk-multi-arch-support.adoc#osdk-multi-platform-support[Configuring Operator projects for multi-platform support] -endif::[] diff --git a/operators/operator_sdk/osdk-cli-ref.adoc b/operators/operator_sdk/osdk-cli-ref.adoc deleted file mode 100644 index 33db59edca..0000000000 --- a/operators/operator_sdk/osdk-cli-ref.adoc +++ /dev/null @@ -1,70 +0,0 @@ -:_mod-docs-content-type: ASSEMBLY -[id="osdk-cli-ref"] -= Operator SDK CLI reference -include::_attributes/common-attributes.adoc[] -:context: osdk-cli-ref - -toc::[] - -The Operator SDK command-line interface (CLI) is a development kit designed to make writing Operators easier. 
- -include::snippets/osdk-deprecation.adoc[] - -.Operator SDK CLI syntax -[source,terminal] ----- -$ operator-sdk [] [] [] ----- - -Operator authors with cluster administrator access to a Kubernetes-based cluster (such as {product-title}) can use the Operator SDK CLI to develop their own Operators based on Go, Ansible, or Helm. link:https://kubebuilder.io/[Kubebuilder] is embedded into the Operator SDK as the scaffolding solution for Go-based Operators, which means existing Kubebuilder projects can be used as is with the Operator SDK and continue to work. - -include::modules/osdk-cli-ref-bundle.adoc[leveloffset=+1] -include::modules/osdk-cli-ref-cleanup.adoc[leveloffset=+1] -include::modules/osdk-cli-ref-completion.adoc[leveloffset=+1] -include::modules/osdk-cli-ref-create.adoc[leveloffset=+1] -include::modules/osdk-cli-ref-generate.adoc[leveloffset=+1] -include::modules/osdk-cli-ref-generate-bundle.adoc[leveloffset=+2] - -[role="_additional-resources"] -.Additional resources - -* See xref:../../operators/operator_sdk/osdk-working-bundle-images.adoc#osdk-bundle-operator_osdk-working-bundle-images[Bundling an Operator] for a full procedure that includes using the `make bundle` command to call the `generate bundle` subcommand. - -include::modules/osdk-cli-ref-generate-kustomize.adoc[leveloffset=+2] - -include::modules/osdk-cli-ref-init.adoc[leveloffset=+1] -include::modules/osdk-cli-ref-run.adoc[leveloffset=+1] -include::modules/osdk-cli-ref-run-bundle.adoc[leveloffset=+2] - -[role="_additional-resources"] -.Additional resources - -* See xref:../../operators/understanding/olm/olm-understanding-operatorgroups.adoc#olm-operatorgroups-membership_olm-understanding-operatorgroups[Operator group membership] for details on possible install modes. 
-* xref:../../operators/operator_sdk/osdk-complying-with-psa.adoc#osdk-complying-with-psa[Complying with pod security admission] -// TODO-HCP remove line 45 and 47 ifndef conditions for HCP after Authentication book is migrated -ifndef::openshift-rosa-hcp[] -* xref:../../authentication/understanding-and-managing-pod-security-admission.adoc#understanding-and-managing-pod-security-admission[Understanding and managing pod security admission] -endif::openshift-rosa-hcp[] - -include::modules/osdk-cli-ref-run-bundle-upgrade.adoc[leveloffset=+2] - -[role="_additional-resources"] -.Additional resources - -* xref:../../operators/operator_sdk/osdk-complying-with-psa.adoc#osdk-complying-with-psa[Complying with pod security admission] -// TODO-HCP remove line 55 and 57 ifndef conditions for HCP after Authentication book is migrated -ifndef::openshift-rosa-hcp[] -* xref:../../authentication/understanding-and-managing-pod-security-admission.adoc#understanding-and-managing-pod-security-admission[Understanding and managing pod security admission] -endif::openshift-rosa-hcp[] - -include::modules/osdk-cli-ref-scorecard.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* See xref:../../operators/operator_sdk/osdk-scorecard.adoc#osdk-scorecard[Validating Operators using the scorecard tool] for details about running the scorecard tool. 
-* xref:../../operators/operator_sdk/osdk-complying-with-psa.adoc#osdk-complying-with-psa[Complying with pod security admission] -// TODO-HCP remove line 67 and 69 ifndef conditions for HCP after Authentication book is migrated -ifndef::openshift-rosa-hcp[] -* xref:../../authentication/understanding-and-managing-pod-security-admission.adoc#understanding-and-managing-pod-security-admission[Understanding and managing pod security admission] -endif::openshift-rosa-hcp[] \ No newline at end of file diff --git a/operators/operator_sdk/osdk-complying-with-psa.adoc b/operators/operator_sdk/osdk-complying-with-psa.adoc deleted file mode 100644 index cc448d4ab5..0000000000 --- a/operators/operator_sdk/osdk-complying-with-psa.adoc +++ /dev/null @@ -1,48 +0,0 @@ - -:_mod-docs-content-type: ASSEMBLY -[id="osdk-complying-with-psa"] -= Complying with pod security admission -include::_attributes/common-attributes.adoc[] -:context: osdk-complying-with-psa - -toc::[] - -_Pod security admission_ is an implementation of the link:https://kubernetes.io/docs/concepts/security/pod-security-standards/[Kubernetes pod security standards]. link:https://kubernetes.io/docs/concepts/security/pod-security-admission/[Pod security admission] restricts the behavior of pods. Pods that do not comply with the pod security admission defined globally or at the namespace level are not admitted to the cluster and cannot run. - -If your Operator project does not require escalated permissions to run, you can ensure your workloads run in namespaces set to the `restricted` pod security level. 
If your Operator project requires escalated permissions to run, you must set the following security context configurations: - -* The allowed pod security admission level for the Operator's namespace -* The allowed security context constraints (SCC) for the workload's service account -// TODO-HCP remove line 17 and 19 ifndef conditions for HCP after authentication book is migrated -ifndef::openshift-rosa-hcp[] -For more information, see xref:../../authentication/understanding-and-managing-pod-security-admission.adoc#understanding-and-managing-pod-security-admission[Understanding and managing pod security admission]. -endif::openshift-rosa-hcp[] - -include::snippets/osdk-deprecation.adoc[] - -// About pod security admission -include::modules/security-context-constraints-psa-about.adoc[leveloffset=+1] - -include::modules/security-context-constraints-psa-synchronization.adoc[leveloffset=+1] - -// Pod security admission synchronization namespace exclusions -include::modules/security-context-constraints-psa-sync-exclusions.adoc[leveloffset=+2] - -include::modules/osdk-ensuring-operator-workloads-run-restricted-psa.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources -// TODO-HCP remove line 36 and 38 ifndef conditions for HCP after authentication book is migrated -ifndef::openshift-rosa-hcp[] -* xref:../../authentication/managing-security-context-constraints.adoc#managing-security-context-constraints[Managing security context constraints] -endif::openshift-rosa-hcp[] - -include::modules/osdk-managing-psa-for-operators-with-escalated-permissions.adoc[leveloffset=+1] - -[id="osdk-complying-with-psa-additional-resources"] -[role="_additional-resources"] -== Additional resources -// TODO-HCP remove line 46 and 48 ifndef conditions for HCP after authentication book is migrated -ifndef::openshift-rosa-hcp[] -* xref:../../authentication/understanding-and-managing-pod-security-admission.adoc#understanding-and-managing-pod-security-admission[Understanding 
and managing pod security admission] -endif::openshift-rosa-hcp[] \ No newline at end of file diff --git a/operators/operator_sdk/osdk-generating-csvs.adoc b/operators/operator_sdk/osdk-generating-csvs.adoc deleted file mode 100644 index a4020e9de8..0000000000 --- a/operators/operator_sdk/osdk-generating-csvs.adoc +++ /dev/null @@ -1,99 +0,0 @@ -:_mod-docs-content-type: ASSEMBLY -[id="osdk-generating-csvs"] -= Defining cluster service versions (CSVs) -include::_attributes/common-attributes.adoc[] -:context: osdk-generating-csvs - -toc::[] - -A _cluster service version_ (CSV), defined by a `ClusterServiceVersion` object, is a YAML manifest created from Operator metadata that assists Operator Lifecycle Manager (OLM) in running the Operator in a cluster. It is the metadata that accompanies an Operator container image, used to populate user interfaces with information such as its logo, description, and version. It is also a source of technical information that is required to run the Operator, like the RBAC rules it requires and which custom resources (CRs) it manages or depends on. - -The Operator SDK includes the CSV generator to generate a CSV for the current Operator project, customized using information contained in YAML manifests and Operator source files. - -include::snippets/osdk-deprecation.adoc[] - -A CSV-generating command removes the responsibility of Operator authors having in-depth OLM knowledge in order for their Operator to interact with OLM or publish metadata to the Catalog Registry. Further, because the CSV spec will likely change over time as new Kubernetes and OLM features are implemented, the Operator SDK is equipped to easily extend its update system to handle new CSV features going forward. 
- -include::modules/osdk-how-csv-gen-works.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* See xref:../../operators/operator_sdk/osdk-working-bundle-images.adoc#osdk-bundle-operator_osdk-working-bundle-images[Bundling an Operator] for a full procedure that includes generating a bundle and CSV. - -include::modules/osdk-csv-bundle-files.adoc[leveloffset=+2] -include::modules/osdk-csv-ver.adoc[leveloffset=+2] - - -include::modules/osdk-manually-defined-csv-fields.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* xref:../../operators/understanding/olm-what-operators-are.adoc#olm-maturity-model_olm-what-operators-are[Operator maturity model] - -include::modules/osdk-csv-manual-annotations.adoc[leveloffset=+1] -include::modules/osdk-csv-annotations-infra.adoc[leveloffset=+2] -[role="_additional-resources"] -.Additional resources -* xref:../../operators/operator_sdk/osdk-generating-csvs.adoc#olm-enabling-operator-for-restricted-network_osdk-generating-csvs[Enabling your Operator for restricted network environments] (disconnected mode) -ifndef::openshift-dedicated,openshift-rosa,openshift-rosa-hcp[] -* xref:../../installing/overview/installing-fips.adoc#installing-fips[Support for FIPS cryptography] -endif::openshift-dedicated,openshift-rosa,openshift-rosa-hcp[] - -include::modules/osdk-csv-annotations-dep.adoc[leveloffset=+2] -include::modules/osdk-csv-annotations-other.adoc[leveloffset=+2] -[role="_additional-resources"] -.Additional resources -* xref:../../operators/operator_sdk/osdk-generating-csvs.adoc#osdk-crds-templates_osdk-generating-csvs[CRD templates] -* xref:../../operators/operator_sdk/osdk-generating-csvs.adoc#osdk-init-resource_osdk-generating-csvs[Initializing required custom resources] -* xref:../../operators/operator_sdk/osdk-generating-csvs.adoc#osdk-suggested-namespace_osdk-generating-csvs[Setting a suggested namespace] -* 
xref:../../operators/operator_sdk/osdk-generating-csvs.adoc#osdk-suggested-namespace-default-node_osdk-generating-csvs[Setting a suggested namespace with default node selector] -* xref:../../operators/operator_sdk/osdk-generating-csvs.adoc#osdk-hiding-internal-objects_osdk-generating-csvs[Hiding internal objects] - -include::modules/olm-enabling-operator-restricted-network.adoc[leveloffset=+1] -include::modules/olm-enabling-operator-for-multi-arch.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* See the link:https://docs.docker.com/registry/spec/manifest-v2-2/#manifest-list[Image Manifest V 2, Schema 2] specification for more information on manifest lists. - -include::modules/olm-arch-os-support.adoc[leveloffset=+2] - -include::modules/osdk-suggested-namespace.adoc[leveloffset=+1] -include::modules/osdk-suggested-namespace-node-selector.adoc[leveloffset=+1] -include::modules/osdk-operatorconditions.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* xref:../../operators/understanding/olm/olm-operatorconditions.adoc#olm-operatorconditions[Operator conditions] - -include::modules/olm-defining-csv-webhooks.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -// This xref points to a topic that is not currently included in the OSD and ROSA docs. 
-ifndef::openshift-dedicated,openshift-rosa,openshift-rosa-hcp[] -* xref:../../architecture/admission-plug-ins.adoc#admission-webhook-types_admission-plug-ins[Types of webhook admission plugins] -endif::openshift-dedicated,openshift-rosa,openshift-rosa-hcp[] -* Kubernetes documentation: -** link:https://kubernetes.io/docs/reference/access-authn-authz/admission-controllers/#validatingadmissionwebhook[Validating admission webhooks] -** link:https://kubernetes.io/docs/reference/access-authn-authz/admission-controllers/#mutatingadmissionwebhook[Mutating admission webhooks] -** link:https://kubernetes.io/docs/tasks/extend-kubernetes/custom-resources/custom-resource-definition-versioning/#webhook-conversion[Conversion webhooks] - -include::modules/olm-webhook-considerations.adoc[leveloffset=+2] - -include::modules/osdk-crds.adoc[leveloffset=+1] -include::modules/osdk-owned-crds.adoc[leveloffset=+2] -include::modules/osdk-required-crds.adoc[leveloffset=+2] -include::modules/olm-dependency-resolution-crd-upgrades.adoc[leveloffset=+2] -include::modules/olm-adding-new-crd-version.adoc[leveloffset=+3] -include::modules/olm-removing-crd-version.adoc[leveloffset=+3] -include::modules/osdk-crd-templates.adoc[leveloffset=+2] -include::modules/osdk-hiding-internal-objects.adoc[leveloffset=+2] -include::modules/osdk-init-resource.adoc[leveloffset=+2] - -include::modules/osdk-apiservices.adoc[leveloffset=+1] diff --git a/operators/operator_sdk/osdk-ha-sno.adoc b/operators/operator_sdk/osdk-ha-sno.adoc deleted file mode 100644 index 325cae8cab..0000000000 --- a/operators/operator_sdk/osdk-ha-sno.adoc +++ /dev/null @@ -1,22 +0,0 @@ -:_mod-docs-content-type: ASSEMBLY -[id="osdk-ha-sno"] -= High-availability or single-node cluster detection and support -include::_attributes/common-attributes.adoc[] -:context: osdk-ha-sno - -toc::[] - -// OSD/ROSA don't support single-node clusters, but these Operator authors still need to know how to handle this configuration for their Operators to work 
correctly in OCP. -ifdef::openshift-dedicated,openshift-rosa,openshift-rosa-hcp[] -To ensure that your Operator runs well on both high-availability (HA) and non-HA modes in OpenShift Container Platform clusters, you can use the Operator SDK to detect the cluster's infrastructure topology and set the resource requirements to fit the cluster's topology. -endif::openshift-dedicated,openshift-rosa,openshift-rosa-hcp[] - -// Not using {product-title} here, because HA mode and non-HA mode are specific to OCP and should be spelled out this way in other distros. -An OpenShift Container Platform cluster can be configured in high-availability (HA) mode, which uses multiple nodes, or in non-HA mode, which uses a single node. A single-node cluster, also known as {sno}, is likely to have more conservative resource constraints. Therefore, it is important that Operators installed on a single-node cluster can adjust accordingly and still run well. - -By accessing the cluster high-availability mode API provided in {product-title}, Operator authors can use the Operator SDK to enable their Operator to detect a cluster's infrastructure topology, either HA or non-HA mode. Custom Operator logic can be developed that uses the detected cluster topology to automatically switch the resource requirements, both for the Operator and for any Operands or workloads it manages, to a profile that best fits the topology. 
- -include::snippets/osdk-deprecation.adoc[] - -include::modules/osdk-ha-sno-api.adoc[leveloffset=+1] -include::modules/osdk-ha-sno-api-examples.adoc[leveloffset=+1] diff --git a/operators/operator_sdk/osdk-installing-cli.adoc b/operators/operator_sdk/osdk-installing-cli.adoc deleted file mode 100644 index 857d24cdc4..0000000000 --- a/operators/operator_sdk/osdk-installing-cli.adoc +++ /dev/null @@ -1,28 +0,0 @@ -:_mod-docs-content-type: ASSEMBLY -[id="osdk-installing-cli"] -= Installing the Operator SDK CLI -include::_attributes/common-attributes.adoc[] -:context: osdk-installing-cli - -toc::[] - -The Operator SDK provides a command-line interface (CLI) tool that Operator developers can use to build, test, and deploy an Operator. You can install the Operator SDK CLI on your workstation so that you are prepared to start authoring your own Operators. - -include::snippets/osdk-deprecation.adoc[] - -ifndef::openshift-dedicated,openshift-rosa,openshift-rosa-hcp[] -Operator authors with cluster administrator access to a Kubernetes-based cluster, such as {product-title}, -endif::openshift-dedicated,openshift-rosa,openshift-rosa-hcp[] -ifdef::openshift-dedicated,openshift-rosa,openshift-rosa-hcp[] -Operator authors with dedicated-admin access to {product-title} -endif::openshift-dedicated,openshift-rosa,openshift-rosa-hcp[] -can use the Operator SDK CLI to develop their own Operators based on Go, Ansible, Java, or Helm. link:https://kubebuilder.io/[Kubebuilder] is embedded into the Operator SDK as the scaffolding solution for Go-based Operators, which means existing Kubebuilder projects can be used as is with the Operator SDK and continue to work. - -[NOTE] -==== -{product-title} {product-version} supports Operator SDK {osdk_ver}. 
-==== - -include::modules/osdk-installing-cli-linux-macos.adoc[leveloffset=+1] - -include::modules/osdk-installing-cli-macos.adoc[leveloffset=+1] diff --git a/operators/operator_sdk/osdk-leader-election.adoc b/operators/operator_sdk/osdk-leader-election.adoc deleted file mode 100644 index 9cd2d5c0e4..0000000000 --- a/operators/operator_sdk/osdk-leader-election.adoc +++ /dev/null @@ -1,21 +0,0 @@ -:_mod-docs-content-type: ASSEMBLY -[id="osdk-leader-election"] -= Configuring leader election -include::_attributes/common-attributes.adoc[] -:context: osdk-leader-election - -toc::[] - -During the lifecycle of an Operator, it is possible that there may be more than one instance running at any given time, for example when rolling out an upgrade for the Operator. In such a scenario, it is necessary to avoid contention between multiple Operator instances using leader election. This ensures only one leader instance handles the reconciliation while the other instances are inactive but ready to take over when the leader steps down. - -There are two different leader election implementations to choose from, each with its own tradeoff: - -Leader-for-life:: The leader pod only gives up leadership, using garbage collection, when it is deleted. This implementation precludes the possibility of two instances mistakenly running as leaders, a state also known as split brain. However, this method can be subject to a delay in electing a new leader. For example, when the leader pod is on an unresponsive or partitioned node, you can specify `node.kubernetes.io/unreachable` and `node.kubernetes.io/not-ready` tolerations on the leader pod and use the `tolerationSeconds` value to dictate how long it takes for the leader pod to be deleted from the node and step down. These tolerations are added to the pod by default on admission with a `tolerationSeconds` value of 5 minutes. 
See the link:https://godoc.org/github.com/operator-framework/operator-sdk/pkg/leader[Leader-for-life] Go documentation for more. - -Leader-with-lease:: The leader pod periodically renews the leader lease and gives up leadership when it cannot renew the lease. This implementation allows for a faster transition to a new leader when the existing leader is isolated, but there is a possibility of split brain in link:https://github.com/kubernetes/client-go/blob/30b06a83d67458700a5378239df6b96948cb9160/tools/leaderelection/leaderelection.go#L21-L24[certain situations]. See the link:https://godoc.org/github.com/kubernetes-sigs/controller-runtime/pkg/leaderelection[Leader-with-lease] Go documentation for more. - -By default, the Operator SDK enables the Leader-for-life implementation. Consult the related Go documentation for both approaches to consider the trade-offs that make sense for your use case. - -include::snippets/osdk-deprecation.adoc[] - -include::modules/osdk-leader-election-types.adoc[leveloffset=+1] diff --git a/operators/operator_sdk/osdk-migrating-to-v0-1-0.adoc b/operators/operator_sdk/osdk-migrating-to-v0-1-0.adoc deleted file mode 100644 index fe1a1fddba..0000000000 --- a/operators/operator_sdk/osdk-migrating-to-v0-1-0.adoc +++ /dev/null @@ -1,23 +0,0 @@ -:_mod-docs-content-type: ASSEMBLY -[id="osdk-migrating-to-v0-1-0"] -= Migrating to Operator SDK v0.1.0 -include::_attributes/common-attributes.adoc[] -:context: osdk-migrating-to-v0-1-0 - -toc::[] - -This guide describes how to migrate an Operator project built using Operator SDK v0.0.x to the project structure required by link:https://github.com/operator-framework/operator-sdk/releases[Operator SDK v0.1.0]. - -include::snippets/osdk-deprecation.adoc[] - -The recommended method for migrating your project is to: - -. Initialize a new v0.1.0 project. -. Copy your code into the new project. -. Modify the new project as described for v0.1.0. 
- -This guide uses the `memcached-operator`, the example project from xref:../../operators/operator_sdk/osdk-about.adoc#osdk-about[the Operator SDK], to illustrate the migration steps. See the link:https://github.com/operator-framework/operator-sdk-samples/tree/aa15bd278eec0959595e0a0a7282a26055d7f9d6/memcached-operator[v0.0.7 memcached-operator] and link:https://github.com/operator-framework/operator-sdk-samples/tree/4c6934448684a6953ece4d3d9f3f77494b1c125e/memcached-operator[v0.1.0 memcached-operator] project structures for pre- and post-migration examples, respectively. - -include::modules/creating-new-osdk-v0-1-0-project.adoc[leveloffset=+1] -include::modules/migrating-custom-types-pkg-apis.adoc[leveloffset=+1] -include::modules/migrating-reconcile-code.adoc[leveloffset=+1] diff --git a/operators/operator_sdk/osdk-monitoring-prometheus.adoc b/operators/operator_sdk/osdk-monitoring-prometheus.adoc deleted file mode 100644 index b1b292ca61..0000000000 --- a/operators/operator_sdk/osdk-monitoring-prometheus.adoc +++ /dev/null @@ -1,43 +0,0 @@ -:_mod-docs-content-type: ASSEMBLY -[id="osdk-monitoring-prometheus"] -= Configuring built-in monitoring with Prometheus -include::_attributes/common-attributes.adoc[] -:context: osdk-monitoring-prometheus - -toc::[] - -// Dedicated-admins in OSD and ROSA don't have the permissions to complete the procedures in this assembly. Also, the procedures use the default Prometheus Operator in the openshift-monitoring project, which OSD/ROSA customers should not use. - -ifndef::openshift-dedicated,openshift-rosa,openshift-rosa-hcp[] -This guide describes the built-in monitoring support provided by the Operator SDK using the Prometheus Operator and details usage for authors of Go-based and Ansible-based Operators. 
- -include::snippets/osdk-deprecation.adoc[] - -include::modules/osdk-monitoring-prometheus-operator-support.adoc[leveloffset=+1] -include::modules/osdk-monitoring-custom-metrics.adoc[leveloffset=+1] -include::modules/osdk-ansible-metrics.adoc[leveloffset=+1] -endif::openshift-dedicated,openshift-rosa,openshift-rosa-hcp[] - -ifdef::openshift-dedicated,openshift-rosa[] -// Since OSD/ROSA dedicated-admins can't do the procedures in this assembly, point to the OCP docs. -The Operator SDK provides built-in monitoring support using the Prometheus Operator, which you can use to expose custom metrics for your Operator. - -include::snippets/osdk-deprecation.adoc[] - -[WARNING] -==== -By default, {product-title} provides a Prometheus Operator in the `openshift-user-workload-monitoring` project. You should use this Prometheus instance to monitor user workloads in {product-title}. - -Do not use the Prometheus Operator in the `openshift-monitoring` project. Red Hat Site Reliability Engineers (SRE) use this Prometheus instance to monitor core cluster components. 
-==== - -[role="_additional-resources"] -.Additional resources -* link:https://access.redhat.com/documentation/en-us/openshift_container_platform/4.13/html-single/operators/index#osdk-monitoring-custom-metrics_osdk-monitoring-prometheus[Exposing custom metrics for Go-based Operators] (OpenShift Container Platform documentation) -* link:https://access.redhat.com/documentation/en-us/openshift_container_platform/4.13/html-single/operators/index#osdk-ansible-metrics_osdk-monitoring-prometheus[Exposing custom metrics for Ansible-based Operators] (OpenShift Container Platform documentation) -// TODO-HCP remove line 39 and 41 ifndef conditions for HCP after Observability book is migrated and add back HCP condition to line 41 and 21 -ifndef::openshift-rosa-hcp[] -* xref:../../observability/monitoring/monitoring-overview.adoc#understanding-the-monitoring-stack_monitoring-overview[Understanding the monitoring stack] in {product-title} -endif::openshift-rosa-hcp[] -endif::openshift-dedicated,openshift-rosa[] - diff --git a/operators/operator_sdk/osdk-multi-arch-support.adoc b/operators/operator_sdk/osdk-multi-arch-support.adoc deleted file mode 100644 index 74d1baad7e..0000000000 --- a/operators/operator_sdk/osdk-multi-arch-support.adoc +++ /dev/null @@ -1,48 +0,0 @@ -:_mod-docs-content-type: ASSEMBLY -[id="osdk-multi-platform-support"] -= Configuring Operator projects for multi-platform support -include::_attributes/common-attributes.adoc[] -:context: osdk-multi-arch - -toc::[] - -Operator projects that support multiple architectures and operating systems, or _platforms_, can run on more Kubernetes and {product-title} clusters than Operator projects that support only a single platform. Example architectures include `amd64`, `arm64`, `ppc64le`, and `s390x`. Example operating systems include Linux and Windows. 
- -Perform the following actions to ensure your Operator project can run on multiple {product-title} platforms: - -* Build a manifest list that specifies the platforms that your Operator supports. -* Set your Operator's node affinity to support multi-architecture compute machines. - -include::snippets/osdk-deprecation.adoc[] - -include::modules/osdk-multi-arch-building-images.adoc[leveloffset=+1] -include::modules/osdk-multi-arch-node-affinity.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources -* xref:../../nodes/scheduling/nodes-scheduler-node-affinity.adoc#nodes-scheduler-node-affinity[Controlling pod placement on nodes using node affinity rules] -* xref:../../nodes/scheduling/nodes-scheduler-node-affinity.adoc#olm-overriding-operator-pod-affinity_nodes-scheduler-node-affinity[Using node affinity to control where an Operator is installed] -* xref:../../post_installation_configuration/configuring-multi-arch-compute-machines/multi-architecture-configuration.adoc#post-install-multi-architecture-configuration[About clusters with multi-architecture compute machines] - -include::modules/osdk-multi-arch-node-reqs.adoc[leveloffset=+2] - -[role="_additional-resources"] -.Additional resources -* xref:../../nodes/scheduling/nodes-scheduler-node-affinity.adoc#nodes-scheduler-node-affinity-configuring-required_nodes-scheduler-node-affinity[Configuring a required node affinity rule] -* xref:../../nodes/scheduling/nodes-scheduler-node-affinity.adoc#nodes-scheduler-node-affinity-example_nodes-scheduler-node-affinity[Sample node affinity rules] - -include::modules/osdk-multi-arch-node-preference.adoc[leveloffset=+2] - -[role="_additional-resources"] -.Additional resources -* xref:../../nodes/scheduling/nodes-scheduler-node-affinity.adoc#nodes-scheduler-node-affinity-configuring-preferred_nodes-scheduler-node-affinity[Configuring a preferred node affinity rule] - -[id="next-steps_osdk-multi-arch-support"] -== Next steps - -* 
xref:../../operators/operator_sdk/osdk-generating-csvs.adoc#olm-enabling-operator-for-multi-arch_osdk-generating-csvs[Label the platforms your Operator supports for Operator Lifecycle Manager (OLM)] -* Bundle your Operator and Deploy with OLM -** xref:../../operators/operator_sdk/golang/osdk-golang-tutorial.adoc#osdk-bundle-deploy-olm_osdk-golang-tutorial[Go-based Operator projects] -** xref:../../operators/operator_sdk/ansible/osdk-ansible-tutorial.adoc#osdk-bundle-deploy-olm_osdk-ansible-tutorial[Ansible-based Operator projects] -** xref:../../operators/operator_sdk/helm/osdk-helm-tutorial.html#osdk-bundle-deploy-olm_osdk-helm-tutorial[Helm-based Operator projects] -* xref:../../operators/operator_sdk/osdk-bundle-validate.html#osdk-multi-arch-validate_osdk-bundle-validate[Validate your Operator's multi-platform readiness] diff --git a/operators/operator_sdk/osdk-pkgman-to-bundle.adoc b/operators/operator_sdk/osdk-pkgman-to-bundle.adoc deleted file mode 100644 index bdf25f2c2f..0000000000 --- a/operators/operator_sdk/osdk-pkgman-to-bundle.adoc +++ /dev/null @@ -1,20 +0,0 @@ -:_mod-docs-content-type: ASSEMBLY -[id="osdk-pkgman-to-bundle"] -= Migrating package manifest projects to bundle format -include::_attributes/common-attributes.adoc[] -:context: osdk-pkgman-to-bundle - -toc::[] - -Support for the legacy _package manifest format_ for Operators is removed in {product-title} 4.8 and later. If you have an Operator project that was initially created using the package manifest format, you can use the Operator SDK to migrate the project to the bundle format. The bundle format is the preferred packaging format for Operator Lifecycle Manager (OLM) starting in {product-title} 4.6. -//Consider updating this during the 4.10 to 4.11 version scrub. 
- -include::snippets/osdk-deprecation.adoc[] - -include::modules/osdk-about-pkg-format-migration.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources -* xref:../../operators/understanding/olm-packaging-format.adoc#olm-packaging-format[Operator Framework packaging format] - -include::modules/osdk-migrating-pkgman.adoc[leveloffset=+1] diff --git a/operators/operator_sdk/osdk-pruning-utility.adoc b/operators/operator_sdk/osdk-pruning-utility.adoc deleted file mode 100644 index 430f33f940..0000000000 --- a/operators/operator_sdk/osdk-pruning-utility.adoc +++ /dev/null @@ -1,14 +0,0 @@ -:_mod-docs-content-type: ASSEMBLY -[id="osdk-pruning-utility"] -= Object pruning utility for Go-based Operators -include::_attributes/common-attributes.adoc[] -:context: osdk-pruning-utility - -toc::[] - -The `operator-lib` pruning utility lets Go-based Operators clean up, or prune, objects when they are no longer needed. Operator authors can also use the utility to create custom hooks and strategies. 
- -include::snippets/osdk-deprecation.adoc[] - -include::modules/osdk-pruning-utility-about.adoc[leveloffset=+1] -include::modules/osdk-pruning-utility-config.adoc[leveloffset=+1] diff --git a/operators/operator_sdk/osdk-scorecard.adoc b/operators/operator_sdk/osdk-scorecard.adoc deleted file mode 100644 index e9a40e31f8..0000000000 --- a/operators/operator_sdk/osdk-scorecard.adoc +++ /dev/null @@ -1,23 +0,0 @@ -:_mod-docs-content-type: ASSEMBLY -[id="osdk-scorecard"] -= Validating Operators using the scorecard tool -include::_attributes/common-attributes.adoc[] -:context: osdk-scorecard - -toc::[] - -As an Operator author, you can use the scorecard tool in the Operator SDK to do the following tasks: - -* Validate that your Operator project is free of syntax errors and packaged correctly -* Review suggestions about ways you can improve your Operator - -include::snippets/osdk-deprecation.adoc[] - -include::modules/osdk-scorecard-about.adoc[leveloffset=+1] -include::modules/osdk-scorecard-config.adoc[leveloffset=+1] -include::modules/osdk-scorecard-tests.adoc[leveloffset=+1] -include::modules/osdk-scorecard-run.adoc[leveloffset=+1] -include::modules/osdk-scorecard-output.adoc[leveloffset=+1] -include::modules/osdk-scorecard-select-tests.adoc[leveloffset=+1] -include::modules/osdk-scorecard-parallel.adoc[leveloffset=+1] -include::modules/osdk-scorecard-custom-tests.adoc[leveloffset=+1] diff --git a/operators/operator_sdk/osdk-working-bundle-images.adoc b/operators/operator_sdk/osdk-working-bundle-images.adoc deleted file mode 100644 index ae75ca8fe4..0000000000 --- a/operators/operator_sdk/osdk-working-bundle-images.adoc +++ /dev/null @@ -1,52 +0,0 @@ -:_mod-docs-content-type: ASSEMBLY -[id="osdk-working-bundle-images"] -= Working with bundle images -include::_attributes/common-attributes.adoc[] -:context: osdk-working-bundle-images - -toc::[] - -You can use the Operator SDK to package, deploy, and upgrade Operators in the bundle format for use on Operator Lifecycle 
Manager (OLM). - -include::snippets/osdk-deprecation.adoc[] - -include::modules/osdk-bundle-operator.adoc[leveloffset=+1] -include::modules/osdk-deploy-olm.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* xref:../../operators/understanding/olm-packaging-format.adoc#olm-file-based-catalogs_olm-packaging-format[File-based catalogs] in Operator Framework packaging format -* xref:../../operators/admin/olm-managing-custom-catalogs.adoc#olm-managing-custom-catalogs-fb[File-based catalogs] in Managing custom catalogs -* xref:../../operators/understanding/olm-packaging-format.adoc#olm-bundle-format_olm-packaging-format[Bundle format] - -include::modules/osdk-publish-catalog.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* See xref:../../operators/admin/olm-managing-custom-catalogs.adoc#olm-managing-custom-catalogs[Managing custom catalogs] for details on direct usage of the `opm` CLI for more advanced use cases. - -include::modules/osdk-bundle-upgrade-olm.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* xref:../../operators/admin/olm-adding-operators-to-cluster.adoc#olm-adding-operators-to-a-cluster[Traditional Operator installation with OLM] - -include::modules/osdk-control-compat.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* link:https://redhat-connect.gitbook.io/certified-operator-guide/ocp-deployment/operator-metadata/bundle-directory/managing-openshift-versions[Managing OpenShift Versions] in the _Certified Operator Build Guide_ -* xref:../../operators/admin/olm-upgrading-operators.adoc#olm-upgrading-operators[Updating installed Operators] -* xref:../../operators/understanding/olm-rh-catalogs.adoc#olm-rh-catalogs[Red Hat-provided Operator catalogs] - -[id="osdk-working-bundle-images-additional-resources"] -[role="_additional-resources"] -== Additional resources - -* See 
xref:../../operators/understanding/olm-packaging-format.adoc#olm-bundle-format_olm-packaging-format[Operator Framework packaging format] for details on the bundle format. -* See xref:../../operators/admin/olm-managing-custom-catalogs.adoc#olm-managing-custom-catalogs[Managing custom catalogs] for details on adding bundle images to index images by using the `opm` command. -* See xref:../../operators/understanding/olm/olm-workflow.adoc#olm-workflow[Operator Lifecycle Manager workflow] for details on how upgrades work for installed Operators. diff --git a/operators/understanding/olm-common-terms.adoc b/operators/understanding/olm-common-terms.adoc index 3fbf8724d8..0a72ebd2bb 100644 --- a/operators/understanding/olm-common-terms.adoc +++ b/operators/understanding/olm-common-terms.adoc @@ -6,6 +6,6 @@ include::_attributes/common-attributes.adoc[] toc::[] -This topic provides a glossary of common terms related to the Operator Framework, including Operator Lifecycle Manager (OLM) and the Operator SDK. +This topic provides a glossary of common terms related to the Operator Framework, including Operator Lifecycle Manager (OLM). 
include::snippets/of-terms-snippet.adoc[leveloffset=+0] diff --git a/operators/understanding/olm-multitenancy.adoc b/operators/understanding/olm-multitenancy.adoc index 0d9ed89db1..002ff52470 100644 --- a/operators/understanding/olm-multitenancy.adoc +++ b/operators/understanding/olm-multitenancy.adoc @@ -20,7 +20,6 @@ include::modules/olm-default-install-behavior.adoc[leveloffset=+1] .Additional resources * xref:../../operators/admin/olm-adding-operators-to-cluster.adoc#olm-adding-operators-to-a-cluster[Adding Operators to a cluster] * xref:../../operators/understanding/olm/olm-understanding-operatorgroups.adoc#olm-operatorgroups-membership_olm-understanding-operatorgroups[Install modes types] -* xref:../../operators/operator_sdk/osdk-generating-csvs.adoc#osdk-suggested-namespace_osdk-generating-csvs[Setting a suggested namespace] include::modules/olm-multitenancy-solution.adoc[leveloffset=+1] [role="_additional-resources"] @@ -36,4 +35,4 @@ endif::openshift-dedicated,openshift-rosa,openshift-rosa-hcp[] Operator Lifecycle Manager (OLM) handles OLM-managed Operators that are installed in the same namespace, meaning their `Subscription` resources are colocated in the same namespace, as related Operators. Even if they are not actually related, OLM considers their states, such as their version and update policy, when any one of them is updated. -For more information on Operator colocation and using Operator groups effectively, see xref:../../operators/understanding/olm/olm-colocation.adoc#olm-colocation[Operator Lifecycle Manager (OLM) -> Multitenancy and Operator colocation]. \ No newline at end of file +For more information on Operator colocation and using Operator groups effectively, see xref:../../operators/understanding/olm/olm-colocation.adoc#olm-colocation[Operator Lifecycle Manager (OLM) -> Multitenancy and Operator colocation]. 
diff --git a/operators/understanding/olm-understanding-operatorhub.adoc b/operators/understanding/olm-understanding-operatorhub.adoc index 76b6a36aa1..ffbce482b2 100644 --- a/operators/understanding/olm-understanding-operatorhub.adoc +++ b/operators/understanding/olm-understanding-operatorhub.adoc @@ -14,8 +14,6 @@ include::modules/olm-operatorhub-architecture.adoc[leveloffset=+1] == Additional resources * xref:../../operators/understanding/olm/olm-understanding-olm.adoc#olm-catalogsource_olm-understanding-olm[Catalog source] -* xref:../../operators/operator_sdk/osdk-about.adoc#osdk-about[About the Operator SDK] -* xref:../../operators/operator_sdk/osdk-generating-csvs.adoc#osdk-generating-csvs[Defining cluster service versions (CSVs)] * xref:../../operators/understanding/olm/olm-workflow.adoc#olm-upgrades_olm-workflow[Operator installation and upgrade workflow in OLM] * link:https://connect.redhat.com[Red Hat Partner Connect] * link:https://marketplace.redhat.com[Red Hat Marketplace] diff --git a/operators/understanding/olm/olm-operatorconditions.adoc b/operators/understanding/olm/olm-operatorconditions.adoc index 5a148915c4..975dc4ccf9 100644 --- a/operators/understanding/olm/olm-operatorconditions.adoc +++ b/operators/understanding/olm/olm-operatorconditions.adoc @@ -16,7 +16,6 @@ include::modules/olm-supported-operatorconditions.adoc[leveloffset=+1] == Additional resources * xref:../../../operators/admin/olm-managing-operatorconditions.adoc#olm-operatorconditions[Managing Operator conditions] -* xref:../../../operators/operator_sdk/osdk-generating-csvs.adoc#osdk-operatorconditions_osdk-generating-csvs[Enabling Operator conditions] // The following xrefs point to topics that are not currently included in the OSD/ROSA docs. 
ifndef::openshift-dedicated,openshift-rosa,openshift-rosa-hcp[] * xref:../../../nodes/pods/nodes-pods-configuring.adoc#nodes-pods-configuring-pod-distruption-about_nodes-pods-configuring[Using pod disruption budgets to specify the number of pods that must be up] diff --git a/operators/understanding/olm/olm-understanding-dependency-resolution.adoc b/operators/understanding/olm/olm-understanding-dependency-resolution.adoc index ba1643b673..fb7c368cbe 100644 --- a/operators/understanding/olm/olm-understanding-dependency-resolution.adoc +++ b/operators/understanding/olm/olm-understanding-dependency-resolution.adoc @@ -25,12 +25,6 @@ include::modules/olm-dependency-resolution-preferences.adoc[leveloffset=+1] include::modules/olm-dependency-resolution-crd-upgrades.adoc[leveloffset=+1] -[role="_additional-resources"] -.Additional resources - -* xref:../../../operators/operator_sdk/osdk-generating-csvs.adoc#olm-dependency-resolution-adding-new-crd-version_osdk-generating-csvs[Adding a new CRD version] -* xref:../../../operators/operator_sdk/osdk-generating-csvs.adoc#olm-dependency-resolution-removing-crd-version_osdk-generating-csvs[Deprecating or removing a CRD version] - include::modules/olm-dependencies-best-practices.adoc[leveloffset=+1] [role="_additional-resources"] @@ -39,4 +33,4 @@ include::modules/olm-dependencies-best-practices.adoc[leveloffset=+1] * Kubernetes documentation: link:https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api_changes.md#readme[Changing the API] include::modules/olm-dependencies-caveats.adoc[leveloffset=+1] -include::modules/olm-dependency-resolution-examples.adoc[leveloffset=+1] \ No newline at end of file +include::modules/olm-dependency-resolution-examples.adoc[leveloffset=+1] diff --git a/operators/understanding/olm/olm-webhooks.adoc b/operators/understanding/olm/olm-webhooks.adoc index 4c0490b8dc..b4f85fe776 100644 --- a/operators/understanding/olm/olm-webhooks.adoc +++ 
b/operators/understanding/olm/olm-webhooks.adoc @@ -8,8 +8,6 @@ toc::[] Webhooks allow Operator authors to intercept, modify, and accept or reject resources before they are saved to the object store and handled by the Operator controller. Operator Lifecycle Manager (OLM) can manage the lifecycle of these webhooks when they are shipped alongside your Operator. -See xref:../../../operators/operator_sdk/osdk-generating-csvs.adoc#olm-defining-csv-webhook_osdk-generating-csvs[Defining cluster service versions (CSVs)] for details on how an Operator developer can define webhooks for their Operator, as well as considerations when running on OLM. - [id="olm-webhooks-additional-resources"] [role="_additional-resources"] == Additional resources diff --git a/rosa_architecture/index.adoc b/rosa_architecture/index.adoc index cb2a8207fd..c3dbeddefe 100644 --- a/rosa_architecture/index.adoc +++ b/rosa_architecture/index.adoc @@ -189,9 +189,6 @@ xref:../applications/working_with_helm_charts/understanding-helm.adoc#understand - **xref:../operators/understanding/olm-what-operators-are.adoc#olm-what-operators-are[Understand Operators]**: Operators are the preferred method for creating on-cluster applications for {product-title} {product-version}. Learn about the Operator Framework and how to deploy applications by using installed Operators into your projects. -- **xref:../operators/operator_sdk/osdk-about.adoc#osdk-about[Develop Operators]**: Operators are the preferred method for creating on-cluster applications for {product-title} {product-version}. Learn the workflow for building, testing, and deploying Operators. 
You can then create your own Operators based on xref:../operators/operator_sdk/ansible/osdk-ansible-support.adoc#osdk-ansible-support[Ansible] or -xref:../operators/operator_sdk/helm/osdk-helm-support.adoc#osdk-helm-support[Helm], or configure xref:../operators/operator_sdk/osdk-monitoring-prometheus.adoc#osdk-monitoring-prometheus[built-in Prometheus monitoring] by using the Operator SDK. - - **Reference the xref:../rest_api/overview/index.adoc#api-index[REST API index]**: Learn about {product-title} application programming interface endpoints. // Need to provide a link closer to 4.15 GA diff --git a/snippets/osdk-deprecation.adoc b/snippets/osdk-deprecation.adoc deleted file mode 100644 index bdd1c0f2e7..0000000000 --- a/snippets/osdk-deprecation.adoc +++ /dev/null @@ -1,54 +0,0 @@ -// Text snippet included in the following assemblies: -// * cli_reference/osdk/cli-osdk-install.adoc -// * cli_reference/osdk/cli-osdk-ref.adoc -// * operators/operator_sdk/ansible/osdk-ansible-cr-status.adoc -// * operators/operator_sdk/ansible/osdk-ansible-inside-operator.adoc -// * operators/operator_sdk/ansible/osdk-ansible-k8s-collection.adoc -// * operators/operator_sdk/ansible/osdk-ansible-project-layout.adoc -// * opearotors/operator_sdk/ansible/osdk-ansible-quickstart.adoc -// * operators/operator_sdk/ansible/osdk-ansible-support.adoc -// * operators/operator_sdk/ansible/osdk-ansible-tutorial.adoc -// * operators/ansible/osdk-ansible-updating-projects.adoc -// * operator/operator_sdk/golang/osdk-golang-project-layout.adoc -// * operators/operator_sdk/golang/osdk-golang-quickstart.adoc -// * operators/operator_sdk/golang/osdk-golang-tutorial.adoc -// * operators/operator_sdk/golang/osdk-golang-updating-projects.adoc -// * operators/operator_sdk/helm/osdk-helm-project-layout.adoc -// * operators/operator_sdk/helm/osdk-helm-quickstart.adoc -// * operators/operator_sdk/helm/osdk-helm-support.adoc -// * operators/operator_sdk/helm/osdk-helm-tutorial.adoc -// * 
operators/operator_sdk/helm/osdk-helm-updating-projects.adoc -// * operators/operator_sdk/osdk-about.adoc -// * operators/operator_sdk/osdk-bundle-validate.adoc -// * operators/operator_sdk/osdk-cli-ref.adoc -// * operators/operator_sdk/osdk-complying-with-psa.adoc -// * operators/operator_sdk/osdk-generating-csvs.adoc -// * operators/operator_sdk/osdk-ha-sno.adoc -// * operators/operator_sdk/osdk-installing-cli.adoc -// * operators/operator_sdk/osdk-leader-election.adoc -// * operators/operator_sdk/osdk-migrating-to-v0-1-0.adoc -// * operators/operator_sdk/osdk-monitoring-prometheus.adoc -// * operators/operator_sdk/osdk-multi-arch-support.adoc -// * operators/operator_sdk/osdk-pkgman-to-bundle.adoc -// * operators/operator_sdk/osdk-pruning-utility.adoc -// * operators/operator_sdk/osdk-scorecard.adoc -// * operators/operator_sdk/osdk-working-bundle-images.adoc - -[IMPORTANT] -==== -[subs="attributes+"] -The Red{nbsp}Hat-supported version of the Operator SDK CLI tool, including the related scaffolding and testing tools for Operator projects, is deprecated and is planned to be removed in a future release of {product-title}. Red{nbsp}Hat will provide bug fixes and support for this feature during the current release lifecycle, but this feature will no longer receive enhancements and will be removed from future {product-title} releases. - -The Red{nbsp}Hat-supported version of the Operator SDK is not recommended for creating new Operator projects. Operator authors with existing Operator projects can use the version of the Operator SDK CLI tool released with {product-title} {product-version} to maintain their projects and create Operator releases targeting newer versions of {product-title}. - -The following related base images for Operator projects are _not_ deprecated. The runtime functionality and configuration APIs for these base images are still supported for bug fixes and for addressing CVEs. 
- -* The base image for Ansible-based Operator projects -* The base image for Helm-based Operator projects - -ifndef::openshift-rosa,openshift-dedicated[] -For the most recent list of major functionality that has been deprecated or removed within {product-title}, refer to the _Deprecated and removed features_ section of the {product-title} release notes. -endif::openshift-rosa,openshift-dedicated[] - -For information about the unsupported, community-maintained, version of the Operator SDK, see link:https://sdk.operatorframework.io[Operator SDK (Operator Framework)]. -====