From 2d78e7cb44ac34293d499f2f411d8a0ce84d5d76 Mon Sep 17 00:00:00 2001 From: Russell Teague Date: Fri, 22 Mar 2019 09:58:01 -0400 Subject: [PATCH] Docs update for 4.1 --- BUILD.md | 19 - CONTRIBUTING.md | 28 - DEPLOYMENT_TYPES.md | 16 - HOOKS.md | 43 +- README.md | 175 +-- docs/openshift_components.md | 152 --- docs/proposals/README.md | 27 - docs/proposals/crt_management_proposal.md | 113 -- docs/proposals/playbook_consolidation.md | 178 --- docs/proposals/proposal_template.md | 30 - docs/proposals/role_decomposition.md | 353 ------ docs/repo_structure.md | 14 - examples/README.md | 96 -- examples/certificate-check-upload.yaml | 53 - examples/certificate-check-volume.yaml | 60 - examples/scheduled-certcheck-upload.yaml | 50 - examples/scheduled-certcheck-volume.yaml | 55 - hack/hooks/README.md | 37 - hack/hooks/verify_generated_modules/README.md | 19 - .../hooks/verify_generated_modules/pre-commit | 55 - inventory/.gitignore | 1 - inventory/40_basic_inventory.ini | 24 - inventory/hosts.example | 1097 +---------------- inventory/hosts.glusterfs.external.example | 61 - inventory/hosts.glusterfs.mixed.example | 64 - inventory/hosts.glusterfs.native.example | 51 - .../hosts.glusterfs.registry-only.example | 57 - ...sts.glusterfs.storage-and-registry.example | 68 - inventory/hosts.grafana.example | 17 - inventory/hosts.localhost | 27 - inventory/hosts.openstack | 37 - inventory/install-config-example.yml | 30 - meta/main.yml | 2 - playbooks/README.md | 16 - 34 files changed, 45 insertions(+), 3080 deletions(-) delete mode 100644 DEPLOYMENT_TYPES.md delete mode 100644 docs/openshift_components.md delete mode 100644 docs/proposals/README.md delete mode 100644 docs/proposals/crt_management_proposal.md delete mode 100644 docs/proposals/playbook_consolidation.md delete mode 100644 docs/proposals/proposal_template.md delete mode 100644 docs/proposals/role_decomposition.md delete mode 100644 examples/README.md delete mode 100644 examples/certificate-check-upload.yaml delete mode 100644 examples/certificate-check-volume.yaml delete mode 100644 examples/scheduled-certcheck-upload.yaml delete mode 100644 examples/scheduled-certcheck-volume.yaml delete mode 100644 hack/hooks/README.md delete mode 100644 hack/hooks/verify_generated_modules/README.md delete mode 100755 hack/hooks/verify_generated_modules/pre-commit delete mode 100644 inventory/40_basic_inventory.ini delete mode 100644 inventory/hosts.glusterfs.external.example delete mode 100644 inventory/hosts.glusterfs.mixed.example delete mode 100644 inventory/hosts.glusterfs.native.example delete mode 100644 inventory/hosts.glusterfs.registry-only.example delete mode 100644 inventory/hosts.glusterfs.storage-and-registry.example delete mode 100644 inventory/hosts.grafana.example delete mode 100644 inventory/hosts.localhost delete mode 100644 inventory/hosts.openstack delete mode 100644 inventory/install-config-example.yml delete mode 100644 meta/main.yml diff --git a/BUILD.md b/BUILD.md index 1c270db23..6c1aeb490 100644 --- a/BUILD.md +++ b/BUILD.md @@ -32,22 +32,3 @@ To build a container image of `openshift-ansible` using standalone **Docker**: cd openshift-ansible docker build -f images/installer/Dockerfile -t openshift-ansible . - -## Build the Atomic System Container - -A system container runs using runC instead of Docker and it is managed -by the [atomic](https://github.com/projectatomic/atomic/) tool. 
As it -doesn't require Docker to run, the installer can run on a node of the -cluster without interfering with the Docker daemon that is configured -by the installer itself. - -The first step is to build the [container image](#build-an-openshift-ansible-container-image) -as described before. The container image already contains all the -required files to run as a system container. - -Once the container image is built, we can import it into the OSTree -storage: - -``` -atomic pull --storage ostree docker:openshift-ansible:latest -``` diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index ef0a302dc..a2c582722 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -74,27 +74,6 @@ If you are new to Git, these links might help: --- -## Simple all-in-one localhost installation -``` -git clone https://github.com/openshift/openshift-ansible -cd openshift-ansible -sudo ansible-playbook -i inventory/hosts.localhost playbooks/prerequisites.yml -sudo ansible-playbook -i inventory/hosts.localhost playbooks/deploy_cluster.yml -``` - -## Development process -Most changes can be applied by re-running the config playbook. However, while -the config playbook will run faster the second time through it's still going to -take a very long time. As such, you may wish to run a smaller subsection of the -installation playbooks. You can for instance run the node, master, or hosted -playbooks in playbooks/openshift-node/config.yml, -playbooks/openshift-master/config.yml, playbooks/openshift-hosted/config.yml -respectively. - -We're actively working to refactor the playbooks into smaller discrete -components and we'll be documenting that structure shortly, for now those are -the most sensible logical units of work. - ## Running tests and other verification tasks We use [`tox`](http://readthedocs.org/docs/tox/) to manage virtualenvs where @@ -171,13 +150,6 @@ be reinstalled. Here are some useful tips that might improve your workflow while working on this repository. -#### Git Hooks - -Git hooks are included in this repository to aid in development. Check -out the README in the -[hack/hooks](http://github.com/openshift/openshift-ansible/blob/master/hack/hooks/README.md) -directory for more information. - #### Activating a virtualenv managed by tox If you want to enter a virtualenv created by tox to do additional debugging, you diff --git a/DEPLOYMENT_TYPES.md b/DEPLOYMENT_TYPES.md deleted file mode 100644 index b42569ce9..000000000 --- a/DEPLOYMENT_TYPES.md +++ /dev/null @@ -1,16 +0,0 @@ -# Deployment Types - -This repository supports OpenShift Origin and OpenShift Container Platform. - -Various defaults used throughout the playbooks and roles in this repository are -set based on the deployment type configuration (usually defined in an Ansible -hosts file). 
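The deployment type itself is normally set in the inventory. A minimal snippet, assuming the `[OSEv3:vars]` group convention used by the 3.x inventories in this repository:

```ini
# Ansible hosts file (illustrative)
[OSEv3:vars]
# Selects the set of defaults summarized in the table below
openshift_deployment_type=origin
```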
- -The table below outlines the defaults per `openshift_deployment_type`: - -| openshift_deployment_type | origin | openshift-enterprise | -|-----------------------------------------------------------------|------------------------------------------|----------------------------------------| -| **openshift_service_type** (also used for package names) | origin | atomic-openshift | -| **openshift.common.config_base** | /etc/origin | /etc/origin | -| **openshift_data_dir** | /var/lib/origin | /var/lib/origin | -| **Image Streams** | centos | rhel | diff --git a/HOOKS.md b/HOOKS.md index 071057ad7..01cabf98b 100644 --- a/HOOKS.md +++ b/HOOKS.md @@ -1,6 +1,6 @@ # Hooks -The ansible installer allows for operators to execute custom tasks during +OpenShift Ansible allows for operators to execute custom tasks during specific operations through a system called hooks. Hooks allow operators to provide files defining tasks to execute before and/or after specific areas during installations and upgrades. This can be very helpful to validate @@ -16,21 +16,17 @@ need to be updated to meet the new standard. ## Using Hooks -Hooks are defined in the ``hosts`` inventory file under the ``OSEv3:vars`` +Hooks are defined in the ``hosts`` inventory file under the ``nodes:vars`` section. Each hook should point to a yaml file which defines Ansible tasks. This file will be used as an include meaning that the file can not be a playbook but a set of tasks. Best practice suggests using absolute paths to the hook file to avoid any ambiguity. -### Example +### Example inventory variables ```ini -[OSEv3:vars] +[nodes:vars] # -openshift_master_upgrade_pre_hook=/usr/share/custom/pre_master.yml -openshift_master_upgrade_hook=/usr/share/custom/master.yml -openshift_master_upgrade_post_hook=/usr/share/custom/post_master.yml - openshift_node_upgrade_pre_hook=/usr/share/custom/pre_node.yml openshift_node_upgrade_hook=/usr/share/custom/node.yml openshift_node_upgrade_post_hook=/usr/share/custom/post_node.yml @@ -40,38 +36,23 @@ openshift_node_upgrade_post_hook=/usr/share/custom/post_node.yml Hook files must be a yaml formatted file that defines a set of Ansible tasks. The file may **not** be a playbook. -### Example +### Example hook task file ```yaml + --- # Trivial example forcing an operator to ack the start of an upgrade -# file=/usr/share/custom/pre_master.yml +# file=/usr/share/custom/pre_node.yml -- name: note the start of a master upgrade +- name: note the start of a node upgrade debug: - msg: "Master upgrade of {{ inventory_hostname }} is about to start" + msg: "Node upgrade of {{ inventory_hostname }} is about to start" - name: require an operator agree to start an upgrade pause: - prompt: "Hit enter to start the master upgrade" + prompt: "Hit enter to start the node upgrade" ``` -## Upgrade Hooks - -### openshift_master_upgrade_pre_hook -- Runs **before** each master is upgraded. -- This hook runs against **each master** in serial. -- If a task needs to run against a different host, said task will need to use [``delegate_to`` or ``local_action``](http://docs.ansible.com/ansible/playbooks_delegation.html#delegation). - -### openshift_master_upgrade_hook -- Runs **after** each master is upgraded but **before** it's service/system restart. -- This hook runs against **each master** in serial. -- If a task needs to run against a different host, said task will need to use [``delegate_to`` or ``local_action``](http://docs.ansible.com/ansible/playbooks_delegation.html#delegation). 
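Hook tasks run against the host being upgraded, so anything that must happen elsewhere needs explicit delegation, as noted above. A minimal, hypothetical hook task using ``delegate_to`` (the endpoint URL and file path are illustrative only):

```yaml
---
# file=/usr/share/custom/node.yml (illustrative)
- name: Notify an external system that this host is about to be upgraded
  uri:
    url: "https://monitoring.example.com/api/notify"  # hypothetical endpoint
    method: POST
    body_format: json
    body:
      host: "{{ inventory_hostname }}"
  delegate_to: localhost
```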
- - -### openshift_master_upgrade_post_hook -- Runs **after** each master is upgraded and has had it's service/system restart. -- This hook runs against **each master** in serial. -- If a task needs to run against a different host, said task will need to use [``delegate_to`` or ``local_action``](http://docs.ansible.com/ansible/playbooks_delegation.html#delegation). +## Available Upgrade Hooks ### openshift_node_upgrade_pre_hook - Runs **before** each node is upgraded. @@ -79,7 +60,7 @@ The file may **not** be a playbook. - If a task needs to run against a different host, said task will need to use [``delegate_to`` or ``local_action``](http://docs.ansible.com/ansible/playbooks_delegation.html#delegation). ### openshift_node_upgrade_hook -- Runs **after** each node is upgraded but **before** it's marked schedulable again.. +- Runs **after** each node is upgraded but **before** it's marked schedulable again. - This hook runs against **each node** in serial. - If a task needs to run against a different host, said task will need to use [``delegate_to`` or ``local_action``](http://docs.ansible.com/ansible/playbooks_delegation.html#delegation). diff --git a/README.md b/README.md index 0cbf19f7d..047e60491 100644 --- a/README.md +++ b/README.md @@ -1,171 +1,54 @@ [![Join the chat at https://gitter.im/openshift/openshift-ansible](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/openshift/openshift-ansible) [![Build Status](https://travis-ci.org/openshift/openshift-ansible.svg?branch=master)](https://travis-ci.org/openshift/openshift-ansible) -[![Coverage Status](https://coveralls.io/repos/github/openshift/openshift-ansible/badge.svg?branch=master)](https://coveralls.io/github/openshift/openshift-ansible?branch=master) - -NOTICE -====== - -Master branch is closed! A major refactor is ongoing in devel-40. -Changes for 3.x should be made directly to the latest release branch they're -relevant to and backported from there. - -WARNING -======= - -This branch is under heavy development. If you are interested in deploying a -working cluster, please utilize a release branch. # OpenShift Ansible - This repository contains [Ansible](https://www.ansible.com/) roles and -playbooks to install, upgrade, and manage -[OpenShift](https://www.openshift.com/) clusters. +playbooks for [OpenShift](https://www.openshift.com/) clusters. -## Getting the correct version -When choosing an openshift release, ensure that the necessary origin packages -are available in your distribution's repository. By default, openshift-ansible -will not configure extra repositories for testing or staging packages for -end users. +## Previous OpenShift Ansible 3.x releases +For 3.x releases of OpenShift Ansible please reference the release branch for +specific versions. The last 3.x release is +[3.11 release](https://github.com/openshift/openshift-ansible/tree/release-3.11). -We recommend using a release branch. We maintain stable branches -corresponding to upstream Origin releases, e.g.: we guarantee an -openshift-ansible 3.2 release will fully support an origin -[1.2 release](https://github.com/openshift/openshift-ansible/tree/release-1.2). +## OpenShift 4.x +Installation of OpenShift 4.x uses a command-line installation wizard instead of +Ansible playbooks. Learn more about the OpenShift Installer in this +[overview](https://github.com/openshift/installer/blob/master/docs/user/overview.md#installer-overview). -The most recent branch will often receive minor feature backports and -fixes. 
Older branches will receive only critical fixes. +For OpenShift 4.x, this repo only provides playbooks necessary for scaling up an +existing 4.x cluster with RHEL hosts. -In addition to the release branches, the master branch -[master branch](https://github.com/openshift/openshift-ansible/tree/master) -tracks our current work **in development** and should be compatible -with the -[Origin master branch](https://github.com/openshift/origin/tree/master) -(code in development). - - - -**Getting the right openshift-ansible release** - -Follow this release pattern and you can't go wrong: - -| Origin/OCP | OpenShift-Ansible version | openshift-ansible branch | -| ------------- | ----------------- |----------------------------------| -| 1.3 / 3.3 | 3.3 | release-1.3 | -| 1.4 / 3.4 | 3.4 | release-1.4 | -| 1.5 / 3.5 | 3.5 | release-1.5 | -| 3.*X* | 3.*X* | release-3.x | - -If you're running from the openshift-ansible **master branch** we can -only guarantee compatibility with the newest origin releases **in -development**. Use a branch corresponding to your origin version if -you are not running a stable release. - - -## Setup - -Install base dependencies: +The [master branch](https://github.com/openshift/openshift-ansible/tree/master) +tracks our current work **in development**. Requirements: - Ansible >= 2.7.8 -- Jinja >= 2.7 - pyOpenSSL -- python-lxml - ----- - -Fedora: - -``` -dnf install -y ansible pyOpenSSL python-cryptography python-lxml -``` - -## Simple all-in-one localhost Installation -This assumes that you've installed the base dependencies and you're running on -Fedora or RHEL -``` -git clone https://github.com/openshift/openshift-ansible -cd openshift-ansible -sudo ansible-playbook -i inventory/hosts.localhost playbooks/prerequisites.yml -sudo ansible-playbook -i inventory/hosts.localhost playbooks/deploy_cluster.yml -``` +- python2-openshift # Quickstart -Install the new installer from https://www.github.com/openshift/installer - -Construct a proper install-config.yml, and make a copy called -install-config-ansible.yml. - -## Hosts -You will need the following hosts - -### Boostrap host -This is a special host that is not part of the cluster but is required to be -available to help the cluster bootstrap itself. This is not a bastion host, -it will initially be part of the cluster and should be able to communicate with -the masters in the cluster. - -### Masters -You need 1 or 3 masters. - -### Workers -You need 0 or more workers. Note, by default, masters are unschedulable so -you will need one or more workers if you want to schedule workloads. - -## DNS -4.x installs require specific dns records to be in place, and there is no way -to complete an install without working DNS. You are in charge of ensuring the -following DNS records are resolvable from your cluster, the openshift-ansible -installer will not make any attempt to do any of this for you. - -First, the output of ```hostname``` on each host must be resolvable to other hosts. -The nodes will communicate with each other based on this value. - -install-config.yml value of 'baseDomain' must be a working domain. - -### A records -```sh --api. # ex: mycluster-api.example.com --master-0. # ex: mycluster-master-0.example.com --etcd-0. # ex: mycluster-etcd-0.example.com --bootstrap. # ex: mycluster-bootstrap.example.com -``` - -Note: There should be a master/etcd record for each master host in your cluster -(either 1 or 3). 
etcd hosts must be master hosts, and the records must resolve -to the same host for each master/etcd record, respectively. - -### SRV records -```sh -SRV _etcd-client-ssl._tcp.. '1 1 2379 -etcd-0.' -SRV _etcd-server-ssl._tcp.. '1 1 2380 -etcd-0.' -... -SRV _etcd-client-ssl._tcp.. '1 1 2379 -etcd-.' -SRV _etcd-server-ssl._tcp.. '1 1 2380 -etcd-.' - -# ex: _etcd-client-ssl._tcp.mycluster.example.com '1 1 2379 mycluster-etcd-0.example.com' -``` - -Consult with your DNS provider about the proper way to create SRV records. In -any case, there should be a client and server SRV record for each etcd backend, -and you MUST use the etcd FQDN you created earlier, not the master or any other -record. +## Install an OpenShift 4.x cluster +Install a cluster using the [OpenShift Installer](https://www.github.com/openshift/installer). ## Inventory -Check out inventory/40_basic_inventory.ini for an example. +Create an inventory file with the `new_workers` group to identify the hosts which +should be added to the cluster. +```yaml -## Generate ignition configs -Use the openshift-install command to generate ignition configs utilizing the -install-config.yml you created earlier. This will consume the install-config.yml -file, so ensure you have copied the file as mentioned previously. - -```sh -openshift-install create ignition-configs +--- +[new_workers] +mycluster-worker-0.example.com +mycluster-worker-1.example.com +mycluster-worker-2.example.com ``` -## Run playbook -playbooks/deploy_cluster_40.yml +## Run the scaleup playbook + +```bash +ansible-playbook playbooks/openshift_node/scaleup.yml +``` # Further reading diff --git a/docs/openshift_components.md b/docs/openshift_components.md deleted file mode 100644 index a4bec76be..000000000 --- a/docs/openshift_components.md +++ /dev/null @@ -1,152 +0,0 @@ -# OpenShift-Ansible Components - ->**TL;DR: Look at playbooks/openshift-web-console as an example** - -## General Guidelines - -Components in OpenShift-Ansible consist of two main parts: -* Entry point playbook(s) -* Ansible role -* OWNERS files in both the playbooks and roles associated with the component - -When writing playbooks and roles, follow these basic guidelines to ensure -success and maintainability. - -### Idempotency - -Definition: - ->_an idempotent operation is one that has no additional effect if it is called -more than once with the same input parameters_ - -Ansible playbooks and roles should be written such that when the playbook is run -again with the same configuration, no tasks should report `changed` as well as -no material changes should be made to hosts in the inventory. Playbooks should -be re-runnable, but also be idempotent. - -### Other advice for success - -* Try not to leave artifacts like files or directories -* Avoid using `failed_when:` where ever possible -* Always `name:` your tasks -* Document complex logic or code in tasks -* Set role defaults in `defaults/main.yml` -* Avoid the use of `set_fact:` - -## Building Component Playbooks - -Component playbooks are divided between the root of the component directory and -the `private` directory. This allows other parts of openshift-ansible to import -component playbooks without also running the common initialization playbooks -unnecessarily. 
- -Entry point playbooks are located in the `playbooks` directory and follow the -following structure: - -``` -playbooks/openshift-component_name -├── config.yml Entry point playbook -├── private -│   ├── config.yml Included by the Cluster Installer -│   └── roles -> ../../roles Don't forget to create this symlink -├── OWNERS Assign 2-3 approvers and reviewers -└── README.md Tell us what this component does -``` - -### Entry point config playbook - -The primary component entry point playbook will at a minimum run the common -initialization playbooks and then import the private playbook. - -```yaml -# playbooks/openshift-component_name/config.yml ---- -- import_playbook: ../init/main.yml - -- import_playbook: private/config.yml - -``` - -### Private config playbook - -The private component playbook will run the component role against the intended -host groups and provide any required variables. This playbook is also called -during cluster installs and upgrades. Think of this as the shareable portion of -the component playbooks. - -```yaml -# playbooks/openshift-component_name/private/config.yml ---- - -- name: OpenShift Component_Name Installation - hosts: oo_first_master - tasks: - - import_role: - name: openshift_component_name -``` - -NOTE: The private playbook may also include wrapper plays for the Installer -Checkpoint plugin which will be discussed later. - -## Building Component Roles - -Component roles contain all of the necessary files and logic to install and -configure the component. The install portion of the role should also support -performing upgrades on the component. - -Ansible roles are located in the `roles` directory and follow the following -structure: - -``` -roles/openshift_component_name -├── defaults -│   └── main.yml Defaults for variables used in the role -│ which can be overridden by the user -├── files -│   ├── component-config.yml -│   ├── component-rbac-template.yml -│   └── component-template.yml -├── handlers -│   └── main.yml -├── meta -│   └── main.yml -├── OWNERS Assign 2-3 approvers and reviewers -├── README.md -├── tasks -│   └── main.yml Default playbook used when calling the role -├── templates -└── vars - └── main.yml Internal roles variables -``` -### Component Installation - -Where possible, Ansible modules should be used to perform idempotent operations -with the OpenShift API. Avoid using the `command` or `shell` modules with the -`oc` cli unless the required operation is not available through either the -`lib_openshift` modules or Ansible core modules. - -The following is a basic flow of Ansible tasks for installation. - -- Create the project (oc_project) -- Create a temp directory for processing files -- Copy the client config to temp -- Copy templates to temp -- Read existing config map -- Copy existing config map to temp -- Generate/update config map -- Reconcile component RBAC (oc_process) -- Apply component template (oc_process) -- Poll healthz and wait for it to come up -- Log status of deployment -- Clean up temp - -### Component Removal - -- Remove the project (oc_project) - -## Enabling the Installer Checkpoint callback - -- Add the wrapper plays to the entry point playbook -- Update the installer_checkpoint callback plugin - -Details can be found in the installer_checkpoint role. 
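A rough sketch of the wrapper plays referred to above, following the `set_stats` pattern consumed by the installer_checkpoint callback (play names and the phase key are illustrative; the installer_checkpoint role documentation is authoritative):

```yaml
# playbooks/openshift-component_name/private/config.yml (illustrative)
---
- name: Component_Name Install Checkpoint Start
  hosts: all
  gather_facts: false
  tasks:
  - name: Set Component_Name install 'In Progress'
    run_once: true
    set_stats:
      data:
        installer_phase_component_name:
          title: "Component_Name Install"
          playbook: "playbooks/openshift-component_name/config.yml"
          status: "In Progress"
          start: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"

- name: OpenShift Component_Name Installation
  hosts: oo_first_master
  tasks:
  - import_role:
      name: openshift_component_name

- name: Component_Name Install Checkpoint End
  hosts: all
  gather_facts: false
  tasks:
  - name: Set Component_Name install 'Complete'
    run_once: true
    set_stats:
      data:
        installer_phase_component_name:
          status: "Complete"
          end: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"
```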
diff --git a/docs/proposals/README.md b/docs/proposals/README.md deleted file mode 100644 index 89bbe5163..000000000 --- a/docs/proposals/README.md +++ /dev/null @@ -1,27 +0,0 @@ -# OpenShift-Ansible Proposal Process - -## Proposal Decision Tree -TODO: Add details about when a proposal is or is not required. - -## Proposal Process -The following process should be followed when a proposal is needed: - -1. Create a pull request with the initial proposal - * Use the [proposal template][template] - * Name the proposal using two or three topic words with underscores as a separator (i.e. proposal_template.md) - * Place the proposal in the docs/proposals directory -2. Notify the development team of the proposal and request feedback -3. Review the proposal on the OpenShift-Ansible Architecture Meeting -4. Update the proposal as needed and ask for feedback -5. Approved/Closed Phase - * If 75% or more of the active development team give the proposal a :+1: it is Approved - * If 50% or more of the active development team disagrees with the proposal it is Closed - * If the person proposing the proposal no longer wishes to continue they can request it to be Closed - * If there is no activity on a proposal, the active development team may Close the proposal at their discretion - * If none of the above is met the cycle can continue to Step 4. -6. For approved proposals, the current development lead(s) will: - * Update the Pull Request with the result and merge the proposal - * Create a card on the Cluster Lifecycle [Trello board][trello] so it may be scheduled for implementation. - -[template]: proposal_template.md -[trello]: https://trello.com/b/wJYDst6C diff --git a/docs/proposals/crt_management_proposal.md b/docs/proposals/crt_management_proposal.md deleted file mode 100644 index bf4048744..000000000 --- a/docs/proposals/crt_management_proposal.md +++ /dev/null @@ -1,113 +0,0 @@ -# Container Runtime Management - -## Description -origin and openshift-ansible support multiple container runtimes. This proposal -is related to refactoring how we handle those runtimes in openshift-ansible. - -### Problems addressed -We currently don't install docker during the install at a point early enough to -not fail health checks, and we don't have a good story around when/how to do it. -This is complicated by logic around containerized and non-containerized installs. - -A web of dependencies can cause changes to docker that are unintended and has -resulted in a series of work-around such as 'skip_docker' boolean. - -We don't handle docker storage because it's BYO. By moving docker to a prerequisite -play, we can tackle storage up front and never have to touch it again. - -container_runtime logic is currently spread across 3 roles: docker, openshift_docker, -and openshift_docker_facts. The name 'docker' does not accurately portray what -the role(s) do. - -## Rationale -* Refactor docker (and related meta/fact roles) into 'container_runtime' role. -* Strip all meta-depends on container runtime out of other roles and plays. -* Create a 'prerequisites.yml' entry point that will setup various items -such as container storage and container runtime before executing installation. -* All other roles and plays should merely consume container runtime, should not -configure, restart, or change the container runtime as much as feasible. 
- -## Design - -The container_runtime role should be comprised of 3 'pseudo-roles' which will be -consumed using import_role; each component area should be enabled/disabled with -a boolean value, defaulting to true. - -I call them 'pseudo-roles' because they are more or less independent functional -areas that may share some variables and act on closely related components. This -is an effort to reuse as much code as possible, limit role-bloat (we already have -an abundance of roles), and make things as modular as possible. - -```yaml -# prerequisites.yml -- include: std_include.yml -- include: container_runtime_setup.yml -... -# container_runtime_setup.yml -- hosts: "{{ openshift_runtime_manage_hosts | default('oo_nodes_to_config') }}" - tasks: - - import_role: - name: container_runtime - tasks_from: install.yml - when: openshift_container_runtime_install | default(True) | bool - - import_role: - name: container_runtime - tasks_from: storage.yml - when: openshift_container_runtime_storage | default(True) | bool - - import_role: - name: container_runtime - tasks_from: configure.yml - when: openshift_container_runtime_configure | default(True) | bool -``` - -Note the host group on the above play. No more guessing what hosts to run this -stuff against. If you want to use an atomic install, specify what hosts will need -us to setup container runtime (such as etcd hosts, loadbalancers, etc); - -We should direct users that are using atomic hosts to disable install in the docs, -let's not add a bunch of logic. - -Alternatively, we can create a new group. - -### Part 1, container runtime install -Install the container runtime components of the desired type. - -```yaml -# install.yml -- include: docker.yml - when: openshift_container_runtime_install_docker | bool - -- include: crio.yml - when: openshift_container_runtime_install_crio | bool - -... other container run times... -``` - -Alternatively to using booleans for each run time, we could use a variable like -"openshift_container_runtime_type". This would be my preference, as we could -use this information in later roles. - -### Part 2, configure/setup container runtime storage -Configure a supported storage solution for containers. - -Similar setup to the previous section. We might need to add some logic for the -different runtimes here, or we maybe create a matrix of possible options. - -### Part 3, configure container runtime. -Place config files, environment files, systemd units, etc. Start/restart -the container runtime as needed. - -Similar to Part 1 with how we should do things. - -## Checklist -* Strip docker from meta dependencies. -* Combine docker facts and meta roles into container_runtime role. -* Docs - -## User Story -As a user of openshift-ansible, I want to be able to manage my container runtime -and related components independent of openshift itself. - -## Acceptance Criteria -* Verify that each container runtime installs with this new method. -* Verify that openshift installs with this new method. diff --git a/docs/proposals/playbook_consolidation.md b/docs/proposals/playbook_consolidation.md deleted file mode 100644 index 98aedb021..000000000 --- a/docs/proposals/playbook_consolidation.md +++ /dev/null @@ -1,178 +0,0 @@ -# OpenShift-Ansible Playbook Consolidation - -## Description -The designation of `byo` is no longer applicable due to being able to deploy on -physical hardware or cloud resources using the playbooks in the `byo` directory. 
-Consolidation of these directories will make maintaining the code base easier -and provide a more straightforward project for users and developers. - -The main points of this proposal are: -* Consolidate initialization playbooks into one set of playbooks in - `playbooks/init`. -* Collapse the `playbooks/byo` and `playbooks/common` into one set of - directories at `playbooks/openshift-*`. - -This consolidation effort may be more appropriate when the project moves to -using a container as the default installation method. - -## Design - -### Initialization Playbook Consolidation -Currently there are two separate sets of initialization playbooks: -* `playbooks/byo/openshift-cluster/initialize_groups.yml` -* `playbooks/common/openshift-cluster/std_include.yml` - -Although these playbooks are located in the `openshift-cluster` directory they -are shared by all of the `openshift-*` areas. These playbooks would be better -organized in a `playbooks/init` directory collocated with all their related -playbooks. - -In the example below, the following changes have been made: -* `playbooks/byo/openshift-cluster/initialize_groups.yml` renamed to - `playbooks/init/initialize_host_groups.yml` -* `playbooks/common/openshift-cluster/std_include.yml` renamed to - `playbooks/init/main.yml` -* `- include: playbooks/init/initialize_host_groups.yml` has been added to the - top of `playbooks/init/main.yml` -* All other related files for initialization have been moved to `playbooks/init` - -The `initialize_host_groups.yml` playbook is only one play with one task for -importing variables for inventory group conversions. This task could be further -consolidated with the play in `evaluate_groups.yml`. - -The new standard initialization playbook would be -`playbooks/init/main.yml`. - - -``` - -> $ tree openshift-ansible/playbooks/init -. -├── evaluate_groups.yml -├── initialize_facts.yml -├── initialize_host_groups.yml -├── initialize_openshift_repos.yml -├── initialize_openshift_version.yml -├── main.yml -├── roles -> ../../roles -├── validate_hostnames.yml -└── vars - └── cluster_hosts.yml -``` - -```yaml -# openshift-ansible/playbooks/init/main.yml ---- -- include: initialize_host_groups.yml - -- include: evaluate_groups.yml - -- include: initialize_facts.yml - -- include: validate_hostnames.yml - -- include: initialize_openshift_repos.yml - -- include: initialize_openshift_version.yml -``` - -### `byo` and `common` Playbook Consolidation -Historically, the `byo` directory coexisted with other platform directories -which contained playbooks that then called into `common` playbooks to perform -common installation steps for all platforms. Since the other platform -directories have been removed this separation is no longer necessary. - -In the example below, the following changes have been made: -* `playbooks/byo/openshift-master` renamed to - `playbooks/openshift-master` -* `playbooks/common/openshift-master` renamed to - `playbooks/openshift-master/private` -* Original `byo` entry point playbooks have been updated to include their - respective playbooks from `private/`. -* Symbolic links have been updated as necessary - -All user consumable playbooks are in the root of `openshift-master` and no entry -point playbooks exist in the `private` directory. Maintaining the separation -between entry point playbooks and the private playbooks allows individual pieces -of the deployments to be used as needed by other components. - -``` -openshift-ansible/playbooks/openshift-master -> $ tree -. 
-├── config.yml -├── private -│   ├── additional_config.yml -│   ├── config.yml -│   ├── filter_plugins -> ../../../filter_plugins -│   ├── library -> ../../../library -│   ├── lookup_plugins -> ../../../lookup_plugins -│   ├── restart_hosts.yml -│   ├── restart_services.yml -│   ├── restart.yml -│   ├── roles -> ../../../roles -│   ├── scaleup.yml -│   └── validate_restart.yml -├── restart.yml -└── scaleup.yml -``` - -```yaml -# openshift-ansible/playbooks/openshift-master/config.yml ---- -- include: ../init/main.yml - -- include: private/config.yml -``` - -With the consolidation of the directory structure and component installs being -removed from `openshift-cluster`, that directory is no longer necessary. To -deploy an entire OpenShift cluster, a playbook would be created to tie together -all of the different components. The following example shows how multiple -components would be combined to perform a complete install. - -```yaml -# openshift-ansible/playbooks/deploy_cluster.yml ---- -- include: init/main.yml - -- include: openshift-etcd/private/config.yml - -- include: openshift-nfs/private/config.yml - -- include: openshift-loadbalancer/private/config.yml - -- include: openshift-master/private/config.yml - -- include: openshift-node/private/config.yml - -- include: openshift-glusterfs/private/config.yml - -- include: openshift-hosted/private/config.yml - -- include: openshift-service-catalog/private/config.yml -``` - -## User Story -As a developer of OpenShift-Ansible, -I want simplify the playbook directory structure -so that users can easily find deployment playbooks and developers know where new -features should be developed. - -## Implementation -Given the size of this refactoring effort, it should be broken into smaller -steps which can be completed independently while still maintaining a functional -project. - -Steps: -1. Update and merge consolidation of the initialization playbooks. -2. Update each merge consolidation of each `openshift-*` component area -3. Update and merge consolidation of `openshift-cluster` - -## Acceptance Criteria -* Verify that all entry points playbooks install or configure as expected. -* Verify that CI is updated for testing new playbook locations. -* Verify that repo documentation is updated -* Verify that user documentation is updated - -## References diff --git a/docs/proposals/proposal_template.md b/docs/proposals/proposal_template.md deleted file mode 100644 index ece288037..000000000 --- a/docs/proposals/proposal_template.md +++ /dev/null @@ -1,30 +0,0 @@ -# Proposal Title - -## Description - - -## Rationale - - -## Design -
- -## Checklist -* Item 1 -* Item 2 -* Item 3 - -## User Story -As a developer on OpenShift-Ansible, -I want ... -so that ... - -## Acceptance Criteria -* Verify that ... -* Verify that ... -* Verify that ... - -## References -* Link -* Link -* Link diff --git a/docs/proposals/role_decomposition.md b/docs/proposals/role_decomposition.md deleted file mode 100644 index fdf109a05..000000000 --- a/docs/proposals/role_decomposition.md +++ /dev/null @@ -1,353 +0,0 @@ -# Scaffolding for decomposing large roles - -## Why? - -Currently we have roles that are very large and encompass a lot of different -components. This makes for a lot of logic required within the role, can -create complex conditionals, and increases the learning curve for the role. - -## How? - -Creating a guide on how to approach breaking up a large role into smaller, -component based, roles. Also describe how to develop new roles, to avoid creating -large roles. - -## Proposal - -Create a new guide or append to the current contributing guide a process for -identifying large roles that can be split up, and how to compose smaller roles -going forward. - -### Large roles - -A role should be considered for decomposition if it: - -1) Configures/installs more than one product. -1) Can configure multiple variations of the same product that can live -side by side. -1) Has different entry points for upgrading and installing a product - -Large roles1 should be responsible for: -> 1 or composing playbooks - -1) Composing smaller roles to provide a full solution such as an Openshift Master -1) Ensuring that smaller roles are called in the correct order if necessary -1) Calling smaller roles with their required variables -1) Performing prerequisite tasks that small roles may depend on being in place -(openshift_logging certificate generation for example) - -### Small roles - -A small role should be able to: - -1) Be deployed independently of other products (this is different than requiring -being installed after other base components such as OCP) -1) Be self contained and able to determine facts that it requires to complete -1) Fail fast when facts it requires are not available or are invalid -1) "Make it so" based on provided variables and anything that may be required -as part of doing such (this should include data migrations) -1) Have a minimal set of dependencies in meta/main.yml, just enough to do its job - -### Example using decomposition of openshift_logging - -The `openshift_logging` role was created as a port from the deployer image for -the `3.5` deliverable. It was a large role that created the service accounts, -configmaps, secrets, routes, and deployment configs/daemonset required for each -of its different components (Fluentd, Kibana, Curator, Elasticsearch). - -It was possible to configure any of the components independently of one another, -up to a point. However, it was an all of nothing installation and there was a -need from customers to be able to do things like just deploy Fluentd. - -Also being able to support multiple versions of configuration files would become -increasingly messy with a large role. Especially if the components had changes -at different intervals. - -#### Folding of responsibility - -There was a duplicate of work within the installation of three of the four logging -components where there was a possibility to deploy both an 'operations' and -'non-operations' cluster side-by-side. 
The first step was to collapse that -duplicate work into a single path and allow a variable to be provided to -configure such that either possibility could be created. - -#### Consolidation of responsibility - -The generation of OCP objects required for each component were being created in -the same task file, all Service Accounts were created at the same time, all secrets, -configmaps, etc. The only components that were not generated at the same time were -the deployment configs and the daemonset. The second step was to make the small -roles self contained and generate their own required objects. - -#### Consideration for prerequisites - -Currently the Aggregated Logging stack generates its own certificates as it has -some requirements that prevent it from utilizing the OCP cert generation service. -In order to make sure that all components were able to trust one another as they -did previously, until the cert generation service can be used, the certificate -generation is being handled within the top level `openshift_logging` role and -providing the location of the generated certificates to the individual roles. - -#### Snippets - -[openshift_logging/tasks/install_logging.yaml](https://github.com/ewolinetz/openshift-ansible/blob/logging_component_subroles/roles/openshift_logging/tasks/install_logging.yaml) -```yaml -- name: Gather OpenShift Logging Facts - openshift_logging_facts: - oc_bin: "{{openshift.common.client_binary}}" - openshift_logging_namespace: "{{openshift_logging_namespace}}" - -- name: Set logging project - oc_project: - state: present - name: "{{ openshift_logging_namespace }}" - -- name: Create logging cert directory - file: - path: "{{ openshift.common.config_base }}/logging" - state: directory - mode: 0755 - changed_when: False - check_mode: no - -- include: generate_certs.yaml - vars: - generated_certs_dir: "{{openshift.common.config_base}}/logging" - -## Elasticsearch -- import_role: - name: openshift_logging_elasticsearch - vars: - generated_certs_dir: "{{openshift.common.config_base}}/logging" - -- import_role: - name: openshift_logging_elasticsearch - vars: - generated_certs_dir: "{{openshift.common.config_base}}/logging" - openshift_logging_es_ops_deployment: true - when: - - openshift_logging_use_ops | bool - - -## Kibana -- import_role: - name: openshift_logging_kibana - vars: - generated_certs_dir: "{{openshift.common.config_base}}/logging" - openshift_logging_kibana_namespace: "{{ openshift_logging_namespace }}" - openshift_logging_kibana_master_url: "{{ openshift_logging_master_url }}" - openshift_logging_kibana_master_public_url: "{{ openshift_logging_master_public_url }}" - openshift_logging_kibana_image_prefix: "{{ openshift_logging_image_prefix }}" - openshift_logging_kibana_image_version: "{{ openshift_logging_image_version }}" - openshift_logging_kibana_replicas: "{{ openshift_logging_kibana_replica_count }}" - openshift_logging_kibana_es_host: "{{ openshift_logging_es_host }}" - openshift_logging_kibana_es_port: "{{ openshift_logging_es_port }}" - openshift_logging_kibana_image_pull_secret: "{{ openshift_logging_image_pull_secret }}" - -- import_role: - name: openshift_logging_kibana - vars: - generated_certs_dir: "{{openshift.common.config_base}}/logging" - openshift_logging_kibana_ops_deployment: true - openshift_logging_kibana_namespace: "{{ openshift_logging_namespace }}" - openshift_logging_kibana_master_url: "{{ openshift_logging_master_url }}" - openshift_logging_kibana_master_public_url: "{{ openshift_logging_master_public_url }}" - 
openshift_logging_kibana_image_prefix: "{{ openshift_logging_image_prefix }}" - openshift_logging_kibana_image_version: "{{ openshift_logging_image_version }}" - openshift_logging_kibana_image_pull_secret: "{{ openshift_logging_image_pull_secret }}" - openshift_logging_kibana_es_host: "{{ openshift_logging_es_ops_host }}" - openshift_logging_kibana_es_port: "{{ openshift_logging_es_ops_port }}" - openshift_logging_kibana_nodeselector: "{{ openshift_logging_kibana_ops_nodeselector }}" - openshift_logging_kibana_memory_limit: "{{ openshift_logging_kibana_ops_memory_limit }}" - openshift_logging_kibana_cpu_request: "{{ openshift_logging_kibana_ops_cpu_request }}" - openshift_logging_kibana_hostname: "{{ openshift_logging_kibana_ops_hostname }}" - openshift_logging_kibana_replicas: "{{ openshift_logging_kibana_ops_replica_count }}" - openshift_logging_kibana_proxy_debug: "{{ openshift_logging_kibana_ops_proxy_debug }}" - openshift_logging_kibana_proxy_memory_limit: "{{ openshift_logging_kibana_ops_proxy_memory_limit }}" - openshift_logging_kibana_proxy_cpu_request: "{{ openshift_logging_kibana_ops_proxy_cpu_request }}" - openshift_logging_kibana_cert: "{{ openshift_logging_kibana_ops_cert }}" - openshift_logging_kibana_key: "{{ openshift_logging_kibana_ops_key }}" - openshift_logging_kibana_ca: "{{ openshift_logging_kibana_ops_ca}}" - when: - - openshift_logging_use_ops | bool - - -## Curator -- import_role: - name: openshift_logging_curator - vars: - generated_certs_dir: "{{openshift.common.config_base}}/logging" - openshift_logging_curator_namespace: "{{ openshift_logging_namespace }}" - openshift_logging_curator_master_url: "{{ openshift_logging_master_url }}" - openshift_logging_curator_image_prefix: "{{ openshift_logging_image_prefix }}" - openshift_logging_curator_image_version: "{{ openshift_logging_image_version }}" - openshift_logging_curator_image_pull_secret: "{{ openshift_logging_image_pull_secret }}" - -- import_role: - name: openshift_logging_curator - vars: - generated_certs_dir: "{{openshift.common.config_base}}/logging" - openshift_logging_curator_ops_deployment: true - openshift_logging_curator_namespace: "{{ openshift_logging_namespace }}" - openshift_logging_curator_master_url: "{{ openshift_logging_master_url }}" - openshift_logging_curator_image_prefix: "{{ openshift_logging_image_prefix }}" - openshift_logging_curator_image_version: "{{ openshift_logging_image_version }}" - openshift_logging_curator_image_pull_secret: "{{ openshift_logging_image_pull_secret }}" - openshift_logging_curator_memory_limit: "{{ openshift_logging_curator_ops_memory_limit }}" - openshift_logging_curator_cpu_request: "{{ openshift_logging_curator_ops_cpu_request }}" - openshift_logging_curator_nodeselector: "{{ openshift_logging_curator_ops_nodeselector }}" - when: - - openshift_logging_use_ops | bool - - -## Fluentd -- import_role: - name: openshift_logging_fluentd - vars: - generated_certs_dir: "{{openshift.common.config_base}}/logging" - -- include: update_master_config.yaml -``` - -[openshift_logging_elasticsearch/meta/main.yaml](https://github.com/ewolinetz/openshift-ansible/blob/logging_component_subroles/roles/openshift_logging_elasticsearch/meta/main.yaml) -```yaml ---- -galaxy_info: - author: OpenShift Red Hat - description: OpenShift Aggregated Logging Elasticsearch Component - company: Red Hat, Inc. 
- license: Apache License, Version 2.0 - min_ansible_version: 2.2 - platforms: - - name: EL - versions: - - 7 - categories: - - cloud -dependencies: -- role: lib_openshift -``` - -[openshift_logging/meta/main.yaml](https://github.com/ewolinetz/openshift-ansible/blob/logging_component_subroles/roles/openshift_logging/meta/main.yaml) -```yaml ---- -galaxy_info: - author: OpenShift Red Hat - description: OpenShift Aggregated Logging - company: Red Hat, Inc. - license: Apache License, Version 2.0 - min_ansible_version: 2.2 - platforms: - - name: EL - versions: - - 7 - categories: - - cloud -dependencies: -- role: lib_openshift -- role: openshift_facts -``` - -[openshift_logging/tasks/install_support.yaml - old](https://github.com/openshift/openshift-ansible/blob/master/roles/openshift_logging/tasks/install_support.yaml) -```yaml ---- -# This is the base configuration for installing the other components -- name: Check for logging project already exists - command: > - {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get project {{openshift_logging_namespace}} --no-headers - register: logging_project_result - ignore_errors: yes - when: not ansible_check_mode - changed_when: no - -- name: "Create logging project" - command: > - {{ openshift.common.client_binary }} adm --config={{ mktemp.stdout }}/admin.kubeconfig new-project {{openshift_logging_namespace}} - when: not ansible_check_mode and "not found" in logging_project_result.stderr - -- name: Create logging cert directory - file: path={{openshift.common.config_base}}/logging state=directory mode=0755 - changed_when: False - check_mode: no - -- include: generate_certs.yaml - vars: - generated_certs_dir: "{{openshift.common.config_base}}/logging" - -- name: Create temp directory for all our templates - file: path={{mktemp.stdout}}/templates state=directory mode=0755 - changed_when: False - check_mode: no - -- include: generate_secrets.yaml - vars: - generated_certs_dir: "{{openshift.common.config_base}}/logging" - -- include: generate_configmaps.yaml - -- include: generate_services.yaml - -- name: Generate kibana-proxy oauth client - template: src=oauth-client.j2 dest={{mktemp.stdout}}/templates/oauth-client.yaml - vars: - secret: "{{oauth_secret}}" - when: oauth_secret is defined - check_mode: no - changed_when: no - -- include: generate_clusterroles.yaml - -- include: generate_rolebindings.yaml - -- include: generate_clusterrolebindings.yaml - -- include: generate_serviceaccounts.yaml - -- include: generate_routes.yaml -``` - -# Limitations - -There will always be exceptions for some of these rules, however the majority of -roles should be able to fall within these guidelines. - -# Additional considerations - -## Playbooks including playbooks -In some circumstances it does not make sense to have a composing role but instead -a playbook would be best for orchestrating the role flow. Decisions made regarding -playbooks including playbooks will need to be taken into consideration as part of -defining this process. -Ref: (link to rteague's presentation?) - -## Role dependencies -We want to make sure that our roles do not have any extra or unnecessary dependencies -in meta/main.yml without: - -1. Proposing the inclusion in a team meeting or as part of the PR review and getting agreement -1. 
Documenting in meta/main.yml why it is there and when it was agreed to (date) - -## Avoiding overly verbose roles -When we are splitting our roles up into smaller components we want to ensure we -avoid creating roles that are, for a lack of a better term, overly verbose. What -do we mean by that? If we have `openshift_control_plane` as an example, and we were to -split it up, we would have a component for `etcd`, `docker`, and possibly for -its rpms/configs. We would want to avoid creating a role that would just create -certificates as those would make sense to be contained with the rpms and configs. -Likewise, when it comes to being able to restart the master, we wouldn't have a -role where that was its sole purpose. - -The same would apply for the `etcd` and `docker` roles. Anything that is required -as part of installing `etcd` such as generating certificates, installing rpms, -and upgrading data between versions should all be contained within the single -`etcd` role. - -## Enforcing standards -Certain naming standards like variable names could be verified as part of a Travis -test. If we were going to also enforce that a role either has tasks or includes -(for example) then we could create tests for that as well. - -## CI tests for individual roles -If we are able to correctly split up roles, it should be possible to test role -installations/upgrades like unit tests (assuming they would be able to be installed -independently of other components). diff --git a/docs/repo_structure.md b/docs/repo_structure.md index 7d47b32ae..f0b3a4a48 100644 --- a/docs/repo_structure.md +++ b/docs/repo_structure.md @@ -54,17 +54,3 @@ _OpenShift Components_ └── test Contains tests. ``` -### CI - -These files are used by [PAPR](https://github.com/projectatomic/papr), -It is very similar in workflow to Travis, with the test -environment and test scripts defined in a YAML file. - -``` -. -├── .papr.yml -├── .papr.sh -└── .papr.inventory -├── .papr.all-in-one.inventory -└── .papr-master-ha.inventory -``` diff --git a/examples/README.md b/examples/README.md deleted file mode 100644 index 17398360e..000000000 --- a/examples/README.md +++ /dev/null @@ -1,96 +0,0 @@ -# openshift-ansible usage examples - -The primary use of `openshift-ansible` is to install, configure and upgrade OpenShift clusters. - -This is typically done by direct invocation of Ansible tools like `ansible-playbook`. This use case is covered in detail in the [OpenShift advanced installation documentation](https://docs.okd.io/latest/install_config/install/advanced_install.html) - -For OpenShift Container Platform there's also an installation utility that wraps `openshift-ansible`. This usage case is covered in the [Quick Installation](https://docs.openshift.com/container-platform/latest/install_config/install/quick_install.html) section of the documentation. - -The usage examples below cover use cases other than install/configure/upgrade. - -## Container image - -The examples below run [openshift-ansible in a container](../README_CONTAINER_IMAGE.md) to perform certificate expiration checks on an OpenShift cluster from pods running on the cluster itself. - -You can find more details about the certificate expiration check roles and example playbooks in [the openshift_certificate_expiry role's README](../roles/openshift_certificate_expiry/README.md). 
- -### Job to upload certificate expiration reports - -The example `Job` in [certificate-check-upload.yaml](certificate-check-upload.yaml) executes a [Job](https://docs.okd.io/latest/dev_guide/jobs.html) that checks the expiration dates of the internal certificates of the cluster and uploads HTML and JSON reports to `/etc/origin/certificate_expiration_report` in the masters. - -This example uses the [`easy-mode-upload.yaml`](../playbooks/openshift-checks/certificate_expiry/easy-mode-upload.yaml) example playbook, which generates reports and uploads them to the masters. The playbook can be customized via environment variables to control the length of the warning period (`CERT_EXPIRY_WARN_DAYS`) and the location in the masters where the reports are uploaded (`COPY_TO_PATH`). - -The job expects the inventory to be provided via the *hosts* key of a [ConfigMap](https://docs.okd.io/latest/dev_guide/configmaps.html) named *inventory*, and the passwordless ssh key that allows connecting to the hosts to be availalbe as *ssh-privatekey* from a [Secret](https://docs.okd.io/latest/dev_guide/secrets.html) named *sshkey*, so these are created first: - - oc new-project certcheck - oc create configmap inventory --from-file=hosts=/etc/ansible/hosts - oc create secret generic sshkey \ - --from-file=ssh-privatekey=$HOME/.ssh/id_rsa \ - --type=kubernetes.io/ssh-auth - -Note that `inventory`, `hosts`, `sshkey` and `ssh-privatekey` are referenced by name from the provided example Job definition. If you use different names for the objects/attributes you will have to adjust the Job accordingly. - -To create the Job: - - oc create -f examples/certificate-check-upload.yaml - -### Scheduled job for certificate expiration report upload - -The example `CronJob` in [scheduled-certcheck-upload.yaml](scheduled-certcheck-upload.yaml) does the same as the `Job` example above, but it is scheduled to automatically run every first day of the month (see the `spec.schedule` value in the example). - -The job definition is the same and it expects the same configuration: we provide the inventory and ssh key via a ConfigMap and a Secret respectively: - - oc new-project certcheck - oc create configmap inventory --from-file=hosts=/etc/ansible/hosts - oc create secret generic sshkey \ - --from-file=ssh-privatekey=$HOME/.ssh/id_rsa \ - --type=kubernetes.io/ssh-auth - -And then we create the CronJob: - - oc create -f examples/scheduled-certcheck-upload.yaml - -### Job and CronJob to check certificates using volumes - -There are two additional examples: - - - A `Job` [certificate-check-volume.yaml](certificate-check-volume.yaml) - - A `CronJob` [scheduled-certcheck-upload.yaml](scheduled-certcheck-upload.yaml) - -These perform the same work as the two examples above, but instead of uploading the generated reports to the masters they store them in a custom path within the container that is expected to be backed by a [PersistentVolumeClaim](https://docs.okd.io/latest/dev_guide/persistent_volumes.html), so that the reports are actually written to storage external to the container. - -These examples assume that there is an existing `PersistentVolumeClaim` called `certcheck-reports` and they use the [`html_and_json_timestamp.yaml`](../playbooks/openshift-checks/certificate_expiry/html_and_json_timestamp.yaml) example playbook to write timestamped reports into it. - -You can later access the reports from another pod that mounts the same volume, or externally via direct access to the backend storage behind the matching `PersistentVolume`. 
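A sketch of a reader pod for inspecting the stored reports afterwards (the pod name, image, and mount path are illustrative; the `certcheck` project and `certcheck-reports` claim are the ones used by these examples):

```yaml
# certcheck-report-reader.yaml (illustrative)
apiVersion: v1
kind: Pod
metadata:
  name: certcheck-report-reader
  namespace: certcheck
spec:
  containers:
  - name: reader
    image: registry.access.redhat.com/rhel7/rhel-tools  # any image with a shell works
    command: ["sleep", "3600"]
    volumeMounts:
    - name: reports
      mountPath: /reports
      readOnly: true
  volumes:
  - name: reports
    persistentVolumeClaim:
      claimName: certcheck-reports
```

The reports can then be listed with, for example, `oc -n certcheck rsh certcheck-report-reader ls /reports`.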
- -To run these examples we prepare the inventory and ssh keys as in the other examples: - - oc new-project certcheck - oc create configmap inventory --from-file=hosts=/etc/ansible/hosts - oc create secret generic sshkey \ - --from-file=ssh-privatekey=$HOME/.ssh/id_rsa \ - --type=kubernetes.io/ssh-auth - -Additionally we allocate a `PersistentVolumeClaim` to store the reports: - - oc create -f - < 1.10. -# docker_version="1.12.1" - -# Specify whether to run Docker daemon with SELinux enabled in containers. Default is True. -# Uncomment below to disable; for example if your kernel does not support the -# Docker overlay/overlay2 storage drivers with SELinux enabled. -#openshift_docker_selinux_enabled=False - -# Skip upgrading Docker during an OpenShift upgrade, leaves the current Docker version alone. -# docker_upgrade=False - -# Specify a list of block devices to be formatted and mounted on the nodes -# during prerequisites.yml. For each hash, "device", "path", "filesystem" are -# required. To add devices only on certain classes of node, redefine -# container_runtime_extra_storage as a group var. -#container_runtime_extra_storage='[{"device":"/dev/vdc","path":"/var/lib/origin/openshift.local.volumes","filesystem":"xfs","options":"gquota"}]' - -# Enable etcd debug logging, defaults to false -# etcd_debug=true -# Set etcd log levels by package -# etcd_log_package_levels="etcdserver=WARNING,security=DEBUG" -# Comma-separated list of etcd cipher suites -# etcd_cipher_suites="TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256" - -# Upgrade Hooks -# -# Hooks are available to run custom tasks at various points during a cluster -# upgrade. Each hook should point to a file with Ansible tasks defined. Suggest using -# absolute paths, if not the path will be treated as relative to the file where the -# hook is actually used. -# -# Tasks to run before each master is upgraded. -# openshift_master_upgrade_pre_hook=/usr/share/custom/pre_master.yml -# -# Tasks to run to upgrade the master. These tasks run after the main openshift-ansible -# upgrade steps, but before we restart system/services. -# openshift_master_upgrade_hook=/usr/share/custom/master.yml -# -# Tasks to run after each master is upgraded and system/services have been restarted. -# openshift_master_upgrade_post_hook=/usr/share/custom/post_master.yml - -# Cluster Image Source (registry) configuration -# openshift-enterprise default is 'registry.redhat.io/openshift3/ose-${component}:${version}' -# origin default is 'docker.io/openshift/origin-${component}:${version}' -#oreg_url=example.com/openshift3/ose-${component}:${version} -# If oreg_url points to a registry other than registry.redhat.io we can -# modify image streams to point at that registry by setting the following to true -#openshift_examples_modify_imagestreams=true -# Add insecure and blocked registries to global docker configuration -#openshift_docker_insecure_registries=registry.example.com -#openshift_docker_blocked_registries=registry.hacker.com -# You may also configure additional default registries for docker, however this -# is discouraged. Instead you should make use of fully qualified image names. 
-#openshift_docker_additional_registries=registry.example.com - -# If oreg_url points to a registry requiring authentication, provide the following: -#oreg_auth_user=some_user -#oreg_auth_password='my-pass' -# NOTE: oreg_url must be defined by the user for oreg_auth_* to have any affect. -# oreg_auth_pass should be generated from running docker login. - -# OpenShift repository configuration -#openshift_additional_repos=[{'id': 'openshift-origin-copr', 'name': 'OpenShift Origin COPR', 'baseurl': 'https://copr-be.cloud.fedoraproject.org/results/maxamillion/origin-next/epel-7-$basearch/', 'enabled': 1, 'gpgcheck': 1, 'gpgkey': 'https://copr-be.cloud.fedoraproject.org/results/maxamillion/origin-next/pubkey.gpg'}] -#openshift_repos_enable_testing=false - -# If the image for etcd needs to be pulled from anywhere else than registry.redhat.io, e.g. in -# a disconnected and containerized installation, use osm_etcd_image to specify the image to use: -#osm_etcd_image=registry.example.com/rhel7/etcd - -# htpasswd auth -#openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true', 'challenge': 'true', 'kind': 'HTPasswdPasswordIdentityProvider'}] -# Defining htpasswd users -#openshift_master_htpasswd_users={'user1': '', 'user2': ''} -# or -#openshift_master_htpasswd_file= - -# Allow all auth -#openshift_master_identity_providers=[{'name': 'allow_all', 'login': 'true', 'challenge': 'true', 'kind': 'AllowAllPasswordIdentityProvider'}] - -# LDAP auth -#openshift_master_identity_providers=[{'name': 'my_ldap_provider', 'challenge': 'true', 'login': 'true', 'kind': 'LDAPPasswordIdentityProvider', 'attributes': {'id': ['dn'], 'email': ['mail'], 'name': ['cn'], 'preferredUsername': ['uid']}, 'bindDN': '', 'bindPassword': '', 'insecure': 'false', 'url': 'ldap://ldap.example.com:389/ou=users,dc=example,dc=com?uid'}] -# -# Configure LDAP CA certificate -# Specify either the ASCII contents of the certificate or the path to -# the local file that will be copied to the remote host. CA -# certificate contents will be copied to master systems and saved -# within /etc/origin/master/ with a filename matching the "ca" key set -# within the LDAPPasswordIdentityProvider. -# -#openshift_master_ldap_ca= -# or -#openshift_master_ldap_ca_file= - -# OpenID auth -#openshift_master_identity_providers=[{"name": "openid_auth", "login": "true", "challenge": "false", "kind": "OpenIDIdentityProvider", "client_id": "my_client_id", "client_secret": "my_client_secret", "claims": {"id": ["sub"], "preferredUsername": ["preferred_username"], "name": ["name"], "email": ["email"]}, "urls": {"authorize": "https://myidp.example.com/oauth2/authorize", "token": "https://myidp.example.com/oauth2/token"}] -# -# Configure OpenID CA certificate -# Specify either the ASCII contents of the certificate or the path to -# the local file that will be copied to the remote host. CA -# certificate contents will be copied to master systems and saved -# within /etc/origin/master/ with a filename matching the "ca" key set -# within the OpenIDIdentityProvider. 
-# -#openshift_master_openid_ca= -# or -#openshift_master_openid_ca_file= - -# Request header auth -#openshift_master_identity_providers=[{"name": "my_request_header_provider", "challenge": "true", "login": "true", "kind": "RequestHeaderIdentityProvider", "challengeURL": "https://www.example.com/challenging-proxy/oauth/authorize?${query}", "loginURL": "https://www.example.com/login-proxy/oauth/authorize?${query}", "clientCommonNames": ["my-auth-proxy"], "headers": ["X-Remote-User", "SSO-User"], "emailHeaders": ["X-Remote-User-Email"], "nameHeaders": ["X-Remote-User-Display-Name"], "preferredUsernameHeaders": ["X-Remote-User-Login"]}] -# -# Configure request header CA certificate -# Specify either the ASCII contents of the certificate or the path to -# the local file that will be copied to the remote host. CA -# certificate contents will be copied to master systems and saved -# within /etc/origin/master/ with a filename matching the "clientCA" -# key set within the RequestHeaderIdentityProvider. -# -#openshift_master_request_header_ca= -# or -#openshift_master_request_header_ca_file= - -# GitHub auth -#openshift_master_identity_providers=[{"name": "github", "login": "true", "challenge": "false", "kind": "GitHubIdentityProvider", "mappingMethod": "claim", "client_id": "my_client_id", "client_secret": "my_client_secret", "teams": ["team1", "team2"], "hostname": "githubenterprise.example.com", "ca": "" }] -# -# Configure github CA certificate -# Specify either the ASCII contents of the certificate or the path to -# the local file that will be copied to the remote host. CA -# certificate contents will be copied to master systems and saved -# within /etc/origin/master/ with a filename matching the "ca" key set -# within the GitHubIdentityProvider. -# -#openshift_master_github_ca= -# or -#openshift_master_github_ca_file= - -# CloudForms Management Engine (ManageIQ) App Install -# -# Enables installation of MIQ server. Recommended for dedicated -# clusters only. See roles/openshift_management/README.md for instructions -# and requirements. -#openshift_management_install_management=False - -# Cloud Provider Configuration -# -# Note: You may make use of environment variables rather than store -# sensitive configuration within the ansible inventory. -# For example: -#openshift_cloudprovider_aws_access_key="{{ lookup('env','AWS_ACCESS_KEY_ID') }}" -#openshift_cloudprovider_aws_secret_key="{{ lookup('env','AWS_SECRET_ACCESS_KEY') }}" -# -# AWS -#openshift_cloudprovider_kind=aws -# Note: IAM profiles may be used instead of storing API credentials on disk. 
-#openshift_cloudprovider_aws_access_key=aws_access_key_id -#openshift_cloudprovider_aws_secret_key=aws_secret_access_key -# -# Openstack -#openshift_cloudprovider_kind=openstack -#openshift_cloudprovider_openstack_auth_url=http://openstack.example.com:35357/v2.0/ -#openshift_cloudprovider_openstack_username=username -#openshift_cloudprovider_openstack_password=password -#openshift_cloudprovider_openstack_domain_id=domain_id -#openshift_cloudprovider_openstack_domain_name=domain_name -#openshift_cloudprovider_openstack_tenant_id=tenant_id -#openshift_cloudprovider_openstack_tenant_name=tenant_name -#openshift_cloudprovider_openstack_region=region -#openshift_cloudprovider_openstack_lb_subnet_id=subnet_id -# -# Note: If you're getting a "BS API version autodetection failed" when provisioning cinder volumes you may need this setting -#openshift_cloudprovider_openstack_blockstorage_version=v2 -# -# GCE -#openshift_cloudprovider_kind=gce -# Note: When using GCE, openshift_gcp_project and openshift_gcp_prefix must be -# defined. -# openshift_gcp_project is the project-id -#openshift_gcp_project= -# openshift_gcp_prefix is a unique string to identify each openshift cluster. -#openshift_gcp_prefix= -#openshift_gcp_multizone=False -# Note: To enable nested virtualization in gcp use the following variable and url -#openshift_gcp_licenses="https://www.googleapis.com/compute/v1/projects/vm-options/global/licenses/enable-vmx" -# Additional details regarding nested virtualization are available: -# https://cloud.google.com/compute/docs/instances/enable-nested-virtualization-vm-instances -# -# vSphere -#openshift_cloudprovider_kind=vsphere -#openshift_cloudprovider_vsphere_username=username -#openshift_cloudprovider_vsphere_password=password -#openshift_cloudprovider_vsphere_host=vcenter_host or vsphere_host -#openshift_cloudprovider_vsphere_datacenter=datacenter -#openshift_cloudprovider_vsphere_datastore=datastore -#openshift_cloudprovider_vsphere_folder=optional_folder_name - - -# Project Configuration -#osm_project_request_message='' -#osm_project_request_template='' -#osm_mcs_allocator_range='s0:/2' -#osm_mcs_labels_per_project=5 -#osm_uid_allocator_range='1000000000-1999999999/10000' - -# Configure additional projects -#openshift_additional_projects={'my-project': {'default_node_selector': 'label=value'}} - -# Enable cockpit -#osm_use_cockpit=true -# -# Set cockpit plugins -#osm_cockpit_plugins=['cockpit-kubernetes'] - -# Native high availability (default cluster method) -# If no lb group is defined, the installer assumes that a load balancer has -# been preconfigured. For installation the value of -# openshift_master_cluster_hostname must resolve to the load balancer -# or to one or all of the masters defined in the inventory if no load -# balancer is present. 
-#openshift_master_cluster_hostname=openshift-ansible.test.example.com - -# If an external load balancer is used public hostname should resolve to -# external load balancer address -#openshift_master_cluster_public_hostname=openshift-ansible.public.example.com - -# Configure controller arguments -#osm_controller_args={'resource-quota-sync-period': ['10s']} - -# Configure api server arguments -#osm_api_server_args={'max-requests-inflight': ['400']} - -# additional cors origins -#osm_custom_cors_origins=['foo.example.com', 'bar.example.com'] - -# default project node selector -#osm_default_node_selector='region=primary' - -# Override the default pod eviction timeout -#openshift_master_pod_eviction_timeout=5m - -# Override the default oauth tokenConfig settings: -# openshift_master_access_token_max_seconds=86400 -# openshift_master_auth_token_max_seconds=500 - -# Override master servingInfo.maxRequestsInFlight -#openshift_master_max_requests_inflight=500 - -# Override master and node servingInfo.minTLSVersion and .cipherSuites -# valid TLS versions are VersionTLS10, VersionTLS11, VersionTLS12 -# example cipher suites override, valid cipher suites are https://golang.org/pkg/crypto/tls/#pkg-constants -#openshift_master_min_tls_version=VersionTLS12 -#openshift_master_cipher_suites=['TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256', '...'] -# -#openshift_node_min_tls_version=VersionTLS12 -#openshift_node_cipher_suites=['TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256', '...'] - -# default storage plugin dependencies to install, by default the ceph and -# glusterfs plugin dependencies will be installed, if available. -#osn_storage_plugin_deps=['ceph','glusterfs','iscsi'] - -# OpenShift Router Options -# -# An OpenShift router will be created during install if there are -# nodes present with labels matching the default router selector, -# "node-role.kubernetes.io/infra=true". -# -# Example: -# [nodes] -# node.example.com openshift_node_group_name="node-config-infra" -# -# Router selector (optional) -# Router will only be created if nodes matching this label are present. -# Default value: 'node-role.kubernetes.io/infra=true' -#openshift_hosted_router_selector='node-role.kubernetes.io/infra=true' -# -# Router replicas (optional) -# Unless specified, openshift-ansible will calculate the replica count -# based on the number of nodes matching the openshift router selector. -#openshift_hosted_router_replicas=2 -# -# Router extended route validation (optional) -# If enabled, openshift-ansible will configure the router to perform extended -# validation on routes before admitting them. -#openshift_hosted_router_extended_validation=true -# -# Router force subdomain (optional) -# A router path format to force on all routes used by this router -# (will ignore the route host value) -#openshift_hosted_router_force_subdomain='${name}-${namespace}.apps.example.com' -# -# Router certificate (optional) -# Provide local certificate paths which will be configured as the -# router's default certificate. -#openshift_hosted_router_certificate={"certfile": "/path/to/router.crt", "keyfile": "/path/to/router.key", "cafile": "/path/to/router-ca.crt"} -# -# Manage the OpenShift Router (optional) -#openshift_hosted_manage_router=true -# -# Router sharding support has been added and can be achieved by supplying the correct -# data to the inventory. The variable to house the data is openshift_hosted_routers -# and is in the form of a list. If no data is passed then a default router will be -# created. 
There are multiple combinations of router sharding. The one described -# below supports routers on separate nodes. -# -#openshift_hosted_routers=[{'name': 'router1', 'certificate': {'certfile': '/path/to/certificate/abc.crt', 'keyfile': '/path/to/certificate/abc.key', 'cafile': '/path/to/certificate/ca.crt'}, 'replicas': 1, 'serviceaccount': 'router', 'namespace': 'default', 'stats_port': 1936, 'edits': [], 'images': 'openshift3/ose-${component}:${version}', 'selector': 'type=router1', 'ports': ['80:80', '443:443']}, {'name': 'router2', 'certificate': {'certfile': '/path/to/certificate/xyz.crt', 'keyfile': '/path/to/certificate/xyz.key', 'cafile': '/path/to/certificate/ca.crt'}, 'replicas': 1, 'serviceaccount': 'router', 'namespace': 'default', 'stats_port': 1936, 'edits': [{'action': 'append', 'key': 'spec.template.spec.containers[0].env', 'value': {'name': 'ROUTE_LABELS', 'value': 'route=external'}}], 'images': 'openshift3/ose-${component}:${version}', 'selector': 'type=router2', 'ports': ['80:80', '443:443']}] - -# OpenShift Registry Console Options -# Override the console image prefix: -# origin default is "cockpit/", enterprise default is "openshift3/" -#openshift_cockpit_deployer_prefix=registry.example.com/myrepo/ -# origin default is "kubernetes", enterprise default is "registry-console" -#openshift_cockpit_deployer_basename=my-console -# Override image version, defaults to latest for origin, vX.Y product version for enterprise -#openshift_cockpit_deployer_version=1.4.1 - -# Openshift Registry Options -# -# An OpenShift registry will be created during install if there are -# nodes present with labels matching the default registry selector, -# "node-role.kubernetes.io/infra=true". -# -# Example: -# [nodes] -# node.example.com openshift_node_group_name="node-config-infra" -# -# Registry selector (optional) -# Registry will only be created if nodes matching this label are present. -# Default value: 'node-role.kubernetes.io/infra=true' -#openshift_hosted_registry_selector='node-role.kubernetes.io/infra=true' -# -# Registry replicas (optional) -# Unless specified, openshift-ansible will calculate the replica count -# based on the number of nodes matching the openshift registry selector. -#openshift_hosted_registry_replicas=2 -# -# Validity of the auto-generated certificate in days (optional) -#openshift_hosted_registry_cert_expire_days=730 -# -# Manage the OpenShift Registry (optional) -#openshift_hosted_manage_registry=true -# Manage the OpenShift Registry Console (optional) -#openshift_hosted_manage_registry_console=true -# -# Registry Storage Options -# -# NFS Host Group -# An NFS volume will be created with path "nfs_directory/volume_name" -# on the host within the [nfs] host group. For example, the volume -# path using these options would be "/exports/registry". "exports" is -# is the name of the export served by the nfs server. "registry" is -# the name of a directory inside of "/exports". 
-#openshift_hosted_registry_storage_kind=nfs -#openshift_hosted_registry_storage_access_modes=['ReadWriteMany'] -# nfs_directory must conform to DNS-1123 subdomain must consist of lower case -# alphanumeric characters, '-' or '.', and must start and end with an alphanumeric character -#openshift_hosted_registry_storage_nfs_directory=/exports -#openshift_hosted_registry_storage_nfs_options='*(rw,root_squash)' -#openshift_hosted_registry_storage_volume_name=registry -#openshift_hosted_registry_storage_volume_size=10Gi -# -# External NFS Host -# NFS volume must already exist with path "nfs_directory/_volume_name" on -# the storage_host. For example, the remote volume path using these -# options would be "nfs.example.com:/exports/registry". "exports" is -# is the name of the export served by the nfs server. "registry" is -# the name of a directory inside of "/exports". -#openshift_hosted_registry_storage_kind=nfs -#openshift_hosted_registry_storage_access_modes=['ReadWriteMany'] -#openshift_hosted_registry_storage_host=nfs.example.com -# nfs_directory must conform to DNS-1123 subdomain must consist of lower case -# alphanumeric characters, '-' or '.', and must start and end with an alphanumeric character -#openshift_hosted_registry_storage_nfs_directory=/exports -#openshift_hosted_registry_storage_volume_name=registry -#openshift_hosted_registry_storage_volume_size=10Gi -# -# Openstack -# Volume must already exist. -#openshift_hosted_registry_storage_kind=openstack -#openshift_hosted_registry_storage_access_modes=['ReadWriteOnce'] -#openshift_hosted_registry_storage_openstack_filesystem=ext4 -#openshift_hosted_registry_storage_openstack_volumeID=3a650b4f-c8c5-4e0a-8ca5-eaee11f16c57 -#openshift_hosted_registry_storage_volume_size=10Gi -# -# hostPath (local filesystem storage) -# Suitable for "all-in-one" or proof of concept deployments -# Must not be used for high-availability and production deployments -#openshift_hosted_registry_storage_kind=hostpath -#openshift_hosted_registry_storage_access_modes=['ReadWriteOnce'] -#openshift_hosted_registry_storage_hostpath_path=/var/lib/openshift_volumes -#openshift_hosted_registry_storage_volume_size=10Gi -# -# AWS S3 -# S3 bucket must already exist. -#openshift_hosted_registry_storage_kind=object -#openshift_hosted_registry_storage_provider=s3 -#openshift_hosted_registry_storage_s3_encrypt=false -#openshift_hosted_registry_storage_s3_kmskeyid=aws_kms_key_id -#openshift_hosted_registry_storage_s3_accesskey=aws_access_key_id -#openshift_hosted_registry_storage_s3_secretkey=aws_secret_access_key -#openshift_hosted_registry_storage_s3_bucket=bucket_name -#openshift_hosted_registry_storage_s3_region=bucket_region -#openshift_hosted_registry_storage_s3_chunksize=26214400 -#openshift_hosted_registry_storage_s3_rootdirectory=/registry -#openshift_hosted_registry_pullthrough=true -#openshift_hosted_registry_acceptschema2=true -#openshift_hosted_registry_enforcequota=true -# -# Any S3 service (Minio, ExoScale, ...): Basically the same as above -# but with regionendpoint configured -# S3 bucket must already exist. 
-#openshift_hosted_registry_storage_kind=object -#openshift_hosted_registry_storage_provider=s3 -#openshift_hosted_registry_storage_s3_accesskey=access_key_id -#openshift_hosted_registry_storage_s3_secretkey=secret_access_key -#openshift_hosted_registry_storage_s3_regionendpoint=https://myendpoint.example.com/ -#openshift_hosted_registry_storage_s3_bucket=bucket_name -#openshift_hosted_registry_storage_s3_region=bucket_region -#openshift_hosted_registry_storage_s3_chunksize=26214400 -#openshift_hosted_registry_storage_s3_rootdirectory=/registry -#openshift_hosted_registry_pullthrough=true -#openshift_hosted_registry_acceptschema2=true -#openshift_hosted_registry_enforcequota=true -# -# Additional CloudFront Options. When using CloudFront all three -# of the followingg variables must be defined. -#openshift_hosted_registry_storage_s3_cloudfront_baseurl=https://myendpoint.cloudfront.net/ -#openshift_hosted_registry_storage_s3_cloudfront_privatekeyfile=/full/path/to/secret.pem -#openshift_hosted_registry_storage_s3_cloudfront_keypairid=yourpairid -# vSphere Volume with vSphere Cloud Provider -# openshift_hosted_registry_storage_kind=vsphere -# openshift_hosted_registry_storage_access_modes=['ReadWriteOnce'] -# openshift_hosted_registry_storage_annotations=['volume.beta.kubernetes.io/storage-provisioner: kubernetes.io/vsphere-volume'] -# -# GCS Storage Bucket -#openshift_hosted_registry_storage_provider=gcs -#openshift_hosted_registry_storage_gcs_bucket=bucket01 -#openshift_hosted_registry_storage_gcs_keyfile=test.key -#openshift_hosted_registry_storage_gcs_rootdirectory=/registry - -# Metrics deployment -# See: https://docs.openshift.com/container-platform/latest/install_config/cluster_metrics.html -# -# By default metrics are not automatically deployed, set this to enable them -#openshift_metrics_install_metrics=true -# -# metrics-server deployment -# By default, metrics-server is not automatically deployed, unless metrics is also -# deployed. Deploying metrics-server is necessary to use the HorizontalPodAutoscaler. -# Set this to enable it. -#openshift_metrics_server_install=true -# -# Storage Options -# If openshift_metrics_storage_kind is unset then metrics will be stored -# in an EmptyDir volume and will be deleted when the cassandra pod terminates. -# Storage options A & B currently support only one cassandra pod which is -# generally enough for up to 1000 pods. Additional volumes can be created -# manually after the fact and metrics scaled per the docs. -# -# Option A - NFS Host Group -# An NFS volume will be created with path "nfs_directory/volume_name" -# on the host within the [nfs] host group. For example, the volume -# path using these options would be "/exports/metrics". "exports" is -# is the name of the export served by the nfs server. "metrics" is -# the name of a directory inside of "/exports". -#openshift_metrics_storage_kind=nfs -#openshift_metrics_storage_access_modes=['ReadWriteOnce'] -#openshift_metrics_storage_nfs_directory=/exports -#openshift_metrics_storage_nfs_options='*(rw,root_squash)' -#openshift_metrics_storage_volume_name=metrics -#openshift_metrics_storage_volume_size=10Gi -#openshift_metrics_storage_labels={'storage': 'metrics'} -# -# Option B - External NFS Host -# NFS volume must already exist with path "nfs_directory/_volume_name" on -# the storage_host. For example, the remote volume path using these -# options would be "nfs.example.com:/exports/metrics". "exports" is -# is the name of the export served by the nfs server. 
"metrics" is -# the name of a directory inside of "/exports". -#openshift_metrics_storage_kind=nfs -#openshift_metrics_storage_access_modes=['ReadWriteOnce'] -#openshift_metrics_storage_host=nfs.example.com -#openshift_metrics_storage_nfs_directory=/exports -#openshift_metrics_storage_volume_name=metrics -#openshift_metrics_storage_volume_size=10Gi -#openshift_metrics_storage_labels={'storage': 'metrics'} -# -# Option C - Dynamic -- If openshift supports dynamic volume provisioning for -# your cloud platform use this. -#openshift_metrics_storage_kind=dynamic -# -# Other Metrics Options -- Common items you may wish to reconfigure, for the complete -# list of options please see roles/openshift_metrics/README.md -# -# Override metricsPublicURL in the master config for cluster metrics -# Defaults to https://hawkular-metrics.{{openshift_master_default_subdomain}}/hawkular/metrics -# Currently, you may only alter the hostname portion of the url, alterting the -# `/hawkular/metrics` path will break installation of metrics. -#openshift_metrics_hawkular_hostname=hawkular-metrics.example.com -# Configure the metrics component images # Note, these will be modified by oreg_url by default -#openshift_metrics_cassandra_image="docker.io/openshift/origin-metrics-cassandra:{{ openshift_image_tag }}" -#openshift_metrics_hawkular_agent_image="docker.io/openshift/origin-metrics-hawkular-openshift-agent:{{ openshift_image_tag }}" -#openshift_metrics_hawkular_metrics_image="docker.io/openshift/origin-metrics-hawkular-metrics:{{ openshift_image_tag }}" -#openshift_metrics_schema_installer_image="docker.io/openshift/origin-metrics-schema-installer:{{ openshift_image_tag }}" -#openshift_metrics_heapster_image="docker.io/openshift/origin-metrics-heapster:{{ openshift_image_tag }}" -# when openshift_deployment_type=='openshift-enterprise' -#openshift_metrics_cassandra_image="registry.redhat.io/openshift3/metrics-cassandra:{{ openshift_image_tag }}" -#openshift_metrics_hawkular_agent_image="registry.redhat.io/openshift3/metrics-hawkular-openshift-agent:{{ openshift_image_tag }}" -#openshift_metrics_hawkular_metrics_image="registry.redhat.io/openshift3/metrics-hawkular-metrics:{{ openshift_image_tag }}" -#openshift_metrics_schema_installer_image="registry.redhat.io/openshift3/metrics-schema-installer:{{ openshift_image_tag }}" -#openshift_metrics_heapster_image="registry.redhat.io/openshift3/metrics-heapster:{{ openshift_image_tag }}" -# -# StorageClass -# openshift_storageclass_name=gp2 -# openshift_storageclass_parameters={'type': 'gp2', 'encrypted': 'false'} -# openshift_storageclass_mount_options=['dir_mode=0777', 'file_mode=0777'] -# openshift_storageclass_reclaim_policy="Delete" -# -# PersistentLocalStorage -# If Persistent Local Storage is wanted, this boolean can be defined to True. -# This will create all necessary configuration to use persistent storage on nodes. -#openshift_persistentlocalstorage_enabled=False -#openshift_persistentlocalstorage_classes=[] -#openshift_persistentlocalstorage_path=/mnt/local-storage -#openshift_persistentlocalstorage_provisionner_image=quay.io/external_storage/local-volume-provisioner:v1.0.1 - -# Cluster monitoring -# -# Cluster monitoring is enabled by default, disable it by setting -# openshift_cluster_monitoring_operator_install=false -# -# Cluster monitoring configuration variables allow setting the amount of -# storage requested through PersistentVolumeClaims. 
-# -# openshift_cluster_monitoring_operator_prometheus_storage_capacity="50Gi" -# openshift_cluster_monitoring_operator_alertmanager_storage_capacity="2Gi" - -# Logging deployment -# -# Currently logging deployment is disabled by default, enable it by setting this -#openshift_logging_install_logging=true -# -# Logging storage config -# Option A - NFS Host Group -# An NFS volume will be created with path "nfs_directory/volume_name" -# on the host within the [nfs] host group. For example, the volume -# path using these options would be "/exports/logging". "exports" is -# is the name of the export served by the nfs server. "logging" is -# the name of a directory inside of "/exports". -#openshift_logging_storage_kind=nfs -#openshift_logging_storage_access_modes=['ReadWriteOnce'] -#openshift_logging_storage_nfs_directory=/exports -#openshift_logging_storage_nfs_options='*(rw,root_squash)' -#openshift_logging_storage_volume_name=logging -#openshift_logging_storage_volume_size=10Gi -#openshift_logging_storage_labels={'storage': 'logging'} -# -# Option B - External NFS Host -# NFS volume must already exist with path "nfs_directory/_volume_name" on -# the storage_host. For example, the remote volume path using these -# options would be "nfs.example.com:/exports/logging". "exports" is -# is the name of the export served by the nfs server. "logging" is -# the name of a directory inside of "/exports". -#openshift_logging_storage_kind=nfs -#openshift_logging_storage_access_modes=['ReadWriteOnce'] -#openshift_logging_storage_host=nfs.example.com -#openshift_logging_storage_nfs_directory=/exports -#openshift_logging_storage_volume_name=logging -#openshift_logging_storage_volume_size=10Gi -#openshift_logging_storage_labels={'storage': 'logging'} -# -# Option C - Dynamic -- If openshift supports dynamic volume provisioning for -# your cloud platform use this. -#openshift_logging_storage_kind=dynamic -# -# Option D - none -- Logging will use emptydir volumes which are destroyed when -# pods are deleted -# -# Other Logging Options -- Common items you may wish to reconfigure, for the complete -# list of options please see roles/openshift_logging/README.md -# -# Configure loggingPublicURL in the master config for aggregate logging, defaults -# to kibana.{{ openshift_master_default_subdomain }} -#openshift_logging_kibana_hostname=logging.apps.example.com -# Configure the number of elastic search nodes, unless you're using dynamic provisioning -# this value must be 1 -#openshift_logging_es_cluster_size=1 - -# Configure the multi-tenant SDN plugin (default is 'redhat/openshift-ovs-subnet') -# os_sdn_network_plugin_name='redhat/openshift-ovs-multitenant' - -# Disable the OpenShift SDN plugin -# openshift_use_openshift_sdn=False - -# Configure SDN cluster network and kubernetes service CIDR blocks. These -# network blocks should be private and should not conflict with network blocks -# in your infrastructure that pods may require access to. Can not be changed -# after deployment. -# -# WARNING : Do not pick subnets that overlap with the default Docker bridge subnet of -# 172.17.0.0/16. Your installation will fail and/or your configuration change will -# cause the Pod SDN or Cluster SDN to fail. -# -# WORKAROUND : If you must use an overlapping subnet, you can configure a non conflicting -# docker0 CIDR range by adding '--bip=192.168.2.1/24' to DOCKER_NETWORK_OPTIONS -# environment variable located in /etc/sysconfig/docker-network. -# When upgrading or scaling up the following must match whats in your master config! 
-# Inventory: master yaml field -# osm_cluster_network_cidr: clusterNetworkCIDR -# openshift_portal_net: serviceNetworkCIDR -# When installing osm_cluster_network_cidr and openshift_portal_net must be set. -# Sane examples are provided below. -#osm_cluster_network_cidr=10.128.0.0/14 -#openshift_portal_net=172.30.0.0/16 - -# ExternalIPNetworkCIDRs controls what values are acceptable for the -# service external IP field. If empty, no externalIP may be set. It -# may contain a list of CIDRs which are checked for access. If a CIDR -# is prefixed with !, IPs in that CIDR will be rejected. Rejections -# will be applied first, then the IP checked against one of the -# allowed CIDRs. You should ensure this range does not overlap with -# your nodes, pods, or service CIDRs for security reasons. -#openshift_master_external_ip_network_cidrs=['0.0.0.0/0'] - -# IngressIPNetworkCIDR controls the range to assign ingress IPs from for -# services of type LoadBalancer on bare metal. If empty, ingress IPs will not -# be assigned. It may contain a single CIDR that will be allocated from. For -# security reasons, you should ensure that this range does not overlap with -# the CIDRs reserved for external IPs, nodes, pods, or services. -#openshift_master_ingress_ip_network_cidr=172.46.0.0/16 - -# Configure number of bits to allocate to each host's subnet e.g. 9 -# would mean a /23 network on the host. -# When upgrading or scaling up the following must match whats in your master config! -# Inventory: master yaml field -# osm_host_subnet_length: hostSubnetLength -# When installing osm_host_subnet_length must be set. A sane example is provided below. -#osm_host_subnet_length=9 - -# Configure master API and console ports. -#openshift_master_api_port=8443 -#openshift_master_console_port=8443 - -# set exact RPM version (include - prefix) -#openshift_pkg_version=-3.11.0 -# you may also specify version and release, ie: -#openshift_pkg_version=-3.11.0-0.126.0.git.0.9351aae.el7 - -# Configure custom ca certificate -#openshift_master_ca_certificate={'certfile': '/path/to/ca.crt', 'keyfile': '/path/to/ca.key'} -# -# NOTE: CA certificate will not be replaced with existing clusters. -# This option may only be specified when creating a new cluster or -# when redeploying cluster certificates with the redeploy-certificates -# playbook. - -# Configure custom named certificates (SNI certificates) -# -# https://docs.okd.io/latest/install_config/certificate_customization.html -# https://docs.openshift.com/container-platform/latest/install_config/certificate_customization.html -# -# NOTE: openshift_master_named_certificates is cached on masters and is an -# additive fact, meaning that each run with a different set of certificates -# will add the newly provided certificates to the cached set of certificates. -# -# An optional CA may be specified for each named certificate. CAs will -# be added to the OpenShift CA bundle which allows for the named -# certificate to be served for internal cluster communication. -# -# If you would like openshift_master_named_certificates to be overwritten with -# the provided value, specify openshift_master_overwrite_named_certificates. 
-#openshift_master_overwrite_named_certificates=true -# -# Provide local certificate paths which will be deployed to masters -#openshift_master_named_certificates=[{"certfile": "/path/to/custom1.crt", "keyfile": "/path/to/custom1.key", "cafile": "/path/to/custom-ca1.crt"}] -# -# Detected names may be overridden by specifying the "names" key -#openshift_master_named_certificates=[{"certfile": "/path/to/custom1.crt", "keyfile": "/path/to/custom1.key", "names": ["public-master-host.com"], "cafile": "/path/to/custom-ca1.crt"}] -# -# Add a trusted CA to all pods, copies from the control host, may be multiple -# certs in one file -#openshift_additional_ca=/path/to/additional-ca.crt - -# Session options -#openshift_master_session_name=ssn -#openshift_master_session_max_seconds=3600 - -# An authentication and encryption secret will be generated if secrets -# are not provided. If provided, openshift_master_session_auth_secrets -# and openshift_master_encryption_secrets must be equal length. -# -# Signing secrets, used to authenticate sessions using -# HMAC. Recommended to use secrets with 32 or 64 bytes. -#openshift_master_session_auth_secrets=['DONT+USE+THIS+SECRET+b4NV+pmZNSO'] -# -# Encrypting secrets, used to encrypt sessions. Must be 16, 24, or 32 -# characters long, to select AES-128, AES-192, or AES-256. -#openshift_master_session_encryption_secrets=['DONT+USE+THIS+SECRET+b4NV+pmZNSO'] - -# configure how often node iptables rules are refreshed -#openshift_node_iptables_sync_period=5s - -# Configure nodeIP in the node config -# This is needed in cases where node traffic is desired to go over an -# interface other than the default network interface. -#openshift_set_node_ip=True - -#openshift_node_kubelet_args is deprecated, use node config edits instead - -# Configure logrotate scripts -# See: https://github.com/nickhammond/ansible-logrotate -#logrotate_scripts=[{"name": "syslog", "path": "/var/log/cron\n/var/log/maillog\n/var/log/messages\n/var/log/secure\n/var/log/spooler\n", "options": ["daily", "rotate 7", "compress", "sharedscripts", "missingok"], "scripts": {"postrotate": "/bin/kill -HUP `cat /var/run/syslogd.pid 2> /dev/null` 2> /dev/null || true"}}] - -# The OpenShift-Ansible installer will fail when it detects that the -# value of openshift_kubelet_name_override resolves to an IP address not bound to any local -# interfaces. This mis-configuration is problematic for any pod leveraging host -# networking and liveness or readiness probes. -# Setting this variable to false will override that check. -#openshift_hostname_check=true - -# Define an additional dnsmasq.conf file to deploy to /etc/dnsmasq.d/openshift-ansible.conf -# This is useful for POC environments where DNS may not actually be available yet or to set -# options like 'strict-order' to alter dnsmasq configuration. -#openshift_node_dnsmasq_additional_config_file=/home/bob/ose-dnsmasq.conf - -# Global Proxy Configuration -# These options configure HTTP_PROXY, HTTPS_PROXY, and NOPROXY environment -# variables for docker and master services. -# -# Hosts in the openshift_no_proxy list will NOT use any globally -# configured HTTP(S)_PROXYs. openshift_no_proxy accepts domains -# (.example.com), hosts (example.com), and IP addresses. -#openshift_http_proxy=http://USER:PASSWORD@IPADDR:PORT -#openshift_https_proxy=https://USER:PASSWORD@IPADDR:PORT -#openshift_no_proxy='.hosts.example.com,some-host.com' -# -# Most environments don't require a proxy between openshift masters, nodes, and -# etcd hosts. 
So automatically add those hostnames to the openshift_no_proxy list. -# If all of your hosts share a common domain you may wish to disable this and -# specify that domain above instead. -# -# For example, having hosts with FQDNs: m1.ex.com, n1.ex.com, and -# n2.ex.com, one would simply add '.ex.com' to the openshift_no_proxy -# variable (above) and set this value to False -#openshift_generate_no_proxy_hosts=True -# -# These options configure the BuildDefaults admission controller which injects -# configuration into Builds. Proxy related values will default to the global proxy -# config values. You only need to set these if they differ from the global proxy settings. -# See BuildDefaults documentation at -# https://docs.okd.io/latest/admin_guide/build_defaults_overrides.html -#openshift_builddefaults_http_proxy=http://USER:PASSWORD@HOST:PORT -#openshift_builddefaults_https_proxy=https://USER:PASSWORD@HOST:PORT -#openshift_builddefaults_no_proxy=mycorp.com -#openshift_builddefaults_git_http_proxy=http://USER:PASSWORD@HOST:PORT -#openshift_builddefaults_git_https_proxy=https://USER:PASSWORD@HOST:PORT -#openshift_builddefaults_git_no_proxy=mycorp.com -#openshift_builddefaults_image_labels=[{'name':'imagelabelname1','value':'imagelabelvalue1'}] -#openshift_builddefaults_nodeselectors={'nodelabel1':'nodelabelvalue1'} -#openshift_builddefaults_annotations={'annotationkey1':'annotationvalue1'} -#openshift_builddefaults_resources_requests_cpu=100m -#openshift_builddefaults_resources_requests_memory=256Mi -#openshift_builddefaults_resources_limits_cpu=1000m -#openshift_builddefaults_resources_limits_memory=512Mi - -# Or you may optionally define your own build defaults configuration serialized as json -#openshift_builddefaults_json='{"BuildDefaults":{"configuration":{"apiVersion":"v1","env":[{"name":"HTTP_PROXY","value":"http://proxy.example.com.redhat.com:3128"},{"name":"NO_PROXY","value":"ose3-master.example.com"}],"gitHTTPProxy":"http://proxy.example.com:3128","gitNoProxy":"ose3-master.example.com","kind":"BuildDefaultsConfig"}}}' - -# These options configure the BuildOverrides admission controller which injects -# configuration into Builds. 
-# See BuildOverrides documentation at -# https://docs.okd.io/latest/admin_guide/build_defaults_overrides.html -#openshift_buildoverrides_force_pull=true -#openshift_buildoverrides_image_labels=[{'name':'imagelabelname1','value':'imagelabelvalue1'}] -#openshift_buildoverrides_nodeselectors={'nodelabel1':'nodelabelvalue1'} -#openshift_buildoverrides_annotations={'annotationkey1':'annotationvalue1'} -#openshift_buildoverrides_tolerations=[{'key':'mykey1','value':'myvalue1','effect':'NoSchedule','operator':'Equal'}] - -# Or you may optionally define your own build overrides configuration serialized as json -#openshift_buildoverrides_json='{"BuildOverrides":{"configuration":{"apiVersion":"v1","kind":"BuildDefaultsConfig","forcePull":"true"}}}' - -# Enable service catalog -#openshift_enable_service_catalog=true - -# Enable template service broker (requires service catalog to be enabled, above) -#template_service_broker_install=true - -# Specify an openshift_service_catalog image -# (defaults for origin and openshift-enterprise, repsectively) -#openshift_service_catalog_image="docker.io/openshift/origin-service-catalog:{{ openshift_image_tag }}"" -#openshift_service_catalog_image="registry.redhat.io/openshift3/ose-service-catalog:{{ openshift_image_tag }}" - -# Configure one of more namespaces whose templates will be served by the TSB -#openshift_template_service_broker_namespaces=['openshift'] - -# masterConfig.volumeConfig.dynamicProvisioningEnabled, configurable as of 1.2/3.2, enabled by default -#openshift_master_dynamic_provisioning_enabled=True - -# Admission plugin config -#openshift_master_admission_plugin_config={"ProjectRequestLimit":{"configuration":{"apiVersion":"v1","kind":"ProjectRequestLimitConfig","limits":[{"selector":{"admin":"true"}},{"maxProjects":"1"}]}},"PodNodeConstraints":{"configuration":{"apiVersion":"v1","kind":"PodNodeConstraintsConfig"}}} - -# Configure usage of openshift_clock role. -#openshift_clock_enabled=true - -# OpenShift Per-Service Environment Variables -# Environment variables are added to /etc/sysconfig files for -# each OpenShift node. -# API and controllers environment variables are merged in single -# master environments. -#openshift_node_env_vars={"ENABLE_HTTP2": "true"} - -# Enable API service auditing -#openshift_master_audit_config={"enabled": "true"} -# -# In case you want more advanced setup for the auditlog you can -# use this line. -# The directory in "auditFilePath" will be created if it's not -# exist -#openshift_master_audit_config={"enabled": "true", "auditFilePath": "/var/lib/origin/openpaas-oscp-audit/openpaas-oscp-audit.log", "maximumFileRetentionDays": "14", "maximumFileSizeMegabytes": "500", "maximumRetainedFiles": "5"} - -# Enable origin repos that point at Centos PAAS SIG, defaults to true, only used -# by openshift_deployment_type=origin -#openshift_enable_origin_repo=false - -# Validity of the auto-generated OpenShift certificates in days. -# See also openshift_hosted_registry_cert_expire_days above. -# -#openshift_ca_cert_expire_days=1825 -#openshift_node_cert_expire_days=730 -#openshift_master_cert_expire_days=730 - -# Validity of the auto-generated external etcd certificates in days. -# Controls validity for etcd CA, peer, server and client certificates. 
-# -#etcd_ca_default_days=1825 -# -# ServiceAccountConfig:LimitSecretRefences rejects pods that reference secrets their service accounts do not reference -# openshift_master_saconfig_limitsecretreferences=false - -# Upgrade Control -# -# By default nodes are upgraded in a serial manner one at a time and all failures -# are fatal, one set of variables for normal nodes, one set of variables for -# nodes that are part of control plane as the number of hosts may be different -# in those two groups. -#openshift_upgrade_nodes_serial=1 -#openshift_upgrade_nodes_max_fail_percentage=0 -#openshift_upgrade_control_plane_nodes_serial=1 -#openshift_upgrade_control_plane_nodes_max_fail_percentage=0 -# -# You can specify the number of nodes to upgrade at once. We do not currently -# attempt to verify that you have capacity to drain this many nodes at once -# so please be careful when specifying these values. You should also verify that -# the expected number of nodes are all schedulable and ready before starting an -# upgrade. If it's not possible to drain the requested nodes the upgrade will -# stall indefinitely until the drain is successful. -# -# If you're upgrading more than one node at a time you can specify the maximum -# percentage of failure within the batch before the upgrade is aborted. Any -# nodes that do fail are ignored for the rest of the playbook run and you should -# take care to investigate the failure and return the node to service so that -# your cluster. -# -# The percentage must exceed the value, this would fail on two failures -# openshift_upgrade_nodes_serial=4 openshift_upgrade_nodes_max_fail_percentage=49 -# where as this would not -# openshift_upgrade_nodes_serial=4 openshift_upgrade_nodes_max_fail_percentage=50 -# -# A timeout to wait for nodes to drain pods can be specified to ensure that the -# upgrade continues even if nodes fail to drain pods in the allowed time. The -# default value of 0 will wait indefinitely allowing the admin to investigate -# the root cause and ensuring that disruption budgets are respected. If the -# a timeout of 0 is used there will also be one attempt to re-try draining the -# node. If a non zero timeout is specified there will be no attempt to retry. -#openshift_upgrade_nodes_drain_timeout=0 -# -# Multiple data migrations take place and if they fail they will fail the upgrade -# You may wish to disable these or make them non fatal -# -# openshift_upgrade_pre_storage_migration_enabled=true -# openshift_upgrade_pre_storage_migration_fatal=true -# openshift_upgrade_post_storage_migration_enabled=true -# openshift_upgrade_post_storage_migration_fatal=false - -###################################################################### -# CloudForms/ManageIQ (CFME/MIQ) Configuration - -# See the readme for full descriptions and getting started -# instructions: ../../roles/openshift_management/README.md or go directly to -# their definitions: ../../roles/openshift_management/defaults/main.yml -# ../../roles/openshift_management/vars/main.yml -# -# Namespace for the CFME project -#openshift_management_project: openshift-management - -# Namespace/project description -#openshift_management_project_description: CloudForms Management Engine - -# Choose 'miq-template' for a podified database install -# Choose 'miq-template-ext-db' for an external database install -# -# If you are using the miq-template-ext-db template then you must add -# the required database parameters to the -# openshift_management_template_parameters variable. 
-#openshift_management_app_template: miq-template - -# Allowed options: nfs, nfs_external, preconfigured, cloudprovider. -#openshift_management_storage_class: nfs - -# [OPTIONAL] - If you are using an EXTERNAL NFS server, such as a -# netapp appliance, then you must set the hostname here. Leave the -# value as 'false' if you are not using external NFS. -#openshift_management_storage_nfs_external_hostname: false - -# [OPTIONAL] - If you are using external NFS then you must set the base -# path to the exports location here. -# -# Additionally: EXTERNAL NFS REQUIRES that YOU CREATE the nfs exports -# that will back the application PV and optionally the database -# pv. Export path definitions, relative to -# {{ openshift_management_storage_nfs_base_dir }} -# -# LOCAL NFS NOTE: -# -# You may also change this value if you want to change the default -# path used for local NFS exports. -#openshift_management_storage_nfs_base_dir: /exports - -# LOCAL NFS NOTE: -# -# You may override the automatically selected LOCAL NFS server by -# setting this variable. Useful for testing specific task files. -#openshift_management_storage_nfs_local_hostname: false - -# These are the default values for the username and password of the -# management app. Changing these values in your inventory will not -# change your username or password. You should only need to change -# these values in your inventory if you already changed the actual -# name and password AND are trying to use integration scripts. -# -# For example, adding this cluster as a container provider, -# playbooks/openshift-management/add_container_provider.yml -#openshift_management_username: admin -#openshift_management_password: smartvm - -# A hash of parameters you want to override or set in the -# miq-template.yaml or miq-template-ext-db.yaml templates. Set this in -# your inventory file as a simple hash. Acceptable values are defined -# under the .parameters list in files/miq-template{-ext-db}.yaml -# Example: -# -# openshift_management_template_parameters={'APPLICATION_MEM_REQ': '512Mi'} -#openshift_management_template_parameters: {} - -# Firewall configuration -# You can open additional firewall ports by defining them as a list. of service -# names and ports/port ranges for either masters or nodes. -#openshift_master_open_ports=[{"service":"svc1","port":"11/tcp"}] -#openshift_node_open_ports=[{"service":"svc2","port":"12-13/tcp"},{"service":"svc3","port":"14/udp"}] - -# Service port node range -#openshift_node_port_range=30000-32767 - -# Enable unsupported configurations, things that will yield a partially -# functioning cluster but would not be supported for production use -#openshift_enable_unsupported_configurations=false +[new_workers] +mycluster-worker-0.example.com +mycluster-worker-1.example.com +mycluster-worker-2.example.com diff --git a/inventory/hosts.glusterfs.external.example b/inventory/hosts.glusterfs.external.example deleted file mode 100644 index e718e3280..000000000 --- a/inventory/hosts.glusterfs.external.example +++ /dev/null @@ -1,61 +0,0 @@ -# This is an example of an OpenShift-Ansible host inventory for a cluster -# with natively hosted, containerized GlusterFS storage. -# -# This inventory may be used with the deploy_cluster.yml playbook to deploy a new -# cluster with GlusterFS storage, which will use that storage to create a -# volume that will provide backend storage for a hosted Docker registry. 
-# -# This inventory may also be used with openshift-glusterfs/config.yml to -# deploy GlusterFS storage on an existing cluster. With this playbook, the -# registry backend volume will be created but the administrator must then -# either deploy a hosted registry or change an existing hosted registry to use -# that volume. -# -# There are additional configuration parameters that can be specified to -# control the deployment and state of a GlusterFS cluster. Please see the -# documentation in playbooks/openshift-glusterfs/README.md and -# roles/openshift_storage_glusterfs/README.md for additional details. - -[OSEv3:children] -masters -nodes -etcd -# Specify there will be GlusterFS nodes -glusterfs - -[OSEv3:vars] -ansible_ssh_user=root -openshift_deployment_type=origin -# Specify that we want to use an external GlusterFS cluster -openshift_storage_glusterfs_is_native=False -# Specify the IP address or hostname of the external heketi service -openshift_storage_glusterfs_heketi_url=172.0.0.1 - -[masters] -master - -[nodes] -# masters should be schedulable to run web console pods -master openshift_schedulable=True -node0 openshift_schedulable=True -node1 openshift_schedulable=True -node2 openshift_schedulable=True - -[etcd] -master - -# Specify the glusterfs group, which contains the nodes of the external -# GlusterFS cluster. At a minimum, each node must have "glusterfs_hostname" -# and "glusterfs_devices" variables defined. -# -# The first variable indicates the hostname of the external GLusterFS node, -# and must be reachable by the external heketi service. -# -# The second variable is a list of block devices the node will have access to -# that are intended solely for use as GlusterFS storage. These block devices -# must be bare (e.g. have no data, not be marked as LVM PVs), and will be -# formatted. -[glusterfs] -node0.local glusterfs_ip='172.0.0.10' glusterfs_devices='[ "/dev/vdb" ]' -node1.local glusterfs_ip='172.0.0.11' glusterfs_devices='[ "/dev/vdb", "/dev/vdc" ]' -node2.local glusterfs_ip='172.0.0.11' glusterfs_devices='[ "/dev/vdd" ]' diff --git a/inventory/hosts.glusterfs.mixed.example b/inventory/hosts.glusterfs.mixed.example deleted file mode 100644 index b2fc00c58..000000000 --- a/inventory/hosts.glusterfs.mixed.example +++ /dev/null @@ -1,64 +0,0 @@ -# This is an example of an OpenShift-Ansible host inventory for a cluster -# with natively hosted, containerized GlusterFS storage. -# -# This inventory may be used with the deploy_cluster.yml playbook to deploy a new -# cluster with GlusterFS storage, which will use that storage to create a -# volume that will provide backend storage for a hosted Docker registry. -# -# This inventory may also be used with openshift-glusterfs/config.yml to -# deploy GlusterFS storage on an existing cluster. With this playbook, the -# registry backend volume will be created but the administrator must then -# either deploy a hosted registry or change an existing hosted registry to use -# that volume. -# -# There are additional configuration parameters that can be specified to -# control the deployment and state of a GlusterFS cluster. Please see the -# documentation in playbooks/openshift-glusterfs/README.md and -# roles/openshift_storage_glusterfs/README.md for additional details. 
- -[OSEv3:children] -masters -nodes -etcd -# Specify there will be GlusterFS nodes -glusterfs - -[OSEv3:vars] -ansible_ssh_user=root -openshift_deployment_type=origin -# Specify that we want to use an external GlusterFS cluster and a native -# heketi service -openshift_storage_glusterfs_is_native=False -openshift_storage_glusterfs_heketi_is_native=True -# Specify that heketi will use SSH to communicate to the GlusterFS nodes and -# the private key file it will use for authentication -openshift_storage_glusterfs_heketi_executor=ssh -openshift_storage_glusterfs_heketi_ssh_keyfile=/root/id_rsa -[masters] -master - -[nodes] -# masters should be schedulable to run web console pods -master openshift_schedulable=True -node0 openshift_schedulable=True -node1 openshift_schedulable=True -node2 openshift_schedulable=True - -[etcd] -master - -# Specify the glusterfs group, which contains the nodes of the external -# GlusterFS cluster. At a minimum, each node must have "glusterfs_hostname" -# and "glusterfs_devices" variables defined. -# -# The first variable indicates the hostname of the external GLusterFS node, -# and must be reachable by the external heketi service. -# -# The second variable is a list of block devices the node will have access to -# that are intended solely for use as GlusterFS storage. These block devices -# must be bare (e.g. have no data, not be marked as LVM PVs), and will be -# formatted. -[glusterfs] -node0.local glusterfs_ip='172.0.0.10' glusterfs_devices='[ "/dev/vdb" ]' -node1.local glusterfs_ip='172.0.0.11' glusterfs_devices='[ "/dev/vdb", "/dev/vdc" ]' -node2.local glusterfs_ip='172.0.0.11' glusterfs_devices='[ "/dev/vdd" ]' diff --git a/inventory/hosts.glusterfs.native.example b/inventory/hosts.glusterfs.native.example deleted file mode 100644 index c4178d4b4..000000000 --- a/inventory/hosts.glusterfs.native.example +++ /dev/null @@ -1,51 +0,0 @@ -# This is an example of an OpenShift-Ansible host inventory for a cluster -# with natively hosted, containerized GlusterFS storage for applications. It -# will also automatically create a StorageClass for this purpose. -# -# This inventory may be used with the deploy_cluster.yml playbook to deploy a new -# cluster with GlusterFS storage. -# -# This inventory may also be used with openshift-glusterfs/config.yml to -# deploy GlusterFS storage on an existing cluster. -# -# There are additional configuration parameters that can be specified to -# control the deployment and state of a GlusterFS cluster. Please see the -# documentation in playbooks/openshift-glusterfs/README.md and -# roles/openshift_storage_glusterfs/README.md for additional details. - -[OSEv3:children] -masters -nodes -etcd -# Specify there will be GlusterFS nodes -glusterfs - -[OSEv3:vars] -ansible_ssh_user=root -openshift_deployment_type=origin - -[masters] -master - -[nodes] -# masters should be schedulable to run web console pods -master openshift_schedulable=True -# A hosted registry, by default, will only be deployed on nodes labeled -# "node-role.kubernetes.io/infra=true". -node0 openshift_schedulable=True -node1 openshift_schedulable=True -node2 openshift_schedulable=True - -[etcd] -master - -# Specify the glusterfs group, which contains the nodes that will host -# GlusterFS storage pods. At a minimum, each node must have a -# "glusterfs_devices" variable defined. This variable is a list of block -# devices the node will have access to that is intended solely for use as -# GlusterFS storage. These block devices must be bare (e.g. 
have no data, not -# be marked as LVM PVs), and will be formatted. -[glusterfs] -node0 glusterfs_devices='[ "/dev/vdb", "/dev/vdc", "/dev/vdd" ]' -node1 glusterfs_devices='[ "/dev/vdb", "/dev/vdc", "/dev/vdd" ]' -node2 glusterfs_devices='[ "/dev/vdb", "/dev/vdc", "/dev/vdd" ]' diff --git a/inventory/hosts.glusterfs.registry-only.example b/inventory/hosts.glusterfs.registry-only.example deleted file mode 100644 index 4c8ecd97f..000000000 --- a/inventory/hosts.glusterfs.registry-only.example +++ /dev/null @@ -1,57 +0,0 @@ -# This is an example of an OpenShift-Ansible host inventory for a cluster -# with natively hosted, containerized GlusterFS storage for exclusive use -# as storage for a natively hosted Docker registry. -# -# This inventory may be used with the deploy_cluster.yml playbook to deploy a new -# cluster with GlusterFS storage, which will use that storage to create a -# volume that will provide backend storage for a hosted Docker registry. -# -# This inventory may also be used with openshift-glusterfs/registry.yml to -# deploy GlusterFS storage on an existing cluster. With this playbook, the -# registry backend volume will be created but the administrator must then -# either deploy a hosted registry or change an existing hosted registry to use -# that volume. -# -# There are additional configuration parameters that can be specified to -# control the deployment and state of a GlusterFS cluster. Please see the -# documentation in playbooks/openshift-glusterfs/README.md and -# roles/openshift_storage_glusterfs/README.md for additional details. - -[OSEv3:children] -masters -nodes -etcd -# Specify there will be GlusterFS nodes -glusterfs_registry - -[OSEv3:vars] -ansible_ssh_user=root -openshift_deployment_type=origin -# Specify that we want to use GlusterFS storage for a hosted registry -openshift_hosted_registry_storage_kind=glusterfs - -[masters] -master openshift_node_group_name="node-config-master" - -[nodes] -# masters should be schedulable to run web console pods -master openshift_schedulable=True -# A hosted registry, by default, will only be deployed on nodes labeled -# "node-role.kubernetes.io/infra=true". -node0 openshift_node_group_name="node-config-infra" -node1 openshift_node_group_name="node-config-infra" -node2 openshift_node_group_name="node-config-infra" - -[etcd] -master - -# Specify the glusterfs group, which contains the nodes that will host -# GlusterFS storage pods. At a minimum, each node must have a -# "glusterfs_devices" variable defined. This variable is a list of block -# devices the node will have access to that is intended solely for use as -# GlusterFS storage. These block devices must be bare (e.g. have no data, not -# be marked as LVM PVs), and will be formatted. -[glusterfs_registry] -node0 glusterfs_devices='[ "/dev/vdb", "/dev/vdc", "/dev/vdd" ]' -node1 glusterfs_devices='[ "/dev/vdb", "/dev/vdc", "/dev/vdd" ]' -node2 glusterfs_devices='[ "/dev/vdb", "/dev/vdc", "/dev/vdd" ]' diff --git a/inventory/hosts.glusterfs.storage-and-registry.example b/inventory/hosts.glusterfs.storage-and-registry.example deleted file mode 100644 index cabd8730b..000000000 --- a/inventory/hosts.glusterfs.storage-and-registry.example +++ /dev/null @@ -1,68 +0,0 @@ -# This is an example of an OpenShift-Ansible host inventory for a cluster -# with natively hosted, containerized GlusterFS storage for both general -# application use and a natively hosted Docker registry. It will also create a -# StorageClass for the general storage. 
-#
-# This inventory may be used with the deploy_cluster.yml playbook to deploy a new
-# cluster with GlusterFS storage.
-#
-# This inventory may also be used with openshift-glusterfs/config.yml to
-# deploy GlusterFS storage on an existing cluster. With this playbook, the
-# registry backend volume will be created but the administrator must then
-# either deploy a hosted registry or change an existing hosted registry to use
-# that volume.
-#
-# There are additional configuration parameters that can be specified to
-# control the deployment and state of a GlusterFS cluster. Please see the
-# documentation in playbooks/openshift-glusterfs/README.md and
-# roles/openshift_storage_glusterfs/README.md for additional details.
-
-[OSEv3:children]
-masters
-nodes
-etcd
-# Specify there will be GlusterFS nodes
-glusterfs
-glusterfs_registry
-
-[OSEv3:vars]
-ansible_ssh_user=root
-openshift_deployment_type=origin
-# Specify that we want to use GlusterFS storage for a hosted registry
-openshift_hosted_registry_storage_kind=glusterfs
-
-[masters]
-master
-
-[nodes]
-# masters should be schedulable to run web console pods
-master openshift_node_group_name="node-config-master" openshift_schedulable=True
-# It is recommended to not use a single cluster for both general and registry
-# storage, so two three-node clusters will be required.
-node0 openshift_node_group_name="node-config-compute"
-node1 openshift_node_group_name="node-config-compute"
-node2 openshift_node_group_name="node-config-compute"
-# A hosted registry, by default, will only be deployed on nodes labeled
-# "node-role.kubernetes.io/infra=true".
-node3 openshift_node_group_name="node-config-infra"
-node4 openshift_node_group_name="node-config-infra"
-node5 openshift_node_group_name="node-config-infra"
-
-[etcd]
-master
-
-# Specify the glusterfs group, which contains the nodes that will host
-# GlusterFS storage pods. At a minimum, each node must have a
-# "glusterfs_devices" variable defined. This variable is a list of block
-# devices the node will have access to that is intended solely for use as
-# GlusterFS storage. These block devices must be bare (e.g. have no data, not
-# be marked as LVM PVs), and will be formatted.
-[glusterfs]
-node0 glusterfs_devices='[ "/dev/vdb", "/dev/vdc", "/dev/vdd" ]'
-node1 glusterfs_devices='[ "/dev/vdb", "/dev/vdc", "/dev/vdd" ]'
-node2 glusterfs_devices='[ "/dev/vdb", "/dev/vdc", "/dev/vdd" ]'
-
-[glusterfs_registry]
-node3 glusterfs_devices='[ "/dev/vdb", "/dev/vdc", "/dev/vdd" ]'
-node4 glusterfs_devices='[ "/dev/vdb", "/dev/vdc", "/dev/vdd" ]'
-node5 glusterfs_devices='[ "/dev/vdb", "/dev/vdc", "/dev/vdd" ]'
diff --git a/inventory/hosts.grafana.example b/inventory/hosts.grafana.example
deleted file mode 100644
index 9660ebf3c..000000000
--- a/inventory/hosts.grafana.example
+++ /dev/null
@@ -1,17 +0,0 @@
-[OSEv3:children]
-masters
-nodes
-
-[OSEv3:vars]
-# Grafana Configuration
-#grafana_namespace=grafana
-#grafana_user=grafana
-#grafana_password=grafana
-#grafana_datasource_name="example"
-#grafana_prometheus_namespace="openshift-metrics"
-#grafana_prometheus_sa=prometheus
-#grafana_node_exporter=false
-#grafana_graph_granularity="2m"
-
-[masters]
-master
diff --git a/inventory/hosts.localhost b/inventory/hosts.localhost
deleted file mode 100644
index 3c06bb7e7..000000000
--- a/inventory/hosts.localhost
+++ /dev/null
@@ -1,27 +0,0 @@
-#bare minimum hostfile
-
-[OSEv3:children]
-masters
-nodes
-etcd
-
-[OSEv3:vars]
-# if your target hosts are Fedora uncomment this
-#ansible_python_interpreter=/usr/bin/python3
-openshift_deployment_type=origin
-openshift_portal_net=172.30.0.0/16
-# localhost likely doesn't meet the minimum requirements
-openshift_disable_check=disk_availability,memory_availability
-
-openshift_node_groups=[{'name': 'node-config-all-in-one', 'labels': ['node-role.kubernetes.io/master=true', 'node-role.kubernetes.io/infra=true', 'node-role.kubernetes.io/compute=true']}]
-
-
-[masters]
-localhost ansible_connection=local
-
-[etcd]
-localhost ansible_connection=local
-
-[nodes]
-# openshift_node_group_name should refer to a dictionary with matching key of name in list openshift_node_groups.
-localhost ansible_connection=local openshift_node_group_name="node-config-all-in-one"
diff --git a/inventory/hosts.openstack b/inventory/hosts.openstack
deleted file mode 100644
index b9aa9927b..000000000
--- a/inventory/hosts.openstack
+++ /dev/null
@@ -1,37 +0,0 @@
-# This is an example of an OpenShift-Ansible host inventory
-
-# Create an OSEv3 group that contains the masters and nodes groups
-[OSEv3:children]
-masters
-nodes
-etcd
-lb
-
-# Set variables common for all OSEv3 hosts
-[OSEv3:vars]
-ansible_ssh_user=cloud-user
-ansible_become=yes
-
-# Debug level for all OpenShift components (Defaults to 2)
-debug_level=2
-
-openshift_deployment_type=openshift-enterprise
-
-openshift_additional_repos=[{'id': 'ose-3.1', 'name': 'ose-3.1', 'baseurl': 'http://pulp.dist.prod.ext.phx2.redhat.com/content/dist/rhel/server/7/7Server/x86_64/ose/3.1/os', 'enabled': 1, 'gpgcheck': 0}]
-
-openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true', 'challenge': 'true', 'kind': 'HTPasswdPasswordIdentityProvider'}]
-
-#openshift_pkg_version=-3.0.0.0
-
-[masters]
-jdetiber-master.usersys.redhat.com openshift_public_hostname="{{ inventory_hostname }}"
-
-[etcd]
-jdetiber-etcd.usersys.redhat.com
-
-[lb]
-#ose3-lb-ansible.test.example.com
-
-[nodes]
-jdetiber-master.usersys.redhat.com openshift_public_hostname="{{ inventory_hostname }}" openshift_node_group_name="node-config-master"
-jdetiber-node[1:2].usersys.redhat.com openshift_public_hostname="{{ inventory_hostname }}" openshift_node_group_name="node-config-compute"
diff --git a/inventory/install-config-example.yml b/inventory/install-config-example.yml
deleted file mode 100644
index 7d3c3f7a4..000000000
--- a/inventory/install-config-example.yml
+++ /dev/null
@@ -1,30 +0,0 @@
----
-baseDomain: example.com
-machines:
-- name: master
-  replicas: 1
-- name: worker
-  # This should always be zero for openshift-ansible
-  replicas: 0
-metadata:
-  name: mycluster
-networking:
-  clusterNetworks:
-  - cidr: 10.128.0.0/14
-    hostSubnetLength: 9
-  serviceCIDR: 172.30.0.0/16
-  type: OpenShiftSDN
-platform:
-  libvirt:
-    # This URI is not actually used
-    URI: null
-    defaultMachinePlatform:
-      image: file:///unused
-    masterIPs: null
-    network:
-      if: null
-      ipRange: null
-pullSecret: |
-  < paste your pullSecret here >
-sshKey: |
-  < paste your pubkey here >
diff --git a/meta/main.yml b/meta/main.yml
deleted file mode 100644
index 7f867d73b..000000000
--- a/meta/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-dependencies:
diff --git a/playbooks/README.md b/playbooks/README.md
index 290d4c082..23cf0144e 100644
--- a/playbooks/README.md
+++ b/playbooks/README.md
@@ -1,17 +1 @@
 # openshift-ansible playbooks
-
-In summary:
-
-- [`byo`](byo) (_Bring Your Own_ hosts) has the most actively maintained
-  playbooks for installing, upgrading and performing other tasks on OpenShift
-  clusters.
-- [`common`](common) has a set of playbooks that are included by playbooks in
-  `byo` and others.
-
-And:
-
-- [`adhoc`](adhoc) is a generic home for playbooks and tasks that are community
-  supported and not officially maintained.
-
-Refer to the `README.md` file in each playbook directory for more information
-about them.