mirror of https://github.com/openshift/openshift-ansible.git
synced 2026-02-05 06:46:04 +01:00

Commit: Remove unsupported playbooks and utilities

README.md (10 lines changed)
@@ -67,14 +67,10 @@ you are not running a stable release.
     dnf install -y ansible pyOpenSSL python-cryptography python-lxml
     ```
 
-2. Setup for a specific cloud:
+2. OpenShift Installation Documentation:
 
-  - [AWS](http://github.com/openshift/openshift-ansible/blob/master/README_AWS.md)
-  - [GCE](http://github.com/openshift/openshift-ansible/blob/master/README_GCE.md)
-  - [local VMs](http://github.com/openshift/openshift-ansible/blob/master/README_libvirt.md)
-  - Bring your own host deployments:
-    - [OpenShift Enterprise](https://docs.openshift.com/enterprise/latest/install_config/install/advanced_install.html)
-    - [OpenShift Origin](https://docs.openshift.org/latest/install_config/install/advanced_install.html)
+  - [OpenShift Enterprise](https://docs.openshift.com/enterprise/latest/install_config/install/advanced_install.html)
+  - [OpenShift Origin](https://docs.openshift.org/latest/install_config/install/advanced_install.html)
 
 ## Containerized OpenShift Ansible
README_AWS.md (200 lines removed)
@@ -1,200 +0,0 @@

:warning: **WARNING** :warning: This feature is community supported and has not been tested by Red Hat. Visit [docs.openshift.com](https://docs.openshift.com) for [OpenShift Enterprise](https://docs.openshift.com/enterprise/latest/install_config/install/planning.html) or [OpenShift Origin](https://docs.openshift.org/latest/install_config/install/planning.html) supported installation docs.

AWS Setup Instructions
======================

Get AWS API credentials
-----------------------
1. [AWS credentials documentation](http://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSGettingStartedGuide/AWSCredentials.html)


Create a credentials file
-------------------------
1. Create a credentials file (e.g. ~/.aws_creds) that looks something like this (the variables must have these exact names):
```
export AWS_ACCESS_KEY_ID='AKIASTUFF'
export AWS_SECRET_ACCESS_KEY='STUFF'
```
2. Source this file:
```
source ~/.aws_creds
```
Note: You must source this file before running any Ansible commands.

Alternatively, you can configure credentials in either ~/.boto or ~/.aws/credentials; see the [boto docs](http://docs.pythonboto.org/en/latest/boto_config_tut.html) for the format.
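For reference, a minimal ~/.aws/credentials in boto's format looks like this (a sketch; the profile name and placeholder values are illustrative):
```
[default]
aws_access_key_id = AKIASTUFF
aws_secret_access_key = STUFF
```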

Subscribe to CentOS
-------------------

1. [CentOS on AWS](https://aws.amazon.com/marketplace/pp/B00O7WM7QW)


Set up Security Group
---------------------
By default, a cluster is launched into the `public` security group. Make sure you allow hosts to talk to each other on port `4789` (UDP) for the SDN.
You may also want to allow access from the outside world on the following ports:

```
22/TCP    - ssh
80/TCP    - Web Apps
443/TCP   - Web Apps (https)
4789/UDP  - SDN / VXLAN
8443/TCP  - OpenShift Console
10250/TCP - kubelet
```
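If you manage the security group from the CLI, rules along these lines open those ports (a sketch; the group name `public` and the wide-open CIDR are assumptions — tighten them for real deployments):
```
aws ec2 authorize-security-group-ingress --group-name public --protocol tcp --port 8443 --cidr 0.0.0.0/0
aws ec2 authorize-security-group-ingress --group-name public --protocol udp --port 4789 --source-group public
```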


Determine your subnet and set up the VPC
----------------------------------------

In the AWS VPC console, look up the subnet ID for the region you want to use and export it:

- export ec2_vpc_subnet='my_vpc_subnet'

Then go to Your VPCs, select the VPC, and under Actions -> Edit DNS Hostnames, set DNS Hostnames to Yes and Save.
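Equivalently, DNS hostnames can be enabled from the CLI (the VPC ID below is a placeholder):
```
aws ec2 modify-vpc-attribute --vpc-id vpc-0123456789abcdef0 --enable-dns-hostnames '{"Value": true}'
```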

(Optional) Set up your $HOME/.ssh/config file
---------------------------------------------
When creating a cluster, or in any other case where you don't know the machine hostnames in advance, you can use `.ssh/config`
to point Ansible at the private key file it should use to connect to the created hosts.

To do so, add the following entry to your $HOME/.ssh/config file and make it point to the private key file that allows you to log in on AWS:
```
Host *.compute-1.amazonaws.com
  IdentityFile $HOME/.ssh/my_private_key.pem
```

Alternatively, you can configure your ssh-agent to hold the credentials to connect to your AWS instances.
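For example (the key path is illustrative):
```
ssh-add ~/.ssh/my_private_key.pem
```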

(Optional) Choose where the cluster will be launched
----------------------------------------------------

By default, a cluster is launched with the following configuration:

- Instance type: m4.large
- AMI: ami-7a9e9812 (for online deployments; ami-61bbf104 for origin deployments and ami-10663b78 for enterprise deployments)
- Region: us-east-1
- Keypair name: libra
- Security group: public

#### Master specific defaults:
- Master root volume size: 10 (in GiB)
- Master root volume type: gp2
- Master root volume iops: 500 (only applicable when volume type is io1)

#### Node specific defaults:
- Node root volume size: 10 (in GiB)
- Node root volume type: gp2
- Node root volume iops: 500 (only applicable when volume type is io1)
- Docker volume size: 25 (in GiB)
- Docker volume ephemeral: true (whether the Docker volume is ephemeral)
- Docker volume type: gp2 (only applicable if ephemeral is false)
- Docker volume iops: 500 (only applicable when volume type is io1)

### Specifying the ec2 instance type

#### All instances:

- export ec2_instance_type='m4.large'

#### Master instances:

- export ec2_master_instance_type='m4.large'

#### Infra node instances:

- export ec2_infra_instance_type='m4.large'

#### Non-infra node instances:

- export ec2_node_instance_type='m4.large'

#### etcd instances:

- export ec2_etcd_instance_type='m4.large'

If needed, the remaining defaults can also be changed by setting environment variables on your system:

- export ec2_image='ami-307b3658'
- export ec2_region='us-east-1'
- export ec2_keypair='libra'
- export ec2_security_groups="['public']"
- export ec2_assign_public_ip='true'
- export os_etcd_root_vol_size='20'
- export os_etcd_root_vol_type='standard'
- export os_etcd_vol_size='20'
- export os_etcd_vol_type='standard'
- export os_master_root_vol_size='20'
- export os_master_root_vol_type='standard'
- export os_node_root_vol_size='15'
- export os_docker_vol_size='50'
- export os_docker_vol_ephemeral='false'
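Putting it together, a launch on larger nodes in a different region might look like this (the values and cluster name are illustrative):
```
export ec2_region='us-west-2'
export ec2_instance_type='m4.xlarge'
bin/cluster create aws testcluster
```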

Install Dependencies
--------------------
1. Ansible requires python-boto for AWS operations:

Fedora
```
dnf install -y ansible python-boto pyOpenSSL
```

RHEL/CentOS
```
yum install -y ansible python-boto pyOpenSSL
```
OS X:
```
pip install -U pyopenssl boto
```


Test The Setup
--------------
1. cd openshift-ansible
1. Try to list all instances (passing an empty string as the cluster_id
   argument will result in all ec2 instances being listed):
```
bin/cluster list aws ''
```

Creating a cluster
------------------
1. To create a cluster with one master and two nodes:
```
bin/cluster create aws <cluster-id>
```

Updating a cluster
------------------
1. To update the cluster:
```
bin/cluster update aws <cluster-id>
```

Terminating a cluster
---------------------
1. To terminate the cluster:
```
bin/cluster terminate aws <cluster-id>
```

Specifying a deployment type
----------------------------
The --deployment-type flag can be passed to bin/cluster to specify the deployment type.
1. To launch an OpenShift Enterprise cluster (requires a valid subscription):
```
bin/cluster create aws --deployment-type=openshift-enterprise <cluster-id>
```
Note: If no deployment type is specified, the default is origin.
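The deployment type can also be set via the OS_DEPLOYMENT_TYPE environment variable, which bin/cluster consults when the flag is absent (see the get_deployment_type method in bin/cluster below):
```
export OS_DEPLOYMENT_TYPE=openshift-enterprise
bin/cluster create aws <cluster-id>
```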

## Post-ansible steps

You should now be ready to follow the **What's Next?** section of the advanced installation guide to deploy your router, registry, and other components.

Refer to the advanced installation guide for your deployment type:

* [OpenShift Enterprise](https://docs.openshift.com/enterprise/latest/install_config/install/advanced_install.html#what-s-next)
* [OpenShift Origin](https://docs.openshift.org/latest/install_config/install/advanced_install.html#what-s-next)
README_GCE.md (136 lines removed)
@@ -1,136 +0,0 @@

:warning: **WARNING** :warning: This feature is community supported and has not been tested by Red Hat. Visit [docs.openshift.com](https://docs.openshift.com) for [OpenShift Enterprise](https://docs.openshift.com/enterprise/latest/install_config/install/index.html) or [OpenShift Origin](https://docs.openshift.org/latest/install_config/install/index.html) supported installation docs.

GCE Setup Instructions
======================

Get a gce service key
---------------------
1. Ask your GCE project administrator for a GCE service key

Note: If your GCE project does not show a Service Account under <Project>/APIs & auth/Credentials, you will need to use "Create new Client ID" to create a Service Account before your administrator can create the service key for you.


Convert a GCE service key into a pem (for ansible)
--------------------------------------------------
1. mkdir -p ~/.gce
1. The gce service key looks something like this: projectname-ef83bd90f261.p12
   (the ef83bd90f261 part is the key hash (GCE_KEY_HASH); the projectname part is the project name (PROJECT_NAME)).
1. Be in the same directory as the p12 key file.
1. The commands below should be copy/paste-able.
1. Run these commands:
```
# Temporarily set the key hash, project name and project ID
export GCE_KEY_HASH=ef83bd90f261
export PROJECT_NAME='Project Name'
export PROJECT_ID='Project ID'

# Convert the service key (note: 'notasecret' is literally what we want here)
openssl pkcs12 -in "${PROJECT_NAME}-${GCE_KEY_HASH}.p12" -passin pass:notasecret -nodes -nocerts | openssl rsa -out "${PROJECT_ID}-${GCE_KEY_HASH}.pem"

# Move the converted service key to the .gce dir
mv "${PROJECT_ID}-${GCE_KEY_HASH}.pem" ~/.gce
```

1. Once this is done, put the original service key file (projectname-ef83bd90f261.p12) somewhere safe, or delete it (your call; it is not needed again and can always be regenerated if necessary).


Create a gce.ini file for GCE
-----------------------------
* gce_service_account_email_address - Found in "APIs & auth" -> Credentials -> "Service Account" -> "Email Address"
* gce_service_account_pem_file_path - Full path from the previous steps
* gce_project_id - Found in "Projects"; this lists all the GCE projects you are associated with, with their "Project Name" and "Project ID". You want the "Project ID".

Mandatory customization variables (check the values according to your tenant):
* zone = europe-west1-d
* network = default

Optional variable overrides:
* gce_ssh_user - ssh user, defaults to the currently logged in user
* gce_machine_type = n1-standard-1 - default machine type
* gce_machine_etcd_type = n1-standard-1 - machine type for etcd hosts
* gce_machine_master_type = n1-standard-1 - machine type for master hosts
* gce_machine_node_type = n1-standard-1 - machine type for node hosts
* gce_machine_image = centos-7 - default image
* gce_machine_etcd_image = centos-7 - image for etcd hosts
* gce_machine_master_image = centos-7 - image for master hosts
* gce_machine_node_image = centos-7 - image for node hosts


1. vi ~/.gce/gce.ini
1. Make the contents look like this:
```
[gce]
gce_service_account_email_address = long...@developer.gserviceaccount.com
gce_service_account_pem_file_path = /full/path/to/project_id-gce_key_hash.pem
gce_project_id = project_id
zone = europe-west1-d
network = default
gce_machine_type = n1-standard-2
gce_machine_master_type = n1-standard-1
gce_machine_node_type = n1-standard-2
gce_machine_image = centos-7
gce_machine_master_image = centos-7
gce_machine_node_image = centos-7
```
1. Define the environment variable GCE_INI_PATH so that gce.py can pick it up and bin/cluster can also read it:
```
export GCE_INI_PATH=~/.gce/gce.ini
```


Install Dependencies
--------------------
1. Ansible requires libcloud for GCE operations:
```
yum install -y ansible python-libcloud
```

> Installation on Mac OS X requires the pycrypto library:
>
> <kbd>$ pip install pycrypto</kbd>

Test The Setup
--------------
1. cd openshift-ansible/
1. Try to list all instances (passing an empty string as the cluster_id
   argument will result in all gce instances being listed):
```
bin/cluster list gce ''
```

Creating a cluster
------------------
1. To create a cluster with one master, one infra node, and two compute nodes:
```
bin/cluster create gce <cluster-id>
```
1. To create a cluster with 3 masters, 3 etcd hosts, 2 infra nodes and 10
   compute nodes:
```
bin/cluster create gce -m 3 -e 3 -i 2 -n 10 <cluster-id>
```

Updating a cluster
------------------
1. To update the cluster:
```
bin/cluster update gce <cluster-id>
```

Add additional nodes
--------------------
1. To add additional infra nodes:
```
bin/cluster add-nodes gce -i <num nodes> <cluster-id>
```
1. To add additional compute nodes:
```
bin/cluster add-nodes gce -n <num nodes> <cluster-id>
```
Terminating a cluster
---------------------
1. To terminate the cluster:
```
bin/cluster terminate gce <cluster-id>
```
README_libvirt.md (163 lines removed)

@@ -1,163 +0,0 @@

:warning: **WARNING** :warning: This feature is community supported and has not been tested by Red Hat. Visit [docs.openshift.com](https://docs.openshift.com) for [OpenShift Enterprise](https://docs.openshift.com/enterprise/latest/install_config/install/index.html) or [OpenShift Origin](https://docs.openshift.org/latest/install_config/install/index.html) supported installation docs.

LIBVIRT Setup instructions
==========================

`libvirt` is an `openshift-ansible` provider that uses `libvirt` to create local Fedora VMs that are provisioned exactly the same way that cloud VMs would be provisioned.

This makes `libvirt` useful for developing, testing and debugging OpenShift and openshift-ansible locally on the developer's workstation before going to the cloud.

Install dependencies
--------------------

1. Install [ansible](http://www.ansible.com/)
2. Install [dnsmasq](http://www.thekelleys.org.uk/dnsmasq/doc.html)
3. Install [ebtables](http://ebtables.netfilter.org/)
4. Install [qemu and qemu-system-x86](http://wiki.qemu.org/Main_Page)
5. Install [libvirt-python and libvirt](http://libvirt.org/)
6. Install [genisoimage](http://cdrkit.org/) or [mkisofs](http://cdrtools.sourceforge.net/private/cdrecord.html)
7. Enable and start the libvirt daemon, e.g.:
   - `systemctl enable libvirtd`
   - `systemctl start libvirtd`
8. [Grant libvirt access to your user¹](https://libvirt.org/aclpolkit.html)
9. Check that your `$HOME` is accessible to the qemu user²
10. Configure DNS resolution on the host³
11. Install libselinux-python
12. Ensure you have an SSH private and public keypair at `~/.ssh/id_rsa` and `~/.ssh/id_rsa.pub`⁴
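On Fedora, installing the dependencies above corresponds roughly to the following one-liner (a sketch; package names are assumptions and may vary by release):
```
dnf install -y ansible dnsmasq ebtables qemu-system-x86 libvirt libvirt-python genisoimage libselinux-python
```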

#### ¹ Depending on your distribution, libvirt access may be denied by default or may require a password at each access.

You can test it with the following command:

```
virsh -c qemu:///system pool-list
```

If you get access-denied error messages, please read https://libvirt.org/acl.html and https://libvirt.org/aclpolkit.html .

In short, if your libvirt has been compiled with Polkit support (e.g. Arch, Fedora 21), you can create `/etc/polkit-1/rules.d/50-org.libvirt.unix.manage.rules` as follows to grant `$USER` full access to libvirt:

```
sudo /bin/sh -c "cat - > /etc/polkit-1/rules.d/50-org.libvirt.unix.manage.rules" << EOF
polkit.addRule(function(action, subject) {
        if (action.id == "org.libvirt.unix.manage" &&
            subject.user == "$USER") {
                // log the access, then grant it
                polkit.log("action=" + action);
                polkit.log("subject=" + subject);
                return polkit.Result.YES;
        }
});
EOF
```

If your libvirt has not been compiled with Polkit (e.g. Ubuntu 14.04.1 LTS), check the permissions on the libvirt unix socket:

```
ls -l /var/run/libvirt/libvirt-sock
srwxrwx--- 1 root libvirtd 0 févr. 12 16:03 /var/run/libvirt/libvirt-sock

usermod -a -G libvirtd $USER
# $USER needs to log out and back in for the new group to take effect
```

(Replace `$USER` with your login name.)

#### ² Qemu runs as a specific user. It must have access to the VMs' drives.

All the disk drive resources needed by the VMs (Fedora disk image, cloud-init files) are put inside `~/libvirt-storage-pool-openshift/`.

As we're using the `qemu:///system` instance of libvirt, qemu will run with a specific `user:group` distinct from your user. It is configured in `/etc/libvirt/qemu.conf`. That qemu user must have access to that libvirt storage pool.

If your `$HOME` is world readable, everything is fine. If your `$HOME` is private, `ansible` will fail with an error message like:

```
error: Cannot access storage file '$HOME/libvirt-storage-pool-openshift/lenaic-master-216d8.qcow2' (as uid:99, gid:78): Permission denied
```

To fix that issue, you have several options:
* Set `libvirt_storage_pool_path` inside `playbooks/libvirt/openshift-cluster/launch.yml` and `playbooks/libvirt/openshift-cluster/terminate.yml` to a directory:
  * backed by a filesystem with a lot of free disk space;
  * writable by your user;
  * accessible by the qemu user.
* Grant the qemu user access to the storage pool.

On Arch or Fedora 22+:

```
setfacl -m g:kvm:--x ~
```

#### ³ Enabling DNS resolution to your guest VMs with NetworkManager

- Verify NetworkManager is configured to use dnsmasq:

```sh
$ sudo vi /etc/NetworkManager/NetworkManager.conf
[main]
dns=dnsmasq
```

- Configure dnsmasq to use the Virtual Network router for example.com:

```sh
sudo vi /etc/NetworkManager/dnsmasq.d/libvirt_dnsmasq.conf
server=/example.com/192.168.55.1
```

#### ⁴ Private and public keypair in ~/.ssh/id_rsa and ~/.ssh/id_rsa.pub

This playbook uses SSH keys to communicate with the libvirt-driven virtual machines. At this time the names of those keys are fixed and cannot be changed.


Test The Setup
--------------

1. cd openshift-ansible/
2. Try to list all instances (passing an empty string as the cluster_id argument will result in all libvirt instances being listed):

```
bin/cluster list libvirt ''
```

Configuration
-------------

The following options can be passed via the `-o` flag of the `create` command or as environment variables (see the example after this list):

* `image_url` (defaults to `http://cloud.centos.org/centos/7/images/CentOS-7-x86_64-GenericCloud.qcow2.xz`): URL of the QCOW2 image to download
* `image_name` (defaults to `CentOS-7-x86_64-GenericCloud.qcow2`): Name of the QCOW2 image to boot the VMs on
* `image_compression` (defaults to `xz`): Source QCOW2 compression (only xz supported at this time)
* `image_sha256` (defaults to `dd0f5e610e7c5ffacaca35ed7a78a19142a588f4543da77b61c1fb0d74400471`): Expected SHA256 checksum of the downloaded image
* `libvirt_storage_pool` (defaults to `openshift-ansible`): name of the libvirt storage pool for the VM images. It will be created if it does not exist
* `libvirt_storage_pool_path` (defaults to `$HOME/libvirt-storage-pool-openshift-ansible`): path to `libvirt_storage_pool`, i.e. where the VM images are stored
* `libvirt_network` (defaults to `openshift-ansible`): name of the libvirt network that the VMs will use. It will be created if it does not exist
* `libvirt_instance_memory_mib` (defaults to `1024`): memory of the VMs in MiB
* `libvirt_instance_vcpu` (defaults to `2`): number of vCPUs of the VMs
* `skip_image_download` (defaults to `no`): Skip QCOW2 image download. This requires the `image_name` QCOW2 image to be already present in `$HOME/libvirt-storage-pool-openshift-ansible`
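For example, to boot bigger VMs (the values and cluster name are illustrative):
```
bin/cluster create -o libvirt_instance_memory_mib=2048 -o libvirt_instance_vcpu=4 libvirt mycluster
```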

Creating a cluster
------------------

1. To create a cluster with one master and two nodes:

```
bin/cluster create libvirt lenaic
```

Updating a cluster
------------------

1. To update the cluster:

```
bin/cluster update libvirt lenaic
```

Terminating a cluster
---------------------

1. To terminate the cluster:

```
bin/cluster terminate libvirt lenaic
```
@@ -1,87 +0,0 @@

:warning: **WARNING** :warning: This feature is community supported and has not been tested by Red Hat. Visit [docs.openshift.com](https://docs.openshift.com) for [OpenShift Enterprise](https://docs.openshift.com/enterprise/latest/install_config/install/index.html) or [OpenShift Origin](https://docs.openshift.org/latest/install_config/install/index.html) supported installation docs.

OPENSTACK Setup instructions
============================

Requirements
------------

The OpenStack instance must have Neutron and Heat enabled.

Install Dependencies
--------------------

1. The OpenStack python clients for Nova, Neutron and Heat are required:

* `python-novaclient`
* `python-neutronclient`
* `python-heatclient`

On Fedora:
```
dnf install -y ansible python-novaclient python-neutronclient python-heatclient
```

On RHEL / CentOS:
```
yum install -y ansible python-novaclient python-neutronclient python-heatclient
sudo pip install shade
```

Configuration
-------------

The following options can be passed via the `-o` flag of the `create` command:

* `infra_heat_stack` (defaults to `playbooks/openstack/openshift-cluster/files/heat_stack.yaml`): filename of the HEAT template to use to create the cluster infrastructure

The following options are used only by `heat_stack.yaml`, i.e. only if the `infra_heat_stack` option is left at its default value.

* `image_name`: Name of the image to use to spawn VMs
* `public_key` (defaults to `~/.ssh/id_rsa.pub`): filename of the ssh public key
* `etcd_flavor` (defaults to `m1.small`): The ID or name of the flavor for the etcd nodes
* `master_flavor` (defaults to `m1.small`): The ID or name of the flavor for the master
* `node_flavor` (defaults to `m1.medium`): The ID or name of the flavor for the compute nodes
* `infra_flavor` (defaults to `m1.small`): The ID or name of the flavor for the infrastructure nodes
* `network_prefix` (defaults to `openshift-ansible-<cluster_id>`): prefix prepended to all network objects (net, subnet, router, security groups)
* `dns` (defaults to `8.8.8.8,8.8.4.4`): comma-separated list of DNS servers to use
* `net_cidr` (defaults to `192.168.<rand()>.0/24`): CIDR of the network created by `heat_stack.yaml`
* `external_net` (defaults to `external`): Name of the external network to connect to
* `floating_ip_pool` (defaults to `external`): comma-separated list of floating IP pools
* `ssh_from` (defaults to `0.0.0.0/0`): IPs authorized to connect to the VMs via ssh
* `node_port_from` (defaults to `0.0.0.0/0`): IPs authorized to connect to the services exposed via nodePort
* `heat_timeout` (defaults to `3`): Timeout (in minutes) passed to heat for stack creation or update


Creating a cluster
------------------

1. To create a cluster with one master and two nodes:

```
bin/cluster create openstack <cluster-id>
```

2. To create a cluster with one master and three nodes, a custom VM image and custom DNS:

```
bin/cluster create -n 3 -o image_name=rhel-7.1-openshift-2015.05.21 -o dns=172.16.50.210,172.16.50.250 openstack lenaic
```
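3. For OpenStack, bin/cluster also accepts a `-c`/`--cloudprovider` flag that enables the OpenShift OpenStack cloud provider and picks up credentials from the usual `OS_*` environment variables (see the create method in bin/cluster below); a sketch:

```
bin/cluster create -c openstack <cluster-id>
```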

Updating a cluster
------------------

1. To update the cluster:

```
bin/cluster update openstack <cluster-id>
```

Terminating a cluster
---------------------

1. To terminate the cluster:

```
bin/cluster terminate openstack <cluster-id>
```

@@ -1 +0,0 @@

The Vagrant-based installation has been moved to: https://github.com/openshift/openshift-ansible-contrib/tree/master/vagrant
@@ -1,6 +0,0 @@

# The `bin/cluster` tool

This tool was meant to be the entry point for managing OpenShift clusters,
running against different "providers" (`aws`, `gce`, `libvirt`, `openstack`),
though its use is now deprecated in favor of the [`byo`](../playbooks/byo)
playbooks.
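From the argparse setup in the script below, the general invocation shape is (a reconstruction for orientation, not part of the original README):

```
bin/cluster [-v] <action> <provider> <cluster-id> [options]

# actions: create, add-nodes, config, update, terminate, list, service
# e.g. create a libvirt cluster with 1 master and 2 nodes:
bin/cluster create -m 1 -n 2 libvirt testcluster
```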
bin/cluster (424 lines removed)
@@ -1,424 +0,0 @@

#!/usr/bin/env python2

import argparse
import ConfigParser
import os
import sys
import subprocess
import traceback


class Cluster(object):
    """
    Provide Command, Control and Configuration (c3) Interface for OpenShift Clusters
    """

    def __init__(self):
        # setup ansible ssh environment
        if 'ANSIBLE_SSH_ARGS' not in os.environ:
            os.environ['ANSIBLE_SSH_ARGS'] = (
                '-o ForwardAgent=yes '
                '-o StrictHostKeyChecking=no '
                '-o UserKnownHostsFile=/dev/null '
                '-o ControlMaster=auto '
                '-o ControlPersist=600s '
            )
        # Because of `UserKnownHostsFile=/dev/null`
        # our `.ssh/known_hosts` file most probably misses the ssh host public keys
        # of our servers.
        # In that case, ansible serializes the execution of ansible modules
        # because we might be interactively prompted to accept the ssh host public keys.
        # Because of `StrictHostKeyChecking=no` we know that we won't be prompted,
        # so we don't want our modules execution to be serialized.
        os.environ['ANSIBLE_HOST_KEY_CHECKING'] = 'False'
        # TODO: A more secure way to proceed would consist in dynamically
        # retrieving the ssh host public keys from the IaaS interface
        if 'ANSIBLE_SSH_PIPELINING' not in os.environ:
            os.environ['ANSIBLE_SSH_PIPELINING'] = 'True'

    def get_deployment_type(self, args):
        """
        Get the deployment_type based on the environment variables and the
        command line arguments
        :param args: command line arguments provided by the user
        :return: string representing the deployment type
        """
        deployment_type = 'origin'
        if args.deployment_type:
            deployment_type = args.deployment_type
        elif 'OS_DEPLOYMENT_TYPE' in os.environ:
            deployment_type = os.environ['OS_DEPLOYMENT_TYPE']
        return deployment_type

    def create(self, args):
        """
        Create an OpenShift cluster for given provider
        :param args: command line arguments provided by user
        """
        cluster = {'cluster_id': args.cluster_id,
                   'deployment_type': self.get_deployment_type(args)}
        playbook = "playbooks/{0}/openshift-cluster/launch.yml".format(args.provider)
        inventory = self.setup_provider(args.provider)

        cluster['num_masters'] = args.masters
        cluster['num_nodes'] = args.nodes
        cluster['num_infra'] = args.infra
        cluster['num_etcd'] = args.etcd
        cluster['cluster_env'] = args.env

        if args.cloudprovider and args.provider == 'openstack':
            cluster['openshift_cloudprovider_kind'] = 'openstack'
            cluster['openshift_cloudprovider_openstack_auth_url'] = os.getenv('OS_AUTH_URL')
            cluster['openshift_cloudprovider_openstack_username'] = os.getenv('OS_USERNAME')
            cluster['openshift_cloudprovider_openstack_password'] = os.getenv('OS_PASSWORD')
            if 'OS_USER_DOMAIN_ID' in os.environ:
                cluster['openshift_cloudprovider_openstack_domain_id'] = os.getenv('OS_USER_DOMAIN_ID')
            if 'OS_USER_DOMAIN_NAME' in os.environ:
                cluster['openshift_cloudprovider_openstack_domain_name'] = os.getenv('OS_USER_DOMAIN_NAME')
            if 'OS_PROJECT_ID' in os.environ or 'OS_TENANT_ID' in os.environ:
                cluster['openshift_cloudprovider_openstack_tenant_id'] = os.getenv('OS_PROJECT_ID', os.getenv('OS_TENANT_ID'))
            if 'OS_PROJECT_NAME' in os.environ or 'OS_TENANT_NAME' in os.environ:
                cluster['openshift_cloudprovider_openstack_tenant_name'] = os.getenv('OS_PROJECT_NAME', os.getenv('OS_TENANT_NAME'))
            if 'OS_REGION_NAME' in os.environ:
                cluster['openshift_cloudprovider_openstack_region'] = os.getenv('OS_REGION_NAME')

        self.action(args, inventory, cluster, playbook)

    def add_nodes(self, args):
        """
        Add nodes to an existing cluster for given provider
        :param args: command line arguments provided by user
        """
        cluster = {'cluster_id': args.cluster_id,
                   'deployment_type': self.get_deployment_type(args),
                   }
        playbook = "playbooks/{0}/openshift-cluster/add_nodes.yml".format(args.provider)
        inventory = self.setup_provider(args.provider)

        cluster['num_nodes'] = args.nodes
        cluster['num_infra'] = args.infra
        cluster['cluster_env'] = args.env

        self.action(args, inventory, cluster, playbook)

    def terminate(self, args):
        """
        Destroy OpenShift cluster
        :param args: command line arguments provided by user
        """
        cluster = {'cluster_id': args.cluster_id,
                   'deployment_type': self.get_deployment_type(args),
                   'cluster_env': args.env,
                   }
        playbook = "playbooks/{0}/openshift-cluster/terminate.yml".format(args.provider)
        inventory = self.setup_provider(args.provider)

        self.action(args, inventory, cluster, playbook)

    def list(self, args):
        """
        List VMs in cluster
        :param args: command line arguments provided by user
        """
        cluster = {'cluster_id': args.cluster_id,
                   'deployment_type': self.get_deployment_type(args),
                   'cluster_env': args.env,
                   }
        playbook = "playbooks/{0}/openshift-cluster/list.yml".format(args.provider)
        inventory = self.setup_provider(args.provider)

        self.action(args, inventory, cluster, playbook)

    def config(self, args):
        """
        Configure or reconfigure OpenShift across clustered VMs
        :param args: command line arguments provided by user
        """
        cluster = {'cluster_id': args.cluster_id,
                   'deployment_type': self.get_deployment_type(args),
                   'cluster_env': args.env,
                   }
        playbook = "playbooks/{0}/openshift-cluster/config.yml".format(args.provider)
        inventory = self.setup_provider(args.provider)

        self.action(args, inventory, cluster, playbook)

    def update(self, args):
        """
        Update to latest OpenShift across clustered VMs
        :param args: command line arguments provided by user
        """
        cluster = {'cluster_id': args.cluster_id,
                   'deployment_type': self.get_deployment_type(args),
                   'cluster_env': args.env,
                   }

        playbook = "playbooks/{0}/openshift-cluster/update.yml".format(args.provider)
        inventory = self.setup_provider(args.provider)

        self.action(args, inventory, cluster, playbook)

    def service(self, args):
        """
        Make the same service call across all nodes in the cluster
        :param args: command line arguments provided by user
        """
        cluster = {'cluster_id': args.cluster_id,
                   'deployment_type': self.get_deployment_type(args),
                   'new_cluster_state': args.state,
                   'cluster_env': args.env,
                   }

        playbook = "playbooks/{0}/openshift-cluster/service.yml".format(args.provider)
        inventory = self.setup_provider(args.provider)

        self.action(args, inventory, cluster, playbook)

    def setup_provider(self, provider):
        """
        Setup ansible playbook environment
        :param provider: command line arguments provided by user
        :return: path to inventory for given provider
        """
        config = ConfigParser.ConfigParser()
        if 'gce' == provider:
            gce_ini_default_path = os.path.join('inventory/gce/hosts/gce.ini')
            gce_ini_path = os.environ.get('GCE_INI_PATH', gce_ini_default_path)
            if os.path.exists(gce_ini_path):
                config.readfp(open(gce_ini_path))

                for key in config.options('gce'):
                    os.environ[key] = config.get('gce', key)

            inventory = '-i inventory/gce/hosts'
        elif 'aws' == provider:
            config.readfp(open('inventory/aws/hosts/ec2.ini'))

            for key in config.options('ec2'):
                os.environ[key] = config.get('ec2', key)

            inventory = '-i inventory/aws/hosts'

            key_vars = ['AWS_ACCESS_KEY_ID', 'AWS_SECRET_ACCESS_KEY']
            key_missing = [key for key in key_vars if key not in os.environ]

            boto_conf_files = ['~/.aws/credentials', '~/.boto']
            conf_exists = lambda conf: os.path.isfile(os.path.expanduser(conf))
            boto_configs = [conf for conf in boto_conf_files if conf_exists(conf)]

            if len(key_missing) > 0 and len(boto_configs) == 0:
                raise ValueError("PROVIDER aws requires {0} environment variable(s). See README_AWS.md".format(key_missing))

        elif 'libvirt' == provider:
            inventory = '-i inventory/libvirt/hosts'
        elif 'openstack' == provider:
            inventory = '-i inventory/openstack/hosts'
        else:
            # this code should never be reached
            raise ValueError("invalid PROVIDER {0}".format(provider))

        return inventory

    def action(self, args, inventory, cluster, playbook):
        """
        Build ansible-playbook command line and execute
        :param args: command line arguments provided by user
        :param inventory: derived provider library
        :param cluster: cluster variables for kubernetes
        :param playbook: ansible playbook to execute
        """

        verbose = ''
        if args.verbose > 0:
            verbose = '-{0}'.format('v' * args.verbose)

        if args.option:
            for opt in args.option:
                k, v = opt.split('=', 1)
                cluster['cli_' + k] = v

        ansible_extra_vars = '-e \'{0}\''.format(
            ' '.join(['%s=%s' % (key, value) for (key, value) in cluster.items()])
        )

        command = 'ansible-playbook {0} {1} {2} {3}'.format(
            verbose, inventory, ansible_extra_vars, playbook
        )

        if args.profile:
            command = 'ANSIBLE_CALLBACK_PLUGINS=ansible-profile/callback_plugins ' + command

        if args.verbose > 1:
            command = 'time {0}'.format(command)

        if args.verbose > 0:
            sys.stderr.write('RUN [{0}]\n'.format(command))
            sys.stderr.flush()

        try:
            subprocess.check_call(command, shell=True)
        except subprocess.CalledProcessError as exc:
            raise ActionFailed("ACTION [{0}] failed: {1}"
                               .format(args.action, exc))


class ActionFailed(Exception):
    """
    Raised when action failed.
    """
    pass


if __name__ == '__main__':
    """
    User command to invoke ansible playbooks in a "known" configuration

    Reads ~/.openshift-ansible for default configuration items
    [DEFAULT]
    validate_cluster_ids = False
    cluster_ids = marketing,sales
    providers = gce,aws,libvirt,openstack
    """

    warning = ("================================================================================\n"
               "ATTENTION: You are running a community supported utility that has not been\n"
               "tested by Red Hat. Visit https://docs.openshift.com for supported installation\n"
               "instructions.\n"
               "================================================================================\n\n")
    sys.stderr.write(warning)

    cluster_config = ConfigParser.SafeConfigParser({
        'cluster_ids': 'marketing,sales',
        'validate_cluster_ids': 'False',
        'providers': 'gce,aws,libvirt,openstack',
    })

    path = os.path.expanduser("~/.openshift-ansible")
    if os.path.isfile(path):
        cluster_config.read(path)

    cluster = Cluster()

    parser = argparse.ArgumentParser(
        formatter_class=argparse.RawDescriptionHelpFormatter,
        description='Python wrapper to ensure proper configuration for OpenShift ansible playbooks',
        epilog='''\
This wrapper is overriding the following ansible variables:

 * ANSIBLE_SSH_ARGS:
   If not set in the environment, this wrapper will use the following value:
   `-o ForwardAgent=yes -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o ControlMaster=auto -o ControlPersist=600s`
   If set in the environment, the environment variable value is left untouched and used.

 * ANSIBLE_SSH_PIPELINING:
   If not set in the environment, this wrapper will set it to `True`.
   If you experience issues with Ansible SSH pipelining, you can disable it by explicitly setting this environment variable to `False`.
'''
    )
    parser.add_argument('-v', '--verbose', action='count',
                        help='Multiple -v options increase the verbosity')
    parser.add_argument('--version', action='version', version='%(prog)s 0.3')

    meta_parser = argparse.ArgumentParser(add_help=False)
    providers = cluster_config.get('DEFAULT', 'providers').split(',')
    meta_parser.add_argument('provider', choices=providers, help='provider')

    if cluster_config.get('DEFAULT', 'validate_cluster_ids').lower() in ("yes", "true", "1"):
        meta_parser.add_argument('cluster_id', choices=cluster_config.get('DEFAULT', 'cluster_ids').split(','),
                                 help='prefix for cluster VM names')
    else:
        meta_parser.add_argument('cluster_id', help='prefix for cluster VM names')

    meta_parser.add_argument('-t', '--deployment-type',
                             choices=['origin', 'atomic-enterprise', 'openshift-enterprise'],
                             help='Deployment type. (default: origin)')
    meta_parser.add_argument('-o', '--option', action='append',
                             help='options')

    meta_parser.add_argument('--env', default='dev', type=str,
                             help='environment for the cluster. Defaults to \'dev\'.')

    meta_parser.add_argument('-p', '--profile', action='store_true',
                             help='Enable playbook profiling')

    action_parser = parser.add_subparsers(dest='action', title='actions',
                                          description='Choose from valid actions')

    create_parser = action_parser.add_parser('create', help='Create a cluster',
                                             parents=[meta_parser])
    create_parser.add_argument('-c', '--cloudprovider', action='store_true',
                               help='Enable the cloudprovider')
    create_parser.add_argument('-m', '--masters', default=1, type=int,
                               help='number of masters to create in cluster')
    create_parser.add_argument('-n', '--nodes', default=2, type=int,
                               help='number of nodes to create in cluster')
    create_parser.add_argument('-i', '--infra', default=1, type=int,
                               help='number of infra nodes to create in cluster')
    create_parser.add_argument('-e', '--etcd', default=0, type=int,
                               help='number of external etcd hosts to create in cluster')
    create_parser.set_defaults(func=cluster.create)

    create_parser = action_parser.add_parser('add-nodes', help='Add nodes to a cluster',
                                             parents=[meta_parser])
    create_parser.add_argument('-n', '--nodes', default=1, type=int,
                               help='number of nodes to add to the cluster')
    create_parser.add_argument('-i', '--infra', default=1, type=int,
                               help='number of infra nodes to add to the cluster')
    create_parser.set_defaults(func=cluster.add_nodes)

    config_parser = action_parser.add_parser('config',
                                             help='Configure or reconfigure a cluster',
                                             parents=[meta_parser])
    config_parser.set_defaults(func=cluster.config)

    terminate_parser = action_parser.add_parser('terminate',
                                                help='Destroy a cluster',
                                                parents=[meta_parser])
    terminate_parser.add_argument('-f', '--force', action='store_true',
                                  help='Destroy cluster without confirmation')
    terminate_parser.set_defaults(func=cluster.terminate)

    update_parser = action_parser.add_parser('update',
                                             help='Update OpenShift across cluster',
                                             parents=[meta_parser])
    update_parser.add_argument('-f', '--force', action='store_true',
                               help='Update cluster without confirmation')
    update_parser.set_defaults(func=cluster.update)

    list_parser = action_parser.add_parser('list', help='List VMs in cluster',
                                           parents=[meta_parser])
    list_parser.set_defaults(func=cluster.list)

    service_parser = action_parser.add_parser('service', help='service for openshift across cluster',
                                              parents=[meta_parser])
    # choices are the only ones valid for the ansible service module: http://docs.ansible.com/service_module.html
    service_parser.add_argument('state', choices=['started', 'stopped', 'restarted', 'reloaded'],
                                help='make service call across cluster')
    service_parser.set_defaults(func=cluster.service)

    args = parser.parse_args()

    if 'terminate' == args.action and not args.force:
        answer = raw_input("This will destroy the ENTIRE {0} cluster. Are you sure? [y/N] ".format(args.cluster_id))
        if answer not in ['y', 'Y']:
            sys.stderr.write('\nACTION [terminate] aborted by user!\n')
            exit(1)

    if 'update' == args.action and not args.force:
        answer = raw_input(
            "This is destructive and could corrupt {0} cluster. Continue? [y/N] ".format(args.cluster_id))
        if answer not in ['y', 'Y']:
            sys.stderr.write('\nACTION [update] aborted by user!\n')
            exit(1)

    try:
        args.func(args)
    except Exception as exc:
        if args.verbose:
            traceback.print_exc(file=sys.stderr)
        else:
            print >>sys.stderr, exc
        exit(1)
@@ -28,12 +28,6 @@ These are plugins used in playbooks and roles:

 ```
 .
-├── bin                 [DEPRECATED] Contains the `bin/cluster` script, a
-│                       wrapper around the Ansible playbooks that ensures proper
-│                       configuration, and facilitates installing, updating,
-│                       destroying and configuring OpenShift clusters.
-│                       Note: this tool is kept in the repository for legacy
-│                       reasons and will be removed at some point.
 └── utils               Contains the `atomic-openshift-installer` command, an
                         interactive CLI utility to install OpenShift across a
                         set of hosts.
@@ -2,8 +2,4 @@

 You can install OpenShift on:

-* [Amazon Web Services](aws/hosts/)
-* [BYO](byo/) (Bring your own), use this inventory config file to install OpenShift on your bare metal servers
-* [GCE](gce/) (Google Compute Engine)
-* [libvirt](libvirt/hosts/)
-* [OpenStack](openstack/hosts/)
+* [BYO](byo/) (Bring your own), use this inventory config file to install OpenShift on your pre-existing hosts
inventory/aws/hosts/ec2.ini (189 lines removed)

@@ -1,189 +0,0 @@

# Ansible EC2 external inventory script settings
#

[ec2]

# to talk to a private eucalyptus instance uncomment these lines
# and edit eucalyptus_host to be the host name of your cloud controller
#eucalyptus = True
#eucalyptus_host = clc.cloud.domain.org

# AWS regions to make calls to. Set this to 'all' to make requests to all regions
# in AWS and merge the results together. Alternatively, set this to a comma
# separated list of regions. E.g. 'us-east-1,us-west-1,us-west-2'
regions = all
regions_exclude = us-gov-west-1,cn-north-1

# When generating inventory, Ansible needs to know how to address a server.
# Each EC2 instance has a lot of variables associated with it. Here is the list:
#   http://docs.pythonboto.org/en/latest/ref/ec2.html#module-boto.ec2.instance
# Below are 2 variables that are used as the address of a server:
#   - destination_variable
#   - vpc_destination_variable

# This is the normal destination variable to use. If you are running Ansible
# from outside EC2, then 'public_dns_name' makes the most sense. If you are
# running Ansible from within EC2, then perhaps you want to use the internal
# address, and should set this to 'private_dns_name'. The key of an EC2 tag
# may optionally be used; however the boto instance variables hold precedence
# in the event of a collision.
destination_variable = public_dns_name

# This allows you to override the inventory_name with an ec2 variable, instead
# of using the destination_variable above. Addressing (aka ansible_ssh_host)
# will still use destination_variable. Tags should be written as 'tag_TAGNAME'.
hostname_variable = tag_Name

# For servers inside a VPC, using DNS names may not make sense. When an instance
# has 'subnet_id' set, this variable is used. If the subnet is public, setting
# this to 'ip_address' will return the public IP address. For instances in a
# private subnet, this should be set to 'private_ip_address', and Ansible must
# be run from within EC2. The key of an EC2 tag may optionally be used; however
# the boto instance variables hold precedence in the event of a collision.
# WARNING: - instances that are in the private vpc, _without_ public ip address
# will not be listed in the inventory until you set:
#    vpc_destination_variable = private_ip_address
vpc_destination_variable = ip_address

# The following two settings allow flexible ansible host naming based on a
# python format string and a comma-separated list of ec2 tags. Note that:
#
# 1) If the tags referenced are not present for some instances, empty strings
#    will be substituted in the format string.
# 2) This overrides both destination_variable and vpc_destination_variable.
#
#destination_format = {0}.{1}.example.com
#destination_format_tags = Name,environment

# To tag instances on EC2 with the resource records that point to them from
# Route53, uncomment and set 'route53' to True.
route53 = False

# To exclude RDS instances from the inventory, uncomment and set to False.
rds = False

# To exclude ElastiCache instances from the inventory, uncomment and set to False.
elasticache = False

# Additionally, you can specify the list of zones to exclude looking up in
# 'route53_excluded_zones' as a comma-separated list.
# route53_excluded_zones = samplezone1.com, samplezone2.com

# By default, only EC2 instances in the 'running' state are returned. Set
# 'all_instances' to True to return all instances regardless of state.
all_instances = False

# By default, only EC2 instances in the 'running' state are returned. Specify
# EC2 instance states to return as a comma-separated list. This
# option is overridden when 'all_instances' is True.
# instance_states = pending, running, shutting-down, terminated, stopping, stopped

# By default, only RDS instances in the 'available' state are returned. Set
# 'all_rds_instances' to True to return all RDS instances regardless of state.
all_rds_instances = False

# Include RDS cluster information (Aurora etc.)
include_rds_clusters = False

# By default, only ElastiCache clusters and nodes in the 'available' state
# are returned. Set 'all_elasticache_clusters' and/or 'all_elasticache_nodes'
# to True to return all ElastiCache clusters and nodes, regardless of state.
#
# Note that all_elasticache_nodes only applies to listed clusters. That means
# if you set all_elasticache_clusters to False, no nodes will be returned from
# unavailable clusters, regardless of their state and of what you set for
# all_elasticache_nodes.
all_elasticache_replication_groups = False
all_elasticache_clusters = False
all_elasticache_nodes = False

# API calls to EC2 are slow. For this reason, we cache the results of an API
# call. Set this to the path you want cache files to be written to. Two files
# will be written to this directory:
#   - ansible-ec2.cache
#   - ansible-ec2.index
cache_path = ~/.ansible/tmp

# The number of seconds a cache file is considered valid. After this many
# seconds, a new API call will be made, and the cache file will be updated.
# To disable the cache, set this value to 0
cache_max_age = 300

# Organize groups into a nested/hierarchy instead of a flat namespace.
nested_groups = False

# Replace - tags when creating groups to avoid issues with ansible
replace_dash_in_groups = False

# If set to true, any tag of the form "a,b,c" is expanded into a list
# and the results are used to create additional tag_* inventory groups.
expand_csv_tags = False

# The EC2 inventory output can become very large. To manage its size,
# configure which groups should be created.
group_by_instance_id = True
group_by_region = True
group_by_availability_zone = True
group_by_ami_id = True
group_by_instance_type = True
group_by_key_pair = True
group_by_vpc_id = True
group_by_security_group = True
group_by_tag_keys = True
group_by_tag_none = True
group_by_route53_names = True
group_by_rds_engine = True
group_by_rds_parameter_group = True
group_by_elasticache_engine = True
group_by_elasticache_cluster = True
group_by_elasticache_parameter_group = True
group_by_elasticache_replication_group = True

# If you only want to include hosts that match a certain regular expression
# pattern_include = staging-*

# If you want to exclude any hosts that match a certain regular expression
# pattern_exclude = staging-*

# Instance filters can be used to control which instances are retrieved for
# inventory. For the full list of possible filters, please read the EC2 API
# docs: http://docs.aws.amazon.com/AWSEC2/latest/APIReference/ApiReference-query-DescribeInstances.html#query-DescribeInstances-filters
# Filters are key/value pairs separated by '='; to list multiple filters use
# a list separated by commas. See examples below.

# Retrieve only instances with (key=value) env=staging tag
# instance_filters = tag:env=staging

# Retrieve only instances with role=webservers OR role=dbservers tag
# instance_filters = tag:role=webservers,tag:role=dbservers

# Retrieve only t1.micro instances OR instances with tag env=staging
# instance_filters = instance-type=t1.micro,tag:env=staging

# You can use wildcards in filter values as well. The below will list instances
# whose tag Name value matches webservers1*
# (ex. webservers15, webservers1a, webservers123 etc)
# instance_filters = tag:Name=webservers1*

# A boto configuration profile may be used to separate out credentials
# see http://boto.readthedocs.org/en/latest/boto_config_tut.html
# boto_profile = some-boto-profile-name


[credentials]

# The AWS credentials can optionally be specified here. Credentials specified
# here are ignored if the environment variable AWS_ACCESS_KEY_ID or
# AWS_PROFILE is set, or if the boto_profile property above is set.
#
# Supplying AWS credentials here is not recommended, as it introduces
# non-trivial security concerns. When going down this route, please make sure
# to set access permissions for this file correctly, e.g. handle it the same
# way as you would a private SSH key.
#
# Unlike the boto and AWS configure files, this section does not support
# profiles.
#
# aws_access_key_id = AXXXXXXXXXXXXXX
# aws_secret_access_key = XXXXXXXXXXXXXXXXXXX
# aws_security_token = XXXXXXXXXXXXXXXXXXXXXXXXXXXX
(One file diff suppressed because it is too large.)

@@ -1 +0,0 @@

localhost ansible_connection=local ansible_become=no ansible_python_interpreter='/usr/bin/env python2'
@@ -1,477 +0,0 @@
#!/usr/bin/env python2
# pylint: skip-file
# Copyright 2013 Google Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.

'''
GCE external inventory script
=================================

Generates inventory that Ansible can understand by making API requests to
Google Compute Engine via the libcloud library. Full install/configuration
instructions for the gce* modules can be found in the comments of
ansible/test/gce_tests.py.

When run against a specific host, this script returns the following variables
based on the data obtained from the libcloud Node object:
 - gce_uuid
 - gce_id
 - gce_image
 - gce_machine_type
 - gce_private_ip
 - gce_public_ip
 - gce_name
 - gce_description
 - gce_status
 - gce_zone
 - gce_tags
 - gce_metadata
 - gce_network

When run in --list mode, instances are grouped by the following categories:
 - zone:
   zone group name examples are us-central1-b, europe-west1-a, etc.
 - instance tags:
   An entry is created for each tag. For example, if you have two instances
   with a common tag called 'foo', they will both be grouped together under
   the 'tag_foo' name.
 - network name:
   the name of the network is appended to 'network_' (e.g. the 'default'
   network will result in a group named 'network_default')
 - machine type
   types follow a pattern like n1-standard-4, g1-small, etc.
 - running status:
   group name prefixed with 'status_' (e.g. status_running, status_stopped,..)
 - image:
   when using an ephemeral/scratch disk, this will be set to the image name
   used when creating the instance (e.g. debian-7-wheezy-v20130816). when
   your instance was created with a root persistent disk it will be set to
   'persistent_disk' since there is no current way to determine the image.

Examples:
  Execute uname on all instances in the us-central1-a zone
  $ ansible -i gce.py us-central1-a -m shell -a "/bin/uname -a"

  Use the GCE inventory script to print out instance specific information
  $ contrib/inventory/gce.py --host my_instance

Author: Eric Johnson <erjohnso@google.com>
Contributors: Matt Hite <mhite@hotmail.com>, Tom Melendez <supertom@google.com>
Version: 0.0.3
'''

__requires__ = ['pycrypto>=2.6']
try:
    import pkg_resources
except ImportError:
    # Use pkg_resources to find the correct versions of libraries and set
    # sys.path appropriately when there are multiversion installs. We don't
    # fail here as there is code that better expresses the errors where the
    # library is used.
    pass

USER_AGENT_PRODUCT = "Ansible-gce_inventory_plugin"
USER_AGENT_VERSION = "v2"

import sys
import os
import argparse

from time import time

import ConfigParser

import logging
logging.getLogger('libcloud.common.google').addHandler(logging.NullHandler())

try:
    import json
except ImportError:
    import simplejson as json

try:
    from libcloud.compute.types import Provider
    from libcloud.compute.providers import get_driver
    _ = Provider.GCE
except:
    sys.exit("GCE inventory script requires libcloud >= 0.13")


class CloudInventoryCache(object):
    def __init__(self, cache_name='ansible-cloud-cache', cache_path='/tmp',
                 cache_max_age=300):
        cache_dir = os.path.expanduser(cache_path)
        if not os.path.exists(cache_dir):
            os.makedirs(cache_dir)
        self.cache_path_cache = os.path.join(cache_dir, cache_name)

        self.cache_max_age = cache_max_age

    def is_valid(self, max_age=None):
        ''' Determines if the cache files have expired, or if it is still valid '''

        if max_age is None:
            max_age = self.cache_max_age

        if os.path.isfile(self.cache_path_cache):
            mod_time = os.path.getmtime(self.cache_path_cache)
            current_time = time()
            if (mod_time + max_age) > current_time:
                return True

        return False

    def get_all_data_from_cache(self, filename=''):
        ''' Reads the JSON inventory from the cache file. Returns Python dictionary. '''

        data = ''
        if not filename:
            filename = self.cache_path_cache
        with open(filename, 'r') as cache:
            data = cache.read()
        return json.loads(data)

    def write_to_cache(self, data, filename=''):
        ''' Writes data to file as JSON. Returns True. '''
        if not filename:
            filename = self.cache_path_cache
        json_data = json.dumps(data)
        with open(filename, 'w') as cache:
            cache.write(json_data)
        return True


class GceInventory(object):
    def __init__(self):
        # Cache object
        self.cache = None
        # dictionary containing inventory read from disk
        self.inventory = {}

        # Read settings and parse CLI arguments
        self.parse_cli_args()
        self.config = self.get_config()
        self.driver = self.get_gce_driver()
        self.ip_type = self.get_inventory_options()
        if self.ip_type:
            self.ip_type = self.ip_type.lower()

        # Cache management
        start_inventory_time = time()
        cache_used = False
        if self.args.refresh_cache or not self.cache.is_valid():
            self.do_api_calls_update_cache()
        else:
            self.load_inventory_from_cache()
            cache_used = True
            self.inventory['_meta']['stats'] = {'use_cache': True}
        self.inventory['_meta']['stats'] = {
            'inventory_load_time': time() - start_inventory_time,
            'cache_used': cache_used
        }

        # Just display data for specific host
        if self.args.host:
            print(self.json_format_dict(
                self.inventory['_meta']['hostvars'][self.args.host],
                pretty=self.args.pretty))
        else:
            # Otherwise, assume user wants all instances grouped
            zones = self.parse_env_zones()
            print(self.json_format_dict(self.inventory,
                                        pretty=self.args.pretty))
        sys.exit(0)

    def get_config(self):
        """
        Reads the settings from the gce.ini file.

        Populates a SafeConfigParser object with defaults and
        attempts to read an .ini-style configuration from the filename
        specified in GCE_INI_PATH. If the environment variable is
        not present, the filename defaults to gce.ini in the current
        working directory.
        """
        gce_ini_default_path = os.path.join(
            os.path.dirname(os.path.realpath(__file__)), "gce.ini")
        gce_ini_path = os.environ.get('GCE_INI_PATH', gce_ini_default_path)

        # Create a ConfigParser.
        # This provides empty defaults to each key, so that environment
        # variable configuration (as opposed to INI configuration) is able
        # to work.
        config = ConfigParser.SafeConfigParser(defaults={
            'gce_service_account_email_address': '',
            'gce_service_account_pem_file_path': '',
            'gce_project_id': '',
            'libcloud_secrets': '',
            'inventory_ip_type': '',
            'cache_path': '~/.ansible/tmp',
            'cache_max_age': '300'
        })
        if 'gce' not in config.sections():
            config.add_section('gce')
        if 'inventory' not in config.sections():
            config.add_section('inventory')
        if 'cache' not in config.sections():
            config.add_section('cache')

        config.read(gce_ini_path)

        #########
        # Section added for processing ini settings
        #########

        # Set the instance_states filter based on config file options
        self.instance_states = []
        if config.has_option('gce', 'instance_states'):
            states = config.get('gce', 'instance_states')
            # Ignore if instance_states is an empty string.
            if states:
                self.instance_states = states.split(',')

        # Caching
        cache_path = config.get('cache', 'cache_path')
        cache_max_age = config.getint('cache', 'cache_max_age')
        # TODO(supertom): support project-specific caches
        cache_name = 'ansible-gce.cache'
        self.cache = CloudInventoryCache(cache_path=cache_path,
                                         cache_max_age=cache_max_age,
                                         cache_name=cache_name)
        return config

    def get_inventory_options(self):
        """Determine inventory options. Environment variables always
        take precedence over configuration files."""
        ip_type = self.config.get('inventory', 'inventory_ip_type')
        # If the appropriate environment variables are set, they override
        # other configuration
        ip_type = os.environ.get('INVENTORY_IP_TYPE', ip_type)
        return ip_type

    def get_gce_driver(self):
        """Determine the GCE authorization settings and return a
        libcloud driver.
        """
        # Attempt to get GCE params from a configuration file, if one
        # exists.
        secrets_path = self.config.get('gce', 'libcloud_secrets')
        secrets_found = False
        try:
            import secrets
            args = list(getattr(secrets, 'GCE_PARAMS', []))
            kwargs = getattr(secrets, 'GCE_KEYWORD_PARAMS', {})
            secrets_found = True
        except:
            pass

        if not secrets_found and secrets_path:
            if not secrets_path.endswith('secrets.py'):
                err = "Must specify libcloud secrets file as "
                err += "/absolute/path/to/secrets.py"
                sys.exit(err)
            sys.path.append(os.path.dirname(secrets_path))
            try:
                import secrets
                args = list(getattr(secrets, 'GCE_PARAMS', []))
                kwargs = getattr(secrets, 'GCE_KEYWORD_PARAMS', {})
                secrets_found = True
            except:
                pass
        if not secrets_found:
            args = [
                self.config.get('gce', 'gce_service_account_email_address'),
                self.config.get('gce', 'gce_service_account_pem_file_path')
            ]
            kwargs = {'project': self.config.get('gce', 'gce_project_id')}

        # If the appropriate environment variables are set, they override
        # other configuration; process those into our args and kwargs.
        args[0] = os.environ.get('GCE_EMAIL', args[0])
        args[1] = os.environ.get('GCE_PEM_FILE_PATH', args[1])
        kwargs['project'] = os.environ.get('GCE_PROJECT', kwargs['project'])

        # Retrieve and return the GCE driver.
        gce = get_driver(Provider.GCE)(*args, **kwargs)
        gce.connection.user_agent_append(
            '%s/%s' % (USER_AGENT_PRODUCT, USER_AGENT_VERSION),
        )
        return gce

    def parse_env_zones(self):
        '''returns a list of comma separated zones parsed from the GCE_ZONE environment variable.
        If provided, this will be used to filter the results of the grouped_instances call'''
        import csv
        reader = csv.reader([os.environ.get('GCE_ZONE', "")], skipinitialspace=True)
        zones = [r for r in reader]
        return [z for z in zones[0]]

    def parse_cli_args(self):
        ''' Command line argument processing '''

        parser = argparse.ArgumentParser(
            description='Produce an Ansible Inventory file based on GCE')
        parser.add_argument('--list', action='store_true', default=True,
                            help='List instances (default: True)')
        parser.add_argument('--host', action='store',
                            help='Get all information about an instance')
        parser.add_argument('--pretty', action='store_true', default=False,
                            help='Pretty format (default: False)')
        parser.add_argument(
            '--refresh-cache', action='store_true', default=False,
            help='Force refresh of cache by making API requests (default: False - use cache files)')
        self.args = parser.parse_args()


    def node_to_dict(self, inst):
        md = {}

        if inst is None:
            return {}

        if 'items' in inst.extra['metadata']:
            for entry in inst.extra['metadata']['items']:
                md[entry['key']] = entry['value']

        net = inst.extra['networkInterfaces'][0]['network'].split('/')[-1]
        # default to external IP unless user has specified they prefer internal
        if self.ip_type == 'internal':
            ssh_host = inst.private_ips[0]
        else:
            ssh_host = inst.public_ips[0] if len(inst.public_ips) >= 1 else inst.private_ips[0]

        return {
            'gce_uuid': inst.uuid,
            'gce_id': inst.id,
            'gce_image': inst.image,
            'gce_machine_type': inst.size,
            'gce_private_ip': inst.private_ips[0],
            'gce_public_ip': inst.public_ips[0] if len(inst.public_ips) >= 1 else None,
            'gce_name': inst.name,
            'gce_description': inst.extra['description'],
            'gce_status': inst.extra['status'],
            'gce_zone': inst.extra['zone'].name,
            'gce_tags': inst.extra['tags'],
            'gce_metadata': md,
            'gce_network': net,
            # Hosts don't have a public name, so we add an IP
            'ansible_ssh_host': ssh_host
        }

    def load_inventory_from_cache(self):
        ''' Loads inventory from JSON on disk. '''

        try:
            self.inventory = self.cache.get_all_data_from_cache()
            hosts = self.inventory['_meta']['hostvars']
        except Exception as e:
            print(
                "Invalid inventory file %s. Please rebuild with --refresh-cache option."
                % (self.cache.cache_path_cache))
            raise

    def do_api_calls_update_cache(self):
        ''' Do API calls and save data in cache. '''
        zones = self.parse_env_zones()
        data = self.group_instances(zones)
        self.cache.write_to_cache(data)
        self.inventory = data

    def list_nodes(self):
        all_nodes = []
        params, more_results = {'maxResults': 500}, True
        while more_results:
            self.driver.connection.gce_params = params
            all_nodes.extend(self.driver.list_nodes())
            more_results = 'pageToken' in params
        return all_nodes

    def group_instances(self, zones=None):
        '''Group all instances'''
        groups = {}
        meta = {}
        meta["hostvars"] = {}

        for node in self.list_nodes():

            # This check filters on the desired instance states defined in the
            # config file with the instance_states config option.
            #
            # If the instance_states list is _empty_ then _ALL_ states are returned.
            #
            # If the instance_states list is _populated_ then check the current
            # state against the instance_states list
            if self.instance_states and not node.extra['status'] in self.instance_states:
                continue

            name = node.name

            meta["hostvars"][name] = self.node_to_dict(node)

            zone = node.extra['zone'].name

            # To avoid making multiple requests per zone
            # we list all nodes and then filter the results
            if zones and zone not in zones:
                continue

            if zone in groups: groups[zone].append(name)
            else: groups[zone] = [name]

            tags = node.extra['tags']
            for t in tags:
                if t.startswith('group-'):
                    tag = t[6:]
                else:
                    tag = 'tag_%s' % t
                if tag in groups: groups[tag].append(name)
                else: groups[tag] = [name]

            net = node.extra['networkInterfaces'][0]['network'].split('/')[-1]
            net = 'network_%s' % net
            if net in groups: groups[net].append(name)
            else: groups[net] = [name]

            machine_type = node.size
            if machine_type in groups: groups[machine_type].append(name)
            else: groups[machine_type] = [name]

            image = node.image and node.image or 'persistent_disk'
            if image in groups: groups[image].append(name)
            else: groups[image] = [name]

            status = node.extra['status']
            stat = 'status_%s' % status.lower()
            if stat in groups: groups[stat].append(name)
            else: groups[stat] = [name]

        groups["_meta"] = meta

        return groups

    def json_format_dict(self, data, pretty=False):
        ''' Converts a dict to a JSON object and dumps it as a formatted
        string '''

        if pretty:
            return json.dumps(data, sort_keys=True, indent=2)
        else:
            return json.dumps(data)

# Run the script
if __name__ == '__main__':
    GceInventory()
@@ -1 +0,0 @@
localhost ansible_connection=local ansible_become=no ansible_python_interpreter='/usr/bin/env python2'
@@ -1 +0,0 @@
localhost ansible_connection=local ansible_become=no ansible_python_interpreter='/usr/bin/env python2'
@@ -1,20 +0,0 @@
# Ansible libvirt external inventory script settings
#

[libvirt]

uri = qemu:///system

# API calls to libvirt can be slow. For this reason, we cache the results of an API
# call. Set this to the path you want cache files to be written to. Two files
# will be written to this directory:
#   - ansible-libvirt.cache
#   - ansible-libvirt.index
cache_path = /tmp

# The number of seconds a cache file is considered valid. After this many
# seconds, a new API call will be made, and the cache file will be updated.
cache_max_age = 900

@@ -1,191 +0,0 @@
#!/usr/bin/env python2
# pylint: skip-file

'''
libvirt external inventory script
=================================

Ansible has a feature where instead of reading from /etc/ansible/hosts
as a text file, it can query external programs to obtain the list
of hosts, groups the hosts are in, and even variables to assign to each host.

To use this, copy this file over /etc/ansible/hosts and chmod +x the file.
This, more or less, allows you to keep one central database containing
info about all of your managed instances.

'''

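# As a hedged illustration only (the script filename below is hypothetical,
# not part of this repository), the usual dynamic-inventory invocations would
# look like this once the script is executable:
#   $ ./libvirt_generic.py --list --pretty
#   $ ansible -i libvirt_generic.py all -m ping
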
# (c) 2015, Jason DeTiberus <jdetiber@redhat.com>
#
# This file is part of Ansible,
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.

######################################################################

import argparse
import ConfigParser
import os
import sys
import libvirt
import xml.etree.ElementTree as ET

try:
    import json
except ImportError:
    import simplejson as json


class LibvirtInventory(object):
    ''' libvirt dynamic inventory '''

    def __init__(self):
        ''' Main execution path '''

        self.inventory = dict()  # A list of groups and the hosts in that group
        self.cache = dict()      # Details about hosts in the inventory

        # Read settings and parse CLI arguments
        self.read_settings()
        self.parse_cli_args()

        if self.args.host:
            print(_json_format_dict(self.get_host_info(), self.args.pretty))
        elif self.args.list:
            print(_json_format_dict(self.get_inventory(), self.args.pretty))
        else:  # default action with no options
            print(_json_format_dict(self.get_inventory(), self.args.pretty))

    def read_settings(self):
        ''' Reads the settings from the libvirt.ini file '''

        config = ConfigParser.SafeConfigParser()
        config.read(
            os.path.dirname(os.path.realpath(__file__)) + '/libvirt.ini'
        )
        self.libvirt_uri = config.get('libvirt', 'uri')

    def parse_cli_args(self):
        ''' Command line argument processing '''

        parser = argparse.ArgumentParser(
            description='Produce an Ansible Inventory file based on libvirt'
        )
        parser.add_argument(
            '--list',
            action='store_true',
            default=True,
            help='List instances (default: True)'
        )
        parser.add_argument(
            '--host',
            action='store',
            help='Get all the variables about a specific instance'
        )
        parser.add_argument(
            '--pretty',
            action='store_true',
            default=False,
            help='Pretty format (default: False)'
        )
        self.args = parser.parse_args()

    def get_host_info(self):
        ''' Get variables about a specific host '''

        inventory = self.get_inventory()
        if self.args.host in inventory['_meta']['hostvars']:
            return inventory['_meta']['hostvars'][self.args.host]

    def get_inventory(self):
        ''' Construct the inventory '''

        inventory = dict(_meta=dict(hostvars=dict()))

        conn = libvirt.openReadOnly(self.libvirt_uri)
        if conn is None:
            print("Failed to open connection to %s" % self.libvirt_uri)
            sys.exit(1)

        domains = conn.listAllDomains()
        if domains is None:
            print("Failed to list domains for connection %s" % self.libvirt_uri)
            sys.exit(1)

        for domain in domains:
            hostvars = dict(libvirt_name=domain.name(),
                            libvirt_id=domain.ID(),
                            libvirt_uuid=domain.UUIDString())
            domain_name = domain.name()

            # TODO: add support for guests that are not in a running state
            state, _ = domain.state()
            # 1 (VIR_DOMAIN_RUNNING) is the state for a running guest
            if state != 1:
                continue

            hostvars['libvirt_status'] = 'running'

            root = ET.fromstring(domain.XMLDesc())
            ansible_ns = {'ansible': 'https://github.com/ansible/ansible'}
            for tag_elem in root.findall('./metadata/ansible:tags/ansible:tag', ansible_ns):
                tag = tag_elem.text
                _push(inventory, "tag_%s" % tag, domain_name)
                _push(hostvars, 'libvirt_tags', tag)

            # TODO: support more than one network interface, also support
            # interface types other than 'network'
            interface = root.find("./devices/interface[@type='network']")
            if interface is not None:
                source_elem = interface.find('source')
                mac_elem = interface.find('mac')
                if source_elem is not None and \
                   mac_elem is not None:
                    # Adding this to disable pylint check specifically
                    # ignoring libvirt-python versions that
                    # do not include DHCPLeases
                    # This is needed until we upgrade the build bot to
                    # RHEL7 (>= 1.2.6 libvirt)
                    # pylint: disable=no-member
                    dhcp_leases = conn.networkLookupByName(source_elem.get('network')) \
                                      .DHCPLeases(mac_elem.get('address'))
                    if len(dhcp_leases) > 0:
                        ip_address = dhcp_leases[0]['ipaddr']
                        hostvars['ansible_ssh_host'] = ip_address
                        hostvars['libvirt_ip_address'] = ip_address

            inventory['_meta']['hostvars'][domain_name] = hostvars

        return inventory

def _push(my_dict, key, element):
    '''
    Push element to the my_dict[key] list.
    After having initialized my_dict[key] if it doesn't exist.
    '''

    if key in my_dict:
        my_dict[key].append(element)
    else:
        my_dict[key] = [element]

def _json_format_dict(data, pretty=False):
    ''' Serialize data to a JSON formatted str '''

    if pretty:
        return json.dumps(data, sort_keys=True, indent=2)
    else:
        return json.dumps(data)

LibvirtInventory()
@@ -1 +0,0 @@
localhost ansible_become=no ansible_python_interpreter='/usr/bin/env python2' connection=local
@@ -1,247 +0,0 @@
#!/usr/bin/env python
# pylint: skip-file

# Copyright (c) 2012, Marco Vito Moscaritolo <marco@agavee.com>
# Copyright (c) 2013, Jesse Keating <jesse.keating@rackspace.com>
# Copyright (c) 2015, Hewlett-Packard Development Company, L.P.
# Copyright (c) 2016, Rackspace Australia
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.

# The OpenStack Inventory module uses os-client-config for configuration.
# https://github.com/stackforge/os-client-config
# This means it will either:
#  - Respect normal OS_* environment variables like other OpenStack tools
#  - Read values from a clouds.yaml file.
# If you want to configure via clouds.yaml, you can put the file in:
#  - Current directory
#  - ~/.config/openstack/clouds.yaml
#  - /etc/openstack/clouds.yaml
#  - /etc/ansible/openstack.yml
# The clouds.yaml file can contain entries for multiple clouds and multiple
# regions of those clouds. If it does, this inventory module will connect to
# all of them and present them as one contiguous inventory.
#
# See the adjacent openstack.yml file for an example config file.
# There are three ansible inventory specific options that can be set in
# the inventory section.
# expand_hostvars controls whether or not the inventory will make extra API
#                 calls to fill out additional information about each server
# use_hostnames changes the behavior from registering every host with its UUID
#               and making a group of its hostname to only doing this if the
#               hostname in question has more than one server
# fail_on_errors causes the inventory to fail and return no hosts if one cloud
#                has failed (for example, bad credentials or being offline).
#                When set to False, the inventory will return hosts from
#                whichever other clouds it can contact. (Default: True)

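# For illustration, a minimal /etc/ansible/openstack.yml might look like the
# hedged sketch below (the cloud name, auth values and option values are
# assumptions for the example, not taken from this repository):
#
#   clouds:
#     mycloud:
#       auth:
#         auth_url: https://keystone.example.com:5000/v2.0
#         username: demo
#         password: secret
#         project_name: demo
#   ansible:
#     use_hostnames: True
#     expand_hostvars: False
#     fail_on_errors: True
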
import argparse
import collections
import os
import sys
import time
from distutils.version import StrictVersion

try:
    import json
except:
    import simplejson as json

import os_client_config
import shade
import shade.inventory

CONFIG_FILES = ['/etc/ansible/openstack.yaml', '/etc/ansible/openstack.yml']


def get_groups_from_server(server_vars, namegroup=True):
    groups = []

    region = server_vars['region']
    cloud = server_vars['cloud']
    metadata = server_vars.get('metadata', {})

    # Create a group for the cloud
    groups.append(cloud)

    # Create a group on region
    groups.append(region)

    # And one by cloud_region
    groups.append("%s_%s" % (cloud, region))

    # Check if group metadata key in servers' metadata
    if 'group' in metadata:
        groups.append(metadata['group'])

    for extra_group in metadata.get('groups', '').split(','):
        if extra_group:
            groups.append(extra_group.strip())

    groups.append('instance-%s' % server_vars['id'])
    if namegroup:
        groups.append(server_vars['name'])

    for key in ('flavor', 'image'):
        if 'name' in server_vars[key]:
            groups.append('%s-%s' % (key, server_vars[key]['name']))

    for key, value in iter(metadata.items()):
        groups.append('meta-%s_%s' % (key, value))

    az = server_vars.get('az', None)
    if az:
        # Make groups for az, region_az and cloud_region_az
        groups.append(az)
        groups.append('%s_%s' % (region, az))
        groups.append('%s_%s_%s' % (cloud, region, az))
    return groups


def get_host_groups(inventory, refresh=False):
    (cache_file, cache_expiration_time) = get_cache_settings()
    if is_cache_stale(cache_file, cache_expiration_time, refresh=refresh):
        groups = to_json(get_host_groups_from_cloud(inventory))
        open(cache_file, 'w').write(groups)
    else:
        groups = open(cache_file, 'r').read()
    return groups


def append_hostvars(hostvars, groups, key, server, namegroup=False):
    hostvars[key] = dict(
        ansible_ssh_host=server['interface_ip'],
        openstack=server)
    for group in get_groups_from_server(server, namegroup=namegroup):
        groups[group].append(key)


def get_host_groups_from_cloud(inventory):
    groups = collections.defaultdict(list)
    firstpass = collections.defaultdict(list)
    hostvars = {}
    list_args = {}
    if hasattr(inventory, 'extra_config'):
        use_hostnames = inventory.extra_config['use_hostnames']
        list_args['expand'] = inventory.extra_config['expand_hostvars']
        if StrictVersion(shade.__version__) >= StrictVersion("1.6.0"):
            list_args['fail_on_cloud_config'] = \
                inventory.extra_config['fail_on_errors']
    else:
        use_hostnames = False

    for server in inventory.list_hosts(**list_args):

        if 'interface_ip' not in server:
            continue
        firstpass[server['name']].append(server)
    for name, servers in firstpass.items():
        if len(servers) == 1 and use_hostnames:
            append_hostvars(hostvars, groups, name, servers[0])
        else:
            server_ids = set()
            # Trap for duplicate results
            for server in servers:
                server_ids.add(server['id'])
            if len(server_ids) == 1 and use_hostnames:
                append_hostvars(hostvars, groups, name, servers[0])
            else:
                for server in servers:
                    append_hostvars(
                        hostvars, groups, server['id'], server,
                        namegroup=True)
    groups['_meta'] = {'hostvars': hostvars}
    return groups


def is_cache_stale(cache_file, cache_expiration_time, refresh=False):
    ''' Determines if cache file has expired, or if it is still valid '''
    if refresh:
        return True
    if os.path.isfile(cache_file) and os.path.getsize(cache_file) > 0:
        mod_time = os.path.getmtime(cache_file)
        current_time = time.time()
        if (mod_time + cache_expiration_time) > current_time:
            return False
    return True


def get_cache_settings():
    config = os_client_config.config.OpenStackConfig(
        config_files=os_client_config.config.CONFIG_FILES + CONFIG_FILES)
    # For inventory-wide caching
    cache_expiration_time = config.get_cache_expiration_time()
    cache_path = config.get_cache_path()
    if not os.path.exists(cache_path):
        os.makedirs(cache_path)
    cache_file = os.path.join(cache_path, 'ansible-inventory.cache')
    return (cache_file, cache_expiration_time)


def to_json(in_dict):
    return json.dumps(in_dict, sort_keys=True, indent=2)


def parse_args():
    parser = argparse.ArgumentParser(description='OpenStack Inventory Module')
    parser.add_argument('--private',
                        action='store_true',
                        help='Use private address for ansible host')
    parser.add_argument('--refresh', action='store_true',
                        help='Refresh cached information')
    parser.add_argument('--debug', action='store_true', default=False,
                        help='Enable debug output')
    group = parser.add_mutually_exclusive_group(required=True)
    group.add_argument('--list', action='store_true',
                       help='List active servers')
    group.add_argument('--host', help='List details about the specific host')

    return parser.parse_args()


def main():
    args = parse_args()
    try:
        config_files = os_client_config.config.CONFIG_FILES + CONFIG_FILES
        shade.simple_logging(debug=args.debug)
        inventory_args = dict(
            refresh=args.refresh,
            config_files=config_files,
            private=args.private,
        )
        if hasattr(shade.inventory.OpenStackInventory, 'extra_config'):
            inventory_args.update(dict(
                config_key='ansible',
                config_defaults={
                    'use_hostnames': False,
                    'expand_hostvars': True,
                    'fail_on_errors': True,
                }
            ))

        inventory = shade.inventory.OpenStackInventory(**inventory_args)

        if args.list:
            output = get_host_groups(inventory, refresh=args.refresh)
        elif args.host:
            output = to_json(inventory.get_host(args.host))
        print(output)
    except shade.OpenStackCloudException as e:
        sys.stderr.write('%s\n' % e.message)
        sys.exit(1)
    sys.exit(0)


if __name__ == '__main__':
    main()
@@ -12,8 +12,6 @@ And:

- [`adhoc`](adhoc) is a generic home for playbooks and tasks that are community
  supported and not officially maintained.
- [`aws`](aws), [`gce`](gce), [`libvirt`](libvirt) and [`openstack`](openstack)
  are related to the [`bin/cluster`](../bin) tool and its usage is deprecated.

Refer to the `README.md` file in each playbook directory for more information
about them.

@@ -1,9 +1,5 @@
# AWS playbooks

Parts of this playbook directory are meant to be driven by [`bin/cluster`](../../bin),
which is community supported and whose use is considered **deprecated**.


## Provisioning

With recent desire for provisioning from customers and developers alike, the AWS

@@ -1,35 +0,0 @@
---
- name: Launch instance(s)
  hosts: localhost
  connection: local
  become: no
  gather_facts: no
  vars_files:
  - vars.yml
  vars:
    oo_extend_env: True
  tasks:
  - include: ../../common/openshift-cluster/tasks/set_node_launch_facts.yml
    vars:
      type: "compute"
      count: "{{ num_nodes }}"
  - include: tasks/launch_instances.yml
    vars:
      instances: "{{ node_names }}"
      cluster: "{{ cluster_id }}"
      type: "{{ k8s_type }}"
      g_sub_host_type: "{{ sub_host_type }}"

  - include: ../../common/openshift-cluster/tasks/set_node_launch_facts.yml
    vars:
      type: "infra"
      count: "{{ num_infra }}"
  - include: tasks/launch_instances.yml
    vars:
      instances: "{{ node_names }}"
      cluster: "{{ cluster_id }}"
      type: "{{ k8s_type }}"
      g_sub_host_type: "{{ sub_host_type }}"

- include: scaleup.yml
- include: list.yml
@@ -1,25 +0,0 @@
---
g_all_hosts: "{{ groups['tag_clusterid_' ~ cluster_id] | default([])
                 | intersect(groups['tag_environment_' ~ cluster_env] | default([])) }}"

g_etcd_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type_etcd'] | default([])) }}"

g_new_etcd_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type_new_etcd'] | default([])) }}"

g_lb_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type_lb'] | default([])) }}"

g_nfs_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type_nfs'] | default([])) }}"

g_glusterfs_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type-glusterfs'] | default([])) }}"

g_master_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type_master'] | default([])) }}"

g_new_master_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type_new_master'] | default([])) }}"

g_node_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type_node'] | default([])) }}"

g_new_node_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type_new_node'] | default([])) }}"

g_infra_hosts: "{{ g_node_hosts | intersect(groups['tag_sub-host-type_infra'] | default([])) }}"

g_compute_hosts: "{{ g_node_hosts | intersect(groups['tag_sub-host-type_compute'] | default([])) }}"
@@ -1,37 +0,0 @@
---
- hosts: localhost
  gather_facts: no
  tasks:
  - include_vars: vars.yml
  - include_vars: cluster_hosts.yml
  - add_host:
      name: "{{ item }}"
      groups: l_oo_all_hosts
    with_items: "{{ g_all_hosts | default([]) }}"

- hosts: l_oo_all_hosts
  gather_facts: no
  tasks:
  - include_vars: vars.yml
  - include_vars: cluster_hosts.yml

- include: ../../common/openshift-cluster/config.yml
  vars:
    g_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
    g_sudo: "{{ deployment_vars[deployment_type].become }}"
    g_nodeonmaster: true
    openshift_cluster_id: "{{ cluster_id }}"
    openshift_debug_level: "{{ debug_level }}"
    openshift_deployment_type: "{{ deployment_type }}"
    openshift_public_hostname: "{{ ec2_ip_address }}"
    openshift_hosted_registry_selector: 'type=infra'
    openshift_hosted_router_selector: 'type=infra'
    openshift_node_labels:
      region: "{{ deployment_vars[deployment_type].region }}"
      type: "{{ hostvars[inventory_hostname]['ec2_tag_sub-host-type'] }}"
    openshift_master_cluster_method: 'native'
    openshift_use_openshift_sdn: "{{ lookup('oo_option', 'use_openshift_sdn') }}"
    os_sdn_network_plugin_name: "{{ lookup('oo_option', 'sdn_network_plugin_name') }}"
    openshift_use_flannel: "{{ lookup('oo_option', 'use_flannel') }}"
    openshift_use_calico: "{{ lookup('oo_option', 'use_calico') }}"
    openshift_use_fluentd: "{{ lookup('oo_option', 'use_fluentd') }}"
@@ -1,54 +0,0 @@
---
- name: Launch instance(s)
  hosts: localhost
  connection: local
  become: no
  gather_facts: no
  vars_files:
  - vars.yml
  tasks:
  - include: ../../common/openshift-cluster/tasks/set_etcd_launch_facts.yml
  - include: tasks/launch_instances.yml
    vars:
      instances: "{{ etcd_names }}"
      cluster: "{{ cluster_id }}"
      type: "{{ k8s_type }}"
      g_sub_host_type: "default"

  - include: ../../common/openshift-cluster/tasks/set_master_launch_facts.yml
  - include: tasks/launch_instances.yml
    vars:
      instances: "{{ master_names }}"
      cluster: "{{ cluster_id }}"
      type: "{{ k8s_type }}"
      g_sub_host_type: "default"

  - include: ../../common/openshift-cluster/tasks/set_node_launch_facts.yml
    vars:
      type: "compute"
      count: "{{ num_nodes }}"
  - include: tasks/launch_instances.yml
    vars:
      instances: "{{ node_names }}"
      cluster: "{{ cluster_id }}"
      type: "{{ k8s_type }}"
      g_sub_host_type: "{{ sub_host_type }}"

  - include: ../../common/openshift-cluster/tasks/set_node_launch_facts.yml
    vars:
      type: "infra"
      count: "{{ num_infra }}"
  - include: tasks/launch_instances.yml
    vars:
      instances: "{{ node_names }}"
      cluster: "{{ cluster_id }}"
      type: "{{ k8s_type }}"
      g_sub_host_type: "{{ sub_host_type }}"

  - add_host:
      name: "{{ master_names.0 }}"
      groups: service_master
    when: master_names is defined and master_names.0 is defined

- include: update.yml
- include: list.yml
@@ -1,23 +0,0 @@
---
- name: Generate oo_list_hosts group
  hosts: localhost
  gather_facts: no
  connection: local
  become: no
  vars_files:
  - vars.yml
  tasks:
  - set_fact: scratch_group=tag_clusterid_{{ cluster_id }}
    when: cluster_id != ''
  - set_fact: scratch_group=all
    when: cluster_id == ''
  - add_host:
      name: "{{ item }}"
      groups: oo_list_hosts
      ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
      ansible_become: "{{ deployment_vars[deployment_type].become }}"
      oo_public_ipv4: "{{ hostvars[item].ec2_ip_address }}"
      oo_private_ipv4: "{{ hostvars[item].ec2_private_ip_address }}"
    with_items: "{{ groups[scratch_group] | default([]) | difference(['localhost']) }}"
  - debug:
      msg: "{{ hostvars | oo_select_keys(groups[scratch_group] | default([])) | oo_pretty_print_cluster }}"
@@ -1,32 +0,0 @@
---

- hosts: localhost
  gather_facts: no
  connection: local
  become: no
  vars_files:
  - vars.yml
  tasks:
  - name: Evaluate oo_hosts_to_update
    add_host:
      name: "{{ item }}"
      groups: oo_hosts_to_update
      ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
      ansible_become: "{{ deployment_vars[deployment_type].become }}"
    with_items: "{{ groups.nodes_to_add }}"

- include: ../../common/openshift-cluster/update_repos_and_packages.yml

- include: ../../common/openshift-cluster/scaleup.yml
  vars_files:
  - ../../aws/openshift-cluster/vars.yml
  - ../../aws/openshift-cluster/cluster_hosts.yml
  vars:
    g_new_node_hosts: "{{ groups.nodes_to_add }}"
    g_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
    g_sudo: "{{ deployment_vars[deployment_type].become }}"
    g_nodeonmaster: true
    openshift_cluster_id: "{{ cluster_id }}"
    openshift_debug_level: "{{ debug_level }}"
    openshift_deployment_type: "{{ deployment_type }}"
    openshift_public_hostname: "{{ ec2_ip_address }}"
@@ -1,31 +0,0 @@
---
- name: Call same systemctl command for openshift on all instance(s)
  hosts: localhost
  connection: local
  become: no
  gather_facts: no
  vars_files:
  - vars.yml
  - cluster_hosts.yml
  tasks:
  - fail: msg="cluster_id is required to be injected in this playbook"
    when: cluster_id is not defined

  - name: Evaluate g_service_masters
    add_host:
      name: "{{ item }}"
      groups: g_service_masters
      ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
      ansible_become: "{{ deployment_vars[deployment_type].become }}"
    with_items: "{{ master_hosts | default([]) }}"

  - name: Evaluate g_service_nodes
    add_host:
      name: "{{ item }}"
      groups: g_service_nodes
      ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
      ansible_become: "{{ deployment_vars[deployment_type].become }}"
    with_items: "{{ node_hosts | default([]) }}"

- include: ../../common/openshift-node/service.yml
- include: ../../common/openshift-master/service.yml
@@ -1,188 +0,0 @@
---
- set_fact:
    created_by: "{{ lookup('env', 'LOGNAME')|default(cluster, true) }}"
    docker_vol_ephemeral: "{{ lookup('env', 'os_docker_vol_ephemeral') | default(false, true) }}"
    cluster: "{{ cluster_id }}"
    env: "{{ cluster_env }}"
    host_type: "{{ type }}"
    sub_host_type: "{{ g_sub_host_type }}"

- set_fact:
    ec2_instance_type: "{{ lookup('env', 'ec2_master_instance_type') | default(deployment_vars[deployment_type].type, true) }}"
    ec2_security_groups: "{{ lookup('env', 'ec2_master_security_groups') | default(deployment_vars[deployment_type].security_groups, true) }}"
  when: host_type == "master" and sub_host_type == "default"

- set_fact:
    ec2_instance_type: "{{ lookup('env', 'ec2_etcd_instance_type') | default(deployment_vars[deployment_type].type, true) }}"
    ec2_security_groups: "{{ lookup('env', 'ec2_etcd_security_groups') | default(deployment_vars[deployment_type].security_groups, true) }}"
  when: host_type == "etcd" and sub_host_type == "default"

- set_fact:
    ec2_instance_type: "{{ lookup('env', 'ec2_infra_instance_type') | default(deployment_vars[deployment_type].type, true) }}"
    ec2_security_groups: "{{ lookup('env', 'ec2_infra_security_groups') | default(deployment_vars[deployment_type].security_groups, true) }}"
  when: host_type == "node" and sub_host_type == "infra"

- set_fact:
    ec2_instance_type: "{{ lookup('env', 'ec2_node_instance_type') | default(deployment_vars[deployment_type].type, true) }}"
    ec2_security_groups: "{{ lookup('env', 'ec2_node_security_groups') | default(deployment_vars[deployment_type].security_groups, true) }}"
  when: host_type == "node" and sub_host_type == "compute"

- set_fact:
    ec2_instance_type: "{{ deployment_vars[deployment_type].type }}"
  when: ec2_instance_type is not defined
- set_fact:
    ec2_security_groups: "{{ deployment_vars[deployment_type].security_groups }}"
  when: ec2_security_groups is not defined

- name: Find amis for deployment_type
  ec2_ami_find:
    region: "{{ deployment_vars[deployment_type].region }}"
    ami_id: "{{ deployment_vars[deployment_type].image }}"
    name: "{{ deployment_vars[deployment_type].image_name }}"
  register: ami_result

- fail: msg="Could not find requested ami"
  when: not ami_result.results

- set_fact:
    latest_ami: "{{ ami_result.results | oo_ami_selector(deployment_vars[deployment_type].image_name) }}"
    volume_defs:
      etcd:
        root:
          volume_size: "{{ lookup('env', 'os_etcd_root_vol_size') | default(25, true) }}"
          device_type: "{{ lookup('env', 'os_etcd_root_vol_type') | default('gp2', true) }}"
          iops: "{{ lookup('env', 'os_etcd_root_vol_iops') | default(500, true) }}"
      master:
        root:
          volume_size: "{{ lookup('env', 'os_master_root_vol_size') | default(25, true) }}"
          device_type: "{{ lookup('env', 'os_master_root_vol_type') | default('gp2', true) }}"
          iops: "{{ lookup('env', 'os_master_root_vol_iops') | default(500, true) }}"
        docker:
          volume_size: "{{ lookup('env', 'os_docker_vol_size') | default(10, true) }}"
          device_type: "{{ lookup('env', 'os_docker_vol_type') | default('gp2', true) }}"
          iops: "{{ lookup('env', 'os_docker_vol_iops') | default(500, true) }}"
      node:
        root:
          volume_size: "{{ lookup('env', 'os_node_root_vol_size') | default(85, true) }}"
          device_type: "{{ lookup('env', 'os_node_root_vol_type') | default('gp2', true) }}"
          iops: "{{ lookup('env', 'os_node_root_vol_iops') | default(500, true) }}"
        docker:
          volume_size: "{{ lookup('env', 'os_docker_vol_size') | default(32, true) }}"
          device_type: "{{ lookup('env', 'os_docker_vol_type') | default('gp2', true) }}"
          iops: "{{ lookup('env', 'os_docker_vol_iops') | default(500, true) }}"

- set_fact:
    volumes: "{{ volume_defs | oo_ec2_volume_definition(host_type, docker_vol_ephemeral | bool) }}"

- name: Launch instance(s)
  ec2:
    state: present
    region: "{{ deployment_vars[deployment_type].region }}"
    keypair: "{{ deployment_vars[deployment_type].keypair }}"
    group: "{{ deployment_vars[deployment_type].security_groups }}"
    instance_type: "{{ ec2_instance_type }}"
    image: "{{ deployment_vars[deployment_type].image }}"
    count: "{{ instances | length }}"
    vpc_subnet_id: "{{ deployment_vars[deployment_type].vpc_subnet }}"
    assign_public_ip: "{{ deployment_vars[deployment_type].assign_public_ip }}"
    user_data: "{{ lookup('template', '../templates/user_data.j2') }}"
    wait: yes
    instance_tags:
      created-by: "{{ created_by }}"
      clusterid: "{{ cluster }}"
      environment: "{{ cluster_env }}"
      host-type: "{{ host_type }}"
      sub-host-type: "{{ sub_host_type }}"
    volumes: "{{ volumes }}"
  register: ec2

- name: Add Name tag to instances
  ec2_tag: resource={{ item.1.id }} region={{ deployment_vars[deployment_type].region }} state=present
  with_together:
  - "{{ instances }}"
  - "{{ ec2.instances }}"
  args:
    tags:
      Name: "{{ item.0 }}"

- set_fact:
    instance_groups: >
      tag_created-by_{{ created_by }}, tag_clusterid_{{ cluster }},
      tag_environment_{{ cluster_env }}, tag_host-type_{{ host_type }},
      tag_sub-host-type_{{ sub_host_type }}

- set_fact:
    node_label:
      region: "{{ deployment_vars[deployment_type].region }}"
      type: "{{ sub_host_type }}"
  when: host_type == "node"

- set_fact:
    node_label:
      region: "{{ deployment_vars[deployment_type].region }}"
      type: "{{ host_type }}"
  when: host_type != "node"

- set_fact:
    logrotate:
    - name: syslog
      path: |
        /var/log/cron
        /var/log/maillog
        /var/log/messages
        /var/log/secure
        /var/log/spooler
      options:
      - daily
      - rotate 7
      - compress
      - sharedscripts
      - missingok
      scripts:
        postrotate: "/bin/kill -HUP `cat /var/run/syslogd.pid 2> /dev/null` 2> /dev/null || true"

- name: Add new instances groups and variables
  add_host:
    hostname: "{{ item.0 }}"
    ansible_ssh_host: "{{ item.1.dns_name }}"
    ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
    ansible_become: "{{ deployment_vars[deployment_type].become }}"
    groups: "{{ instance_groups }}"
    ec2_private_ip_address: "{{ item.1.private_ip }}"
    ec2_ip_address: "{{ item.1.public_ip }}"
    ec2_tag_sub-host-type: "{{ sub_host_type }}"
    openshift_node_labels: "{{ node_label }}"
    logrotate_scripts: "{{ logrotate }}"
  with_together:
  - "{{ instances }}"
  - "{{ ec2.instances }}"

- name: Add new instances to nodes_to_add group if needed
  add_host:
    hostname: "{{ item.0 }}"
    ansible_ssh_host: "{{ item.1.dns_name }}"
    ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
    ansible_become: "{{ deployment_vars[deployment_type].become }}"
    groups: nodes_to_add
    ec2_private_ip_address: "{{ item.1.private_ip }}"
    ec2_ip_address: "{{ item.1.public_ip }}"
    openshift_node_labels: "{{ node_label }}"
    logrotate_scripts: "{{ logrotate }}"
  with_together:
  - "{{ instances }}"
  - "{{ ec2.instances }}"
  when: oo_extend_env is defined and oo_extend_env | bool

- name: Wait for ssh
  wait_for: "port=22 host={{ item.dns_name }}"
  with_items: "{{ ec2.instances }}"

- name: Wait for user setup
  command: "ssh -o StrictHostKeyChecking=no -o PasswordAuthentication=no -o ConnectTimeout=10 -o UserKnownHostsFile=/dev/null {{ hostvars[item.0].ansible_ssh_user }}@{{ item.1.dns_name }} echo {{ hostvars[item.0].ansible_ssh_user }} user is setup"
  register: result
  until: result.rc == 0
  retries: 20
  delay: 10
  with_together:
  - "{{ instances }}"
  - "{{ ec2.instances }}"
@@ -1,22 +0,0 @@
#cloud-config
{% if type in ['node', 'master'] and 'docker' in volume_defs[type] %}
mounts:
- [ xvdb ]
- [ ephemeral0 ]
{% endif %}

write_files:
{% if type in ['node', 'master'] and 'docker' in volume_defs[type] %}
- content: |
    DEVS=/dev/xvdb
    VG=docker_vg
  path: /etc/sysconfig/docker-storage-setup
  owner: root:root
  permissions: '0644'
{% endif %}
{% if deployment_vars[deployment_type].become | bool %}
- path: /etc/sudoers.d/99-{{ deployment_vars[deployment_type].ssh_user }}-cloud-init-requiretty
  permissions: 440
  content: |
    Defaults:{{ deployment_vars[deployment_type].ssh_user }} !requiretty
{% endif %}
@@ -1,77 +0,0 @@
---
- name: Terminate instance(s)
  hosts: localhost
  connection: local
  become: no
  gather_facts: no
  vars_files:
  - vars.yml
  tasks:
  - add_host:
      name: "{{ item }}"
      groups: oo_hosts_to_terminate
      ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
      ansible_become: "{{ deployment_vars[deployment_type].become }}"
    with_items: "{{ (groups['tag_clusterid_' ~ cluster_id] | default([])) | difference(['localhost']) }}"

- name: Unsubscribe VMs
  hosts: oo_hosts_to_terminate
  roles:
  - role: rhel_unsubscribe
    when: deployment_type in ['atomic-enterprise', 'enterprise', 'openshift-enterprise'] and
          ansible_distribution == "RedHat" and
          lookup('oo_option', 'rhel_skip_subscription') | default(rhsub_skip, True) |
          default('no', True) | lower in ['no', 'false']

- name: Terminate instances
  hosts: localhost
  connection: local
  become: no
  gather_facts: no
  tasks:
  - name: Remove tags from instances
    ec2_tag:
      resource: "{{ hostvars[item]['ec2_id'] }}"
      region: "{{ hostvars[item]['ec2_region'] }}"
      state: absent
      tags:
        environment: "{{ hostvars[item]['ec2_tag_environment'] }}"
        clusterid: "{{ hostvars[item]['ec2_tag_clusterid'] }}"
        host-type: "{{ hostvars[item]['ec2_tag_host-type'] }}"
        sub_host_type: "{{ hostvars[item]['ec2_tag_sub-host-type'] }}"
    with_items: "{{ groups.oo_hosts_to_terminate }}"
    when: "'oo_hosts_to_terminate' in groups"

  - name: Terminate instances
    ec2:
      state: absent
      instance_ids: ["{{ hostvars[item].ec2_id }}"]
      region: "{{ hostvars[item].ec2_region }}"
    ignore_errors: yes
    register: ec2_term
    with_items: "{{ groups.oo_hosts_to_terminate }}"
    when: "'oo_hosts_to_terminate' in groups"

  # Fail if any of the instances failed to terminate with an error other
  # than 403 Forbidden
  - fail:
      msg: "Terminating instance {{ item.ec2_id }} failed with message {{ item.msg }}"
    when: "'oo_hosts_to_terminate' in groups and item.has_key('failed') and item.failed"
    with_items: "{{ ec2_term.results }}"

  - name: Stop instance if termination failed
    ec2:
      state: stopped
      instance_ids: ["{{ item.item.ec2_id }}"]
      region: "{{ item.item.ec2_region }}"
    register: ec2_stop
    when: "'oo_hosts_to_terminate' in groups and item.has_key('failed') and item.failed"
    with_items: "{{ ec2_term.results }}"

  - name: Rename stopped instances
    ec2_tag: resource={{ item.item.item.ec2_id }} region={{ item.item.item.ec2_region }} state=present
    args:
      tags:
        Name: "{{ item.item.item.ec2_tag_Name }}-terminate"
    with_items: "{{ ec2_stop.results }}"
    when: ec2_stop | changed
@@ -1,34 +0,0 @@
---
- hosts: localhost
  gather_facts: no
  tasks:
  - include_vars: vars.yml
  - include_vars: cluster_hosts.yml
  - add_host:
      name: "{{ item }}"
      groups: l_oo_all_hosts
    with_items: "{{ g_all_hosts }}"

- hosts: l_oo_all_hosts
  gather_facts: no
  tasks:
  - include_vars: vars.yml
  - include_vars: cluster_hosts.yml

- name: Update - Populate oo_hosts_to_update group
  hosts: localhost
  connection: local
  become: no
  gather_facts: no
  tasks:
  - name: Update - Evaluate oo_hosts_to_update
    add_host:
      name: "{{ item }}"
      groups: oo_hosts_to_update
      ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
      ansible_become: "{{ deployment_vars[deployment_type].become }}"
    with_items: "{{ g_all_hosts | default([]) }}"

- include: ../../common/openshift-cluster/update_repos_and_packages.yml

- include: config.yml
@@ -1,36 +1,4 @@
---
debug_level: 2

deployment_rhel7_ent_base:
  # rhel-7.1, requires cloud access subscription
  image: "{{ lookup('oo_option', 'ec2_image') | default('ami-10251c7a', True) }}"
  image_name: "{{ lookup('oo_option', 'ec2_image_name') | default(None, True) }}"
  region: "{{ lookup('oo_option', 'ec2_region') | default('us-east-1', True) }}"
  ssh_user: ec2-user
  become: yes
  keypair: "{{ lookup('oo_option', 'ec2_keypair') | default('libra', True) }}"
  type: "{{ lookup('oo_option', 'ec2_instance_type') | default('m4.large', True) }}"
  security_groups: "{{ lookup('oo_option', 'ec2_security_groups') | default([ 'public' ], True) }}"
  vpc_subnet: "{{ lookup('oo_option', 'ec2_vpc_subnet') | default(omit, True) }}"
  assign_public_ip: "{{ lookup('oo_option', 'ec2_assign_public_ip') | default(omit, True) }}"

deployment_vars:
  origin:
    # centos-7, requires marketplace
    image: "{{ lookup('oo_option', 'ec2_image') | default('ami-6d1c2007', True) }}"
    image_name: "{{ lookup('oo_option', 'ec2_image_name') | default(None, True) }}"
    region: "{{ lookup('oo_option', 'ec2_region') | default('us-east-1', True) }}"
    ssh_user: centos
    become: yes
    keypair: "{{ lookup('oo_option', 'ec2_keypair') | default('libra', True) }}"
    type: "{{ lookup('oo_option', 'ec2_instance_type') | default('m4.large', True) }}"
    security_groups: "{{ lookup('oo_option', 'ec2_security_groups') | default([ 'public' ], True) }}"
    vpc_subnet: "{{ lookup('oo_option', 'ec2_vpc_subnet') | default(omit, True) }}"
    assign_public_ip: "{{ lookup('oo_option', 'ec2_assign_public_ip') | default(omit, True) }}"

  enterprise: "{{ deployment_rhel7_ent_base }}"
  openshift-enterprise: "{{ deployment_rhel7_ent_base }}"
  atomic-enterprise: "{{ deployment_rhel7_ent_base }}"

clusterid: mycluster
region: us-east-1

@@ -1,4 +0,0 @@
|
||||
---
|
||||
- include: rhel_subscribe.yml
|
||||
|
||||
- include: config.yml
|
||||
@@ -1,9 +1,8 @@
 # Common playbooks
 
 This directory has a generic set of playbooks that are included by playbooks in
-[`byo`](../byo), as well as other playbooks related to the
-[`bin/cluster`](../../bin) tool.
+[`byo`](../byo).
 
 Note: playbooks in this directory use generic group names that do not line up
-with the groups used by the `byo` playbooks or `bin/cluster` derived playbooks,
-requiring an explicit remapping of groups.
+with the groups used by the `byo` playbooks, requiring an explicit remapping of
+groups.
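Editor's note: the explicit remapping the README mentions is ordinarily done with `add_host`. A minimal sketch, not taken from the removed files (the `masters` inventory group and the play itself are assumptions), of mapping a byo-style group onto a generic `oo_`-prefixed group:

```
# Illustrative only: remap a byo-style inventory group onto the generic
# group name the common playbooks consume. Group names here are assumptions.
- name: Remap byo groups to generic group names
  hosts: localhost
  connection: local
  gather_facts: no
  tasks:
    - name: Evaluate oo_masters_to_config
      add_host:
        name: "{{ item }}"
        groups: oo_masters_to_config
      with_items: "{{ groups['masters'] | default([]) }}"
```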
@@ -1,23 +0,0 @@
---
- name: Populate g_service_etcd host group if needed
  hosts: localhost
  connection: local
  become: no
  gather_facts: no
  tasks:
    - fail: msg="new_cluster_state is required to be injected in this playbook"
      when: new_cluster_state is not defined

    - name: Evaluate g_service_etcd
      add_host:
        name: "{{ item }}"
        groups: g_service_etcd
      with_items: "{{ oo_host_group_exp | default([]) }}"
      changed_when: False

- name: Change etcd state on etcd instance(s)
  hosts: g_service_etcd
  connection: ssh
  gather_facts: no
  tasks:
    - service: name=etcd state="{{ new_cluster_state }}"
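Editor's note: the lb, master, nfs, and node service playbooks below follow the same contract as this etcd one: the caller injects `new_cluster_state` and supplies the target hosts through `oo_host_group_exp`. A minimal sketch of how a caller might drive it (the include path and group expression are assumptions):

```
# Illustrative only: restart etcd across the hosts matched by the expression.
# Both variable names come from the playbook above; the file name and the
# tag-based group used here are assumed.
- include: service.yml
  vars:
    new_cluster_state: restarted
    oo_host_group_exp: "{{ groups['tag_host-type-etcd'] | default([]) }}"
```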
@@ -1,23 +0,0 @@
---
- name: Populate g_service_lb host group if needed
  hosts: localhost
  connection: local
  become: no
  gather_facts: no
  tasks:
    - fail: msg="new_cluster_state is required to be injected in this playbook"
      when: new_cluster_state is not defined

    - name: Evaluate g_service_lb
      add_host:
        name: "{{ item }}"
        groups: g_service_lb
      with_items: "{{ oo_host_group_exp | default([]) }}"
      changed_when: False

- name: Change state on lb instance(s)
  hosts: g_service_lb
  connection: ssh
  gather_facts: no
  tasks:
    - service: name=haproxy state="{{ new_cluster_state }}"

@@ -1,23 +0,0 @@
---
- name: Populate g_service_masters host group if needed
  hosts: localhost
  gather_facts: no
  connection: local
  become: no
  tasks:
    - fail: msg="new_cluster_state is required to be injected in this playbook"
      when: new_cluster_state is not defined

    - name: Evaluate g_service_masters
      add_host:
        name: "{{ item }}"
        groups: g_service_masters
      with_items: "{{ oo_host_group_exp | default([]) }}"
      changed_when: False

- name: Change state on master instance(s)
  hosts: g_service_masters
  connection: ssh
  gather_facts: no
  tasks:
    - service: name={{ openshift.common.service_type }}-master state="{{ new_cluster_state }}"

@@ -1,21 +0,0 @@
---
- name: Populate g_service_nfs host group if needed
  hosts: localhost
  gather_facts: no
  tasks:
    - fail: msg="new_cluster_state is required to be injected in this playbook"
      when: new_cluster_state is not defined

    - name: Evaluate g_service_nfs
      add_host:
        name: "{{ item }}"
        groups: g_service_nfs
      with_items: "{{ oo_host_group_exp | default([]) }}"
      changed_when: False

- name: Change state on nfs instance(s)
  hosts: g_service_nfs
  connection: ssh
  gather_facts: no
  tasks:
    - service: name=nfs-server state="{{ new_cluster_state }}"

@@ -1,26 +0,0 @@
---
- name: Populate g_service_nodes host group if needed
  hosts: localhost
  connection: local
  become: no
  gather_facts: no
  tasks:
    - fail: msg="new_cluster_state is required to be injected in this playbook"
      when: new_cluster_state is not defined

    - name: Evaluate g_service_nodes
      add_host:
        name: "{{ item }}"
        groups: g_service_nodes
      with_items: "{{ oo_host_group_exp | default([]) }}"
      changed_when: False

- name: Change state on node instance(s)
  hosts: g_service_nodes
  connection: ssh
  gather_facts: no
  tasks:
    - name: Change state on node instance(s)
      service:
        name: "{{ service_type }}-node"
        state: "{{ new_cluster_state }}"
@@ -1,4 +0,0 @@
# GCE playbooks

This playbook directory is meant to be driven by [`bin/cluster`](../../bin),
which is community supported and most use is considered deprecated.

@@ -1,43 +0,0 @@
---
- name: Launch instance(s)
  hosts: localhost
  connection: local
  become: no
  gather_facts: no
  vars_files:
    - vars.yml
  vars:
    oo_extend_env: True
  tasks:
    - fail:
        msg: Deployment type not supported for gce provider yet
      when: deployment_type == 'enterprise'

    - include: ../../common/openshift-cluster/tasks/set_node_launch_facts.yml
      vars:
        type: "compute"
        count: "{{ num_nodes }}"
    - include: tasks/launch_instances.yml
      vars:
        instances: "{{ node_names }}"
        cluster: "{{ cluster_id }}"
        type: "{{ k8s_type }}"
        g_sub_host_type: "{{ sub_host_type }}"
        gce_machine_type: "{{ lookup('env', 'gce_machine_node_type') | default(lookup('env', 'gce_machine_type'), true) }}"
        gce_machine_image: "{{ lookup('env', 'gce_machine_node_image') | default(lookup('env', 'gce_machine_image'), true) }}"

    - include: ../../common/openshift-cluster/tasks/set_node_launch_facts.yml
      vars:
        type: "infra"
        count: "{{ num_infra }}"
    - include: tasks/launch_instances.yml
      vars:
        instances: "{{ node_names }}"
        cluster: "{{ cluster_id }}"
        type: "{{ k8s_type }}"
        g_sub_host_type: "{{ sub_host_type }}"
        gce_machine_type: "{{ lookup('env', 'gce_machine_node_type') | default(lookup('env', 'gce_machine_type'), true) }}"
        gce_machine_image: "{{ lookup('env', 'gce_machine_node_image') | default(lookup('env', 'gce_machine_image'), true) }}"

- include: scaleup.yml
- include: list.yml

@@ -1,25 +0,0 @@
---
g_all_hosts: "{{ groups['tag_clusterid-' ~ cluster_id] | default([])
                 | intersect(groups['tag_environment-' ~ cluster_env] | default([])) }}"

g_etcd_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type-etcd'] | default([])) }}"

g_new_etcd_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type-new_etcd'] | default([])) }}"

g_lb_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type-lb'] | default([])) }}"

g_nfs_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type-nfs'] | default([])) }}"

g_glusterfs_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type-glusterfs'] | default([])) }}"

g_master_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type-master'] | default([])) }}"

g_new_master_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type-new-master'] | default([])) }}"

g_node_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type-node'] | default([])) }}"

g_new_node_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type-new-node'] | default([])) }}"

g_infra_hosts: "{{ g_node_hosts | intersect(groups['tag_sub-host-type-infra'] | default([])) }}"

g_compute_hosts: "{{ g_node_hosts | intersect(groups['tag_sub-host-type-compute'] | default([])) }}"

@@ -1,36 +0,0 @@
---
- hosts: localhost
  gather_facts: no
  tasks:
    - include_vars: vars.yml
    - include_vars: cluster_hosts.yml
    - add_host:
        name: "{{ item }}"
        groups: l_oo_all_hosts
        ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
        ansible_become: "{{ deployment_vars[deployment_type].become }}"
      with_items: "{{ g_all_hosts | default([]) }}"

- hosts: l_oo_all_hosts
  gather_facts: no
  tasks:
    - include_vars: vars.yml
    - include_vars: cluster_hosts.yml

- include: ../../common/openshift-cluster/config.yml
  vars:
    g_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
    g_sudo: "{{ deployment_vars[deployment_type].become }}"
    g_nodeonmaster: true
    openshift_cluster_id: "{{ cluster_id }}"
    openshift_debug_level: "{{ debug_level }}"
    openshift_deployment_type: "{{ deployment_type }}"
    openshift_hostname: "{{ gce_private_ip }}"
    openshift_hosted_registry_selector: 'type=infra'
    openshift_hosted_router_selector: 'type=infra'
    openshift_master_cluster_method: 'native'
    openshift_use_openshift_sdn: "{{ lookup('oo_option', 'use_openshift_sdn') }}"
    os_sdn_network_plugin_name: "{{ lookup('oo_option', 'sdn_network_plugin_name') }}"
    openshift_use_flannel: "{{ lookup('oo_option', 'use_flannel') }}"
    openshift_use_calico: "{{ lookup('oo_option', 'use_calico') }}"
    openshift_use_fluentd: "{{ lookup('oo_option', 'use_fluentd') }}"
@@ -1 +0,0 @@
../../../filter_plugins

@@ -1,67 +0,0 @@
---
- name: Launch instance(s)
  hosts: localhost
  connection: local
  become: no
  gather_facts: no
  vars_files:
    - vars.yml
  tasks:
    - fail: msg="Deployment type not supported for gce provider yet"
      when: deployment_type == 'enterprise'

    - include: ../../common/openshift-cluster/tasks/set_etcd_launch_facts.yml
    - include: tasks/launch_instances.yml
      vars:
        instances: "{{ etcd_names }}"
        cluster: "{{ cluster_id }}"
        type: "{{ k8s_type }}"
        g_sub_host_type: "default"
        gce_machine_type: "{{ lookup('env', 'gce_machine_etcd_type') | default(lookup('env', 'gce_machine_type'), true) }}"
        gce_machine_image: "{{ lookup('env', 'gce_machine_etcd_image') | default(lookup('env', 'gce_machine_image'), true) }}"


    - include: ../../common/openshift-cluster/tasks/set_master_launch_facts.yml
    - include: tasks/launch_instances.yml
      vars:
        instances: "{{ master_names }}"
        cluster: "{{ cluster_id }}"
        type: "{{ k8s_type }}"
        g_sub_host_type: "default"
        gce_machine_type: "{{ lookup('env', 'gce_machine_master_type') | default(lookup('env', 'gce_machine_type'), true) }}"
        gce_machine_image: "{{ lookup('env', 'gce_machine_master_image') | default(lookup('env', 'gce_machine_image'), true) }}"

    - include: ../../common/openshift-cluster/tasks/set_node_launch_facts.yml
      vars:
        type: "compute"
        count: "{{ num_nodes }}"
    - include: tasks/launch_instances.yml
      vars:
        instances: "{{ node_names }}"
        cluster: "{{ cluster_id }}"
        type: "{{ k8s_type }}"
        g_sub_host_type: "{{ sub_host_type }}"
        gce_machine_type: "{{ lookup('env', 'gce_machine_node_type') | default(lookup('env', 'gce_machine_type'), true) }}"
        gce_machine_image: "{{ lookup('env', 'gce_machine_node_image') | default(lookup('env', 'gce_machine_image'), true) }}"

    - include: ../../common/openshift-cluster/tasks/set_node_launch_facts.yml
      vars:
        type: "infra"
        count: "{{ num_infra }}"
    - include: tasks/launch_instances.yml
      vars:
        instances: "{{ node_names }}"
        cluster: "{{ cluster_id }}"
        type: "{{ k8s_type }}"
        g_sub_host_type: "{{ sub_host_type }}"
        gce_machine_type: "{{ lookup('env', 'gce_machine_node_type') | default(lookup('env', 'gce_machine_type'), true) }}"
        gce_machine_image: "{{ lookup('env', 'gce_machine_node_image') | default(lookup('env', 'gce_machine_image'), true) }}"

    - add_host:
        name: "{{ master_names.0 }}"
        groups: service_master
      when: master_names is defined and master_names.0 is defined

- include: update.yml

- include: list.yml

@@ -1,23 +0,0 @@
---
- name: Generate oo_list_hosts group
  hosts: localhost
  connection: local
  become: no
  gather_facts: no
  vars_files:
    - vars.yml
  tasks:
    - set_fact: scratch_group=tag_clusterid-{{ cluster_id }}
      when: cluster_id != ''
    - set_fact: scratch_group=all
      when: cluster_id == ''
    - add_host:
        name: "{{ item }}"
        groups: oo_list_hosts
        ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
        ansible_become: "{{ deployment_vars[deployment_type].become }}"
        oo_public_ipv4: "{{ hostvars[item].gce_public_ip }}"
        oo_private_ipv4: "{{ hostvars[item].gce_private_ip }}"
      with_items: "{{ groups[scratch_group] | default([], true) | difference(['localhost']) | difference(groups.status_terminated | default([], true)) }}"
    - debug:
        msg: "{{ hostvars | oo_select_keys(groups[scratch_group] | default([])) | oo_pretty_print_cluster }}"

@@ -1 +0,0 @@
../../../lookup_plugins

@@ -1 +0,0 @@
../../../roles

@@ -1,29 +0,0 @@
---
- name: Call same systemctl command for openshift on all instance(s)
  hosts: localhost
  connection: local
  become: no
  gather_facts: no
  vars_files:
    - vars.yml
    - cluster_hosts.yml
  tasks:
    - fail: msg="cluster_id is required to be injected in this playbook"
      when: cluster_id is not defined

    - add_host:
        name: "{{ item }}"
        groups: g_service_nodes
        ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
        ansible_become: "{{ deployment_vars[deployment_type].become }}"
      with_items: "{{ node_hosts | default([]) | difference(['localhost']) | difference(groups.status_terminated) }}"

    - add_host:
        name: "{{ item }}"
        groups: g_service_masters
        ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
        ansible_become: "{{ deployment_vars[deployment_type].become }}"
      with_items: "{{ master_hosts | default([]) | difference(['localhost']) | difference(groups.status_terminated) }}"

- include: ../../common/openshift-node/service.yml
- include: ../../common/openshift-master/service.yml
@@ -1,65 +0,0 @@
---
- name: Launch instance(s)
  gce:
    instance_names: "{{ instances | join(',') }}"
    machine_type: "{{ gce_machine_type | default(deployment_vars[deployment_type].machine_type, true) }}"
    image: "{{ gce_machine_image | default(deployment_vars[deployment_type].image, true) }}"
    service_account_email: "{{ lookup('env', 'gce_service_account_email_address') }}"
    pem_file: "{{ lookup('env', 'gce_service_account_pem_file_path') }}"
    project_id: "{{ lookup('env', 'gce_project_id') }}"
    zone: "{{ lookup('env', 'zone') }}"
    network: "{{ lookup('env', 'network') }}"
    subnetwork: "{{ lookup('env', 'subnetwork') | default(omit, True) }}"
    # unsupported in 1.9.+
    #service_account_permissions: "datastore,logging-write"
    tags:
      - created-by-{{ lookup('env', 'LOGNAME') | regex_replace('[^a-z0-9]+', '') | default(cluster, true) }}
      - environment-{{ cluster_env }}
      - clusterid-{{ cluster_id }}
      - host-type-{{ type }}
      - sub-host-type-{{ g_sub_host_type }}
    metadata:
      startup-script: |
        #!/bin/bash
        echo "Defaults:{{ deployment_vars[deployment_type].ssh_user }} !requiretty" > /etc/sudoers.d/99-{{ deployment_vars[deployment_type].ssh_user }}

  when: instances | length > 0
  register: gce

- set_fact:
    node_label:
      # There doesn't seem to be a way to get the region directly, so parse it out of the zone.
      region: "{{ gce.zone | regex_replace('^(.*)-.*$', '\\1') }}"
      type: "{{ g_sub_host_type }}"
  when: instances | length > 0 and type == "node"

- set_fact:
    node_label:
      # There doesn't seem to be a way to get the region directly, so parse it out of the zone.
      region: "{{ gce.zone | regex_replace('^(.*)-.*$', '\\1') }}"
      type: "{{ type }}"
  when: instances | length > 0 and type != "node"

- name: Add new instances to groups and set variables needed
  add_host:
    hostname: "{{ item.name }}"
    ansible_ssh_host: "{{ item.public_ip }}"
    ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
    ansible_become: "{{ deployment_vars[deployment_type].become }}"
    groups: "{{ item.tags | oo_prepend_strings_in_list('tag_') | join(',') }}"
    gce_public_ip: "{{ item.public_ip }}"
    gce_private_ip: "{{ item.private_ip }}"
    openshift_node_labels: "{{ node_label }}"
  with_items: "{{ gce.instance_data | default([], true) }}"

- name: Wait for ssh
  wait_for: port=22 host={{ item.public_ip }}
  with_items: "{{ gce.instance_data | default([], true) }}"

- name: Wait for user setup
  command: "ssh -o StrictHostKeyChecking=no -o PasswordAuthentication=no -o ConnectTimeout=10 -o UserKnownHostsFile=/dev/null {{ hostvars[item.name].ansible_ssh_user }}@{{ item.public_ip }} echo {{ hostvars[item.name].ansible_ssh_user }} user is setup"
  register: result
  until: result.rc == 0
  retries: 30
  delay: 5
  with_items: "{{ gce.instance_data | default([], true) }}"
@@ -1,58 +0,0 @@
---
- name: Terminate instance(s)
  hosts: localhost
  connection: local
  become: no
  gather_facts: no
  vars_files:
    - vars.yml
  tasks:
    - add_host:
        name: "{{ item }}"
        groups: oo_hosts_to_terminate
        ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
        ansible_become: "{{ deployment_vars[deployment_type].become }}"
      with_items: "{{ (groups['tag_clusterid-' ~ cluster_id] | default([])) | difference(['localhost']) }}"

- name: Unsubscribe VMs
  hosts: oo_hosts_to_terminate
  vars_files:
    - vars.yml
  roles:
    - role: rhel_unsubscribe
      when: deployment_type in ['atomic-enterprise', 'enterprise', 'openshift-enterprise'] and
            ansible_distribution == "RedHat" and
            lookup('oo_option', 'rhel_skip_subscription') | default(rhsub_skip, True) |
            default('no', True) | lower in ['no', 'false']

- name: Terminate instance(s)
  hosts: localhost
  become: no
  connection: local
  gather_facts: no
  vars_files:
    - vars.yml
  tasks:
    - name: Terminate instances that were previously launched
      local_action:
        module: gce
        state: 'absent'
        name: "{{ item }}"
        service_account_email: "{{ lookup('env', 'gce_service_account_email_address') }}"
        pem_file: "{{ lookup('env', 'gce_service_account_pem_file_path') }}"
        project_id: "{{ lookup('env', 'gce_project_id') }}"
        zone: "{{ lookup('env', 'zone') }}"
      with_items: "{{ groups['oo_hosts_to_terminate'] | default([], true) }}"
      when: item is defined

#- include: ../openshift-node/terminate.yml
#  vars:
#    gce_service_account_email: "{{ lookup('env', 'gce_service_account_email_address') }}"
#    gce_pem_file: "{{ lookup('env', 'gce_service_account_pem_file_path') }}"
#    gce_project_id: "{{ lookup('env', 'gce_project_id') }}"
#
#- include: ../openshift-master/terminate.yml
#  vars:
#    gce_service_account_email: "{{ lookup('env', 'gce_service_account_email_address') }}"
#    gce_pem_file: "{{ lookup('env', 'gce_service_account_pem_file_path') }}"
#    gce_project_id: "{{ lookup('env', 'gce_project_id') }}"
@@ -1,34 +0,0 @@
---
- hosts: localhost
  gather_facts: no
  tasks:
    - include_vars: vars.yml
    - include_vars: cluster_hosts.yml
    - add_host:
        name: "{{ item }}"
        groups: l_oo_all_hosts
      with_items: "{{ g_all_hosts }}"

- hosts: l_oo_all_hosts
  gather_facts: no
  tasks:
    - include_vars: vars.yml
    - include_vars: cluster_hosts.yml

- name: Populate oo_hosts_to_update group
  hosts: localhost
  connection: local
  become: no
  gather_facts: no
  tasks:
    - name: Evaluate oo_hosts_to_update
      add_host:
        name: "{{ item }}"
        groups: oo_hosts_to_update
        ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
        ansible_become: "{{ deployment_vars[deployment_type].become }}"
      with_items: "{{ g_all_hosts | default([]) }}"

- include: ../../common/openshift-cluster/update_repos_and_packages.yml

- include: config.yml

@@ -1,18 +0,0 @@
---
debug_level: 2

deployment_rhel7_ent_base:
  image: "{{ lookup('oo_option', 'image_name') | default('rhel-7', True) }}"
  machine_type: "{{ lookup('oo_option', 'machine_type') | default('n1-standard-1', True) }}"
  ssh_user: "{{ lookup('env', 'gce_ssh_user') | default(ansible_ssh_user, true) }}"
  become: yes

deployment_vars:
  origin:
    image: "{{ lookup('oo_option', 'image_name') | default('centos-7', True) }}"
    machine_type: "{{ lookup('oo_option', 'machine_type') | default('n1-standard-1', True) }}"
    ssh_user: "{{ lookup('env', 'gce_ssh_user') | default(ansible_ssh_user, true) }}"
    become: yes
  enterprise: "{{ deployment_rhel7_ent_base }}"
  openshift-enterprise: "{{ deployment_rhel7_ent_base }}"
  atomic-enterprise: "{{ deployment_rhel7_ent_base }}"

@@ -1,4 +0,0 @@
# libvirt playbooks

This playbook directory is meant to be driven by [`bin/cluster`](../../bin),
which is community supported and most use is considered deprecated.
@@ -1,25 +0,0 @@
---
g_all_hosts: "{{ groups['tag_clusterid-' ~ cluster_id] | default([])
                 | intersect(groups['tag_environment-' ~ cluster_env] | default([])) }}"

g_etcd_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type-etcd'] | default([])) }}"

g_new_etcd_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type-new_etcd'] | default([])) }}"

g_lb_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type-lb'] | default([])) }}"

g_nfs_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type-nfs'] | default([])) }}"

g_glusterfs_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type-glusterfs'] | default([])) }}"

g_master_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type-master'] | default([])) }}"

g_new_master_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type-new-master'] | default([])) }}"

g_node_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type-node'] | default([])) }}"

g_new_node_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type-new-node'] | default([])) }}"

g_infra_hosts: "{{ g_node_hosts | intersect(groups['tag_sub-host-type-infra'] | default([])) }}"

g_compute_hosts: "{{ g_node_hosts | intersect(groups['tag_sub-host-type-compute'] | default([])) }}"

@@ -1,39 +0,0 @@
---
# TODO: need to figure out a plan for setting hostname, currently the default
# is localhost, so no hostname value (or public_hostname) value is getting
# assigned

- include: ../../common/openshift-cluster/std_include.yml

- hosts: localhost
  gather_facts: no
  tasks:
    - include_vars: vars.yml
    - include_vars: cluster_hosts.yml
    - add_host:
        name: "{{ item }}"
        groups: l_oo_all_hosts
      with_items: "{{ g_all_hosts | default([]) }}"

- hosts: l_oo_all_hosts
  gather_facts: no
  tasks:
    - include_vars: vars.yml
    - include_vars: cluster_hosts.yml

- include: ../../common/openshift-cluster/config.yml
  vars:
    g_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
    g_sudo: "{{ deployment_vars[deployment_type].become }}"
    g_nodeonmaster: true
    openshift_cluster_id: "{{ cluster_id }}"
    openshift_debug_level: "{{ debug_level }}"
    openshift_deployment_type: "{{ deployment_type }}"
    openshift_hosted_registry_selector: 'type=infra'
    openshift_hosted_router_selector: 'type=infra'
    openshift_master_cluster_method: 'native'
    openshift_use_openshift_sdn: "{{ lookup('oo_option', 'use_openshift_sdn') }}"
    os_sdn_network_plugin_name: "{{ lookup('oo_option', 'sdn_network_plugin_name') }}"
    openshift_use_flannel: "{{ lookup('oo_option', 'use_flannel') }}"
    openshift_use_calico: "{{ lookup('oo_option', 'use_calico') }}"
    openshift_use_fluentd: "{{ lookup('oo_option', 'use_fluentd') }}"

@@ -1 +0,0 @@
../../../filter_plugins
@@ -1,57 +0,0 @@
---
- name: Launch instance(s)
  hosts: localhost
  become: no
  connection: local
  gather_facts: no
  vars_files:
    - vars.yml
  vars:
    image_url: "{{ deployment_vars[deployment_type].image.url }}"
    image_sha256: "{{ deployment_vars[deployment_type].image.sha256 }}"
    image_name: "{{ deployment_vars[deployment_type].image.name }}"
    image_compression: "{{ deployment_vars[deployment_type].image.compression }}"
  tasks:
    - include: tasks/configure_libvirt.yml

    - include: ../../common/openshift-cluster/tasks/set_etcd_launch_facts.yml
    - include: tasks/launch_instances.yml
      vars:
        instances: "{{ etcd_names }}"
        cluster: "{{ cluster_id }}"
        type: "{{ k8s_type }}"
        g_sub_host_type: "default"

    - include: ../../common/openshift-cluster/tasks/set_master_launch_facts.yml
    - include: tasks/launch_instances.yml
      vars:
        instances: "{{ master_names }}"
        cluster: "{{ cluster_id }}"
        type: "{{ k8s_type }}"
        g_sub_host_type: "default"

    - include: ../../common/openshift-cluster/tasks/set_node_launch_facts.yml
      vars:
        type: "compute"
        count: "{{ num_nodes }}"
    - include: tasks/launch_instances.yml
      vars:
        instances: "{{ node_names }}"
        cluster: "{{ cluster_id }}"
        type: "{{ k8s_type }}"
        g_sub_host_type: "{{ sub_host_type }}"

    - include: ../../common/openshift-cluster/tasks/set_node_launch_facts.yml
      vars:
        type: "infra"
        count: "{{ num_infra }}"
    - include: tasks/launch_instances.yml
      vars:
        instances: "{{ node_names }}"
        cluster: "{{ cluster_id }}"
        type: "{{ k8s_type }}"
        g_sub_host_type: "{{ sub_host_type }}"

- include: update.yml

- include: list.yml

@@ -1,23 +0,0 @@
---
- name: Generate oo_list_hosts group
  hosts: localhost
  become: no
  connection: local
  gather_facts: no
  vars_files:
    - vars.yml
  tasks:
    - set_fact: scratch_group=tag_clusterid-{{ cluster_id }}
      when: cluster_id != ''
    - set_fact: scratch_group=all
      when: cluster_id == ''
    - add_host:
        name: "{{ item }}"
        groups: oo_list_hosts
        ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
        ansible_become: "{{ deployment_vars[deployment_type].become }}"
        oo_public_ipv4: ""
        oo_private_ipv4: "{{ hostvars[item].libvirt_ip_address }}"
      with_items: "{{ groups[scratch_group] | default([]) | difference(['localhost']) }}"
    - debug:
        msg: "{{ hostvars | oo_select_keys(groups[scratch_group] | default([])) | oo_pretty_print_cluster }}"

@@ -1 +0,0 @@
../../../lookup_plugins

@@ -1 +0,0 @@
../../../roles

@@ -1,34 +0,0 @@
---
# TODO: need to figure out a plan for setting hostname, currently the default
# is localhost, so no hostname value (or public_hostname) value is getting
# assigned

- name: Call same systemctl command for openshift on all instance(s)
  hosts: localhost
  become: no
  connection: local
  gather_facts: no
  vars_files:
    - vars.yml
  tasks:
    - fail: msg="cluster_id is required to be injected in this playbook"
      when: cluster_id is not defined

    - name: Evaluate g_service_masters
      add_host:
        name: "{{ item }}"
        ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
        ansible_become: "{{ deployment_vars[deployment_type].become }}"
        groups: g_service_masters
      with_items: "{{ g_master_hosts | default([]) }}"

    - name: Evaluate g_service_nodes
      add_host:
        name: "{{ item }}"
        ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
        ansible_become: "{{ deployment_vars[deployment_type].become }}"
        groups: g_service_nodes
      with_items: "{{ g_node_hosts | default([]) }}"

- include: ../../common/openshift-node/service.yml
- include: ../../common/openshift-master/service.yml

@@ -1,6 +0,0 @@
---
- include: configure_libvirt_storage_pool.yml
  when: libvirt_storage_pool is defined and libvirt_storage_pool_path is defined

- include: configure_libvirt_network.yml
  when: libvirt_network is defined

@@ -1,11 +0,0 @@
---
- name: Create the libvirt network for OpenShift
  virt_net:
    name: '{{ libvirt_network }}'
    state: '{{ item }}'
    autostart: 'yes'
    xml: "{{ lookup('template', 'network.xml') }}"
    uri: '{{ libvirt_uri }}'
  with_items:
    - present
    - active

@@ -1,30 +0,0 @@
---
- name: Create libvirt storage directory for openshift
  file:
    dest: "{{ libvirt_storage_pool_path }}"
    state: directory

# We need to set permissions on the directory and any items created under the directory, so we need to call the acl module with and without default set.
- acl:
    default: '{{ item.default }}'
    entity: kvm
    etype: group
    name: "{{ libvirt_storage_pool_path }}"
    permissions: '{{ item.permissions }}'
    state: present
  with_items:
    - default: no
      permissions: x
    - default: yes
      permissions: rwx

- name: Create the libvirt storage pool for OpenShift
  virt_pool:
    name: '{{ libvirt_storage_pool }}'
    state: '{{ item }}'
    autostart: 'yes'
    xml: "{{ lookup('template', 'storage-pool.xml') }}"
    uri: '{{ libvirt_uri }}'
  with_items:
    - present
    - active
@@ -1,142 +0,0 @@
---
# TODO: Add support for choosing base image based on deployment_type and os
# wanted (os wanted needs support added in bin/cluster with sane defaults:
# fedora/centos for origin, rhel for enterprise)

# TODO: create a role to encapsulate some of this complexity, possibly also
# create a module to manage the storage tasks, network tasks, and possibly
# even handle the libvirt tasks to set metadata in the domain xml and be able
# to create/query data about vms without having to use xml the python libvirt
# bindings look like a good candidate for this

- name: Download Base Cloud image
  get_url:
    url: '{{ image_url }}'
    sha256sum: '{{ image_sha256 }}'
    dest: '{{ libvirt_storage_pool_path }}/{{ [image_name, image_compression] | difference([""]) | join(".") }}'
  when: ( lookup("oo_option", "skip_image_download") | default("no", True) | lower ) in ["false", "no"]
  register: downloaded_image

- name: Uncompress xz compressed base cloud image
  command: 'unxz -kf {{ libvirt_storage_pool_path }}/{{ [image_name, image_compression] | join(".") }}'
  args:
    creates: '{{ libvirt_storage_pool_path }}/{{ image_name }}'
  when: image_compression in ["xz"] and downloaded_image.changed

- name: Uncompress tgz compressed base cloud image
  command: 'tar zxvf {{ libvirt_storage_pool_path }}/{{ [image_name, image_compression] | join(".") }}'
  args:
    creates: '{{ libvirt_storage_pool_path }}/{{ image_name }}'
  when: image_compression in ["tgz"] and downloaded_image.changed

- name: Uncompress gzip compressed base cloud image
  command: 'gunzip {{ libvirt_storage_pool_path }}/{{ [image_name, image_compression] | join(".") }}'
  args:
    creates: '{{ libvirt_storage_pool_path }}/{{ image_name }}'
  when: image_compression in ["gz"] and downloaded_image.changed

- name: Create the cloud-init config drive path
  file:
    dest: '{{ libvirt_storage_pool_path }}/{{ item }}_configdrive/'
    state: directory
  with_items: '{{ instances }}'

- name: Create the cloud-init config drive files
  template:
    src: '{{ item[1] }}'
    dest: '{{ libvirt_storage_pool_path }}/{{ item[0] }}_configdrive/{{ item[1] }}'
  with_nested:
    - '{{ instances }}'
    - [ user-data, meta-data ]

- name: Check for genisoimage
  command: which genisoimage
  register: which_genisoimage

- name: Create the cloud-init config drive
  command: "{{ 'genisoimage' if which_genisoimage.rc == 0 else 'mkisofs' }} -output {{ libvirt_storage_pool_path }}/{{ item }}_cloud-init.iso -volid cidata -joliet -rock user-data meta-data"
  args:
    chdir: "{{ libvirt_storage_pool_path }}/{{ item }}_configdrive/"
    creates: "{{ libvirt_storage_pool_path }}/{{ item }}_cloud-init.iso"
  with_items: '{{ instances }}'

- name: Refresh the libvirt storage pool for openshift
  command: 'virsh -c {{ libvirt_uri }} pool-refresh {{ libvirt_storage_pool }}'

- name: Create VM drives
  command: 'virsh -c {{ libvirt_uri }} vol-create-as {{ libvirt_storage_pool }} {{ item }}.qcow2 10G --format qcow2 --backing-vol {{ image_name }} --backing-vol-format qcow2'
  with_items: '{{ instances }}'

- name: Create VM docker drives
  command: 'virsh -c {{ libvirt_uri }} vol-create-as {{ libvirt_storage_pool }} {{ item }}-docker.qcow2 10G --format qcow2 --allocation 0'
  with_items: '{{ instances }}'

- name: Create VMs
  virt:
    name: '{{ item }}'
    command: define
    xml: "{{ lookup('template', '../templates/domain.xml') }}"
    uri: '{{ libvirt_uri }}'
  with_items: '{{ instances }}'

- name: Start VMs
  virt:
    name: '{{ item }}'
    state: running
    uri: '{{ libvirt_uri }}'
  with_items: '{{ instances }}'

- name: Wait for the VMs to get an IP
  shell: 'virsh -c {{ libvirt_uri }} net-dhcp-leases {{ libvirt_network }} | egrep -c ''{{ instances | join("|") }}'''
  register: nb_allocated_ips
  until: nb_allocated_ips.stdout == '{{ instances | length }}'
  retries: 60
  delay: 3
  when: instances | length != 0

- name: Collect IP addresses of the VMs
  shell: 'virsh -c {{ libvirt_uri }} net-dhcp-leases {{ libvirt_network }} | awk ''$6 == "{{ item }}" {gsub(/\/.*/, "", $5); print $5}'''
  register: scratch_ip
  with_items: '{{ instances }}'

- set_fact:
    ips: "{{ scratch_ip.results | default([]) | oo_collect('stdout') }}"

- set_fact:
    node_label:
      type: "{{ g_sub_host_type }}"
  when: instances | length > 0 and type == "node"

- set_fact:
    node_label:
      type: "{{ type }}"
  when: instances | length > 0 and type != "node"

- name: Add new instances
  add_host:
    hostname: '{{ item.0 }}'
    ansible_ssh_host: '{{ item.1 }}'
    ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
    ansible_become: "{{ deployment_vars[deployment_type].become }}"
    groups: "tag_environment-{{ cluster_env }}, tag_host-type-{{ type }}, tag_sub-host-type-{{ g_sub_host_type }}, tag_clusterid-{{ cluster_id }}"
    openshift_node_labels: "{{ node_label }}"
    libvirt_ip_address: "{{ item.1 }}"
  with_together:
    - '{{ instances }}'
    - '{{ ips }}'

- name: Wait for ssh
  wait_for:
    host: '{{ item }}'
    port: 22
  with_items: '{{ ips }}'

- name: Wait for openshift user setup
  command: 'ssh -o StrictHostKeyChecking=no -o PasswordAuthentication=no -o ConnectTimeout=10 -o UserKnownHostsFile=/dev/null openshift@{{ item.1 }} echo openshift user is setup'
  register: result
  until: result.rc == 0
  retries: 30
  delay: 1
  with_together:
    - '{{ instances }}'
    - '{{ ips }}'
@@ -1,65 +0,0 @@
<domain type='kvm' id='8'>
  <name>{{ item }}</name>
  <memory unit='MiB'>{{ libvirt_instance_memory_mib }}</memory>
  <metadata xmlns:ansible="https://github.com/ansible/ansible">
    <ansible:tags>
      <ansible:tag>environment-{{ cluster_env }}</ansible:tag>
      <ansible:tag>clusterid-{{ cluster }}</ansible:tag>
      <ansible:tag>host-type-{{ type }}</ansible:tag>
      <ansible:tag>sub-host-type-{{ g_sub_host_type }}</ansible:tag>
    </ansible:tags>
  </metadata>
  <vcpu placement='static'>{{ libvirt_instance_vcpu }}</vcpu>
  <os>
    <type arch='x86_64' machine='pc'>hvm</type>
    <boot dev='hd'/>
  </os>
  <features>
    <acpi/>
    <apic/>
    <pae/>
  </features>
  <cpu mode='host-model'>
    <model fallback='allow'/>
  </cpu>
  <clock offset='utc'>
    <timer name='rtc' tickpolicy='catchup'/>
    <timer name='pit' tickpolicy='delay'/>
    <timer name='hpet' present='no'/>
  </clock>
  <on_poweroff>destroy</on_poweroff>
  <on_reboot>restart</on_reboot>
  <on_crash>restart</on_crash>
  <devices>
    <emulator>/usr/bin/qemu-system-x86_64</emulator>
    <disk type='file' device='disk'>
      <driver name='qemu' type='qcow2' discard='unmap'/>
      <source file='{{ libvirt_storage_pool_path }}/{{ item }}.qcow2'/>
      <target dev='sda' bus='scsi'/>
    </disk>
    <disk type='file' device='disk'>
      <driver name='qemu' type='qcow2' discard='unmap'/>
      <source file='{{ libvirt_storage_pool_path }}/{{ item }}-docker.qcow2'/>
      <target dev='sdb' bus='scsi'/>
    </disk>
    <disk type='file' device='cdrom'>
      <driver name='qemu' type='raw'/>
      <source file='{{ libvirt_storage_pool_path }}/{{ item }}_cloud-init.iso'/>
      <target dev='sdc' bus='scsi'/>
      <readonly/>
    </disk>
    <controller type='scsi' model='virtio-scsi' />
    <interface type='network'>
      <source network='{{ libvirt_network }}'/>
      <model type='virtio'/>
    </interface>
    <serial type='pty'>
      <target port='0'/>
    </serial>
    <console type='pty'>
      <target type='serial' port='0'/>
    </console>
    <memballoon model='virtio'>
    </memballoon>
  </devices>
</domain>

@@ -1,3 +0,0 @@
instance-id: {{ item[0] }}
hostname: {{ item[0] }}
local-hostname: {{ item[0] }}.example.com

@@ -1,23 +0,0 @@
<network>
  <name>{{ libvirt_network }}</name>
  <forward mode='nat'>
    <nat>
      <port start='1024' end='65535'/>
    </nat>
  </forward>
  <!-- TODO: query for first available virbr interface available -->
  <bridge name='virbr3' stp='on' delay='0'/>
  <!-- TODO: make overridable -->
  <domain name='example.com' localOnly='yes' />
  <dns>
    <!-- TODO: automatically add host entries -->
  </dns>
  <!-- TODO: query for available address space -->
  <ip address='192.168.55.1' netmask='255.255.255.0'>
    <dhcp>
      <range start='192.168.55.2' end='192.168.55.254'/>
      <!-- TODO: add static entries addresses for the hosts to be created -->
    </dhcp>
  </ip>
</network>

@@ -1,6 +0,0 @@
<pool type='dir'>
  <name>{{ libvirt_storage_pool }}</name>
  <target>
    <path>{{ libvirt_storage_pool_path }}</path>
  </target>
</pool>

@@ -1,43 +0,0 @@
#cloud-config
disable_root: true

hostname: {{ item[0] }}
fqdn: {{ item[0] }}.example.com

mounts:
  - [ sdb ]

users:
  - default
  - name: root
    ssh_authorized_keys:
      - {{ lookup('file', '~/.ssh/id_rsa.pub') }}

system_info:
  default_user:
    name: openshift
    sudo: ["ALL=(ALL) NOPASSWD: ALL"]
    ssh_authorized_keys:
      - {{ lookup('file', '~/.ssh/id_rsa.pub') }}

write_files:
  - path: /etc/sudoers.d/00-openshift-no-requiretty
    permissions: 440
    content: |
      Defaults:openshift !requiretty
  - path: /etc/sysconfig/docker-storage-setup
    owner: root:root
    permissions: '0644'
    content: |
      DEVS=/dev/sdb
      VG=docker_vg
      EXTRA_DOCKER_STORAGE_OPTIONS='--storage-opt dm.blkdiscard=true'
  - path: /etc/systemd/system/fstrim.timer.d/hourly.conf
    content: |
      [Timer]
      OnCalendar=hourly

runcmd:
  - NETWORK_CONFIG=/etc/sysconfig/network-scripts/ifcfg-eth0; if ! grep DHCP_HOSTNAME ${NETWORK_CONFIG}; then echo 'DHCP_HOSTNAME="{{ item[0] }}.example.com"' >> ${NETWORK_CONFIG}; fi; pkill -9 dhclient; service network restart
  - systemctl enable --now fstrim.timer
@@ -1,70 +0,0 @@
---
# TODO: does not handle a non-existent cluster gracefully

- name: Terminate instance(s)
  hosts: localhost
  become: no
  connection: local
  gather_facts: no
  vars_files:
    - vars.yml
  tasks:
    - set_fact: cluster_group=tag_clusterid-{{ cluster_id }}
    - add_host:
        name: "{{ item }}"
        groups: oo_hosts_to_terminate
        ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
        ansible_become: "{{ deployment_vars[deployment_type].become }}"
      with_items: '{{ groups[cluster_group] | default([]) }}'

- name: Unsubscribe VMs
  hosts: oo_hosts_to_terminate
  vars_files:
    - vars.yml
  roles:
    - role: rhel_unsubscribe
      when: deployment_type in ['atomic-enterprise', 'enterprise', 'openshift-enterprise'] and
            ansible_distribution == "RedHat" and
            lookup('oo_option', 'rhel_skip_subscription') | default(rhsub_skip, True) |
            default('no', True) | lower in ['no', 'false']

- name: Terminate instance(s)
  hosts: localhost
  become: no
  connection: local
  gather_facts: no
  vars_files:
    - vars.yml
  tasks:
    - name: Destroy VMs
      virt:
        name: '{{ item[0] }}'
        command: '{{ item[1] }}'
        uri: '{{ libvirt_uri }}'
      with_nested:
        - "{{ groups['oo_hosts_to_terminate'] }}"
        - [ destroy, undefine ]

    - name: Delete VM drives
      command: 'virsh -c {{ libvirt_uri }} vol-delete --pool {{ libvirt_storage_pool }} {{ item }}.qcow2'
      args:
        removes: '{{ libvirt_storage_pool_path }}/{{ item }}.qcow2'
      with_items: "{{ groups['oo_hosts_to_terminate'] }}"

    - name: Delete VM docker drives
      command: 'virsh -c {{ libvirt_uri }} vol-delete --pool {{ libvirt_storage_pool }} {{ item }}-docker.qcow2'
      args:
        removes: '{{ libvirt_storage_pool_path }}/{{ item }}-docker.qcow2'
      with_items: "{{ groups['oo_hosts_to_terminate'] }}"

    - name: Delete the VM cloud-init image
      file:
        path: '{{ libvirt_storage_pool_path }}/{{ item }}_cloud-init.iso'
        state: absent
      with_items: "{{ groups['oo_hosts_to_terminate'] }}"

    - name: Remove the cloud-init config directory
      file:
        path: '{{ libvirt_storage_pool_path }}/{{ item }}_configdrive/'
        state: absent
      with_items: "{{ groups['oo_hosts_to_terminate'] }}"

@@ -1,37 +0,0 @@
---
- hosts: localhost
  gather_facts: no
  tasks:
    - include_vars: vars.yml
    - include_vars: cluster_hosts.yml
    - add_host:
        name: "{{ item }}"
        groups: l_oo_all_hosts
      with_items: '{{ g_all_hosts }}'

- hosts: l_oo_all_hosts
  gather_facts: no
  tasks:
    - include_vars: vars.yml
    - include_vars: cluster_hosts.yml

- name: Populate oo_hosts_to_update group
  hosts: localhost
  connection: local
  become: no
  gather_facts: no
  vars_files:
    - vars.yml
    - cluster_hosts.yml
  tasks:
    - name: Evaluate oo_hosts_to_update
      add_host:
        name: "{{ item }}"
        groups: oo_hosts_to_update
        ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
        ansible_become: "{{ deployment_vars[deployment_type].become }}"
      with_items: '{{ g_all_hosts | default([]) }}'

- include: ../../common/openshift-cluster/update_repos_and_packages.yml

- include: config.yml
@@ -1,40 +0,0 @@
---
default_pool_path: "{{ lookup('env','HOME') }}/libvirt-storage-pool-openshift-ansible"
libvirt_storage_pool_path: "{{ lookup('oo_option', 'libvirt_storage_pool_path') | default(default_pool_path, True) }}"
libvirt_storage_pool: "{{ lookup('oo_option', 'libvirt_storage_pool') | default('openshift-ansible', True) }}"
libvirt_network: "{{ lookup('oo_option', 'libvirt_network') | default('openshift-ansible', True) }}"
libvirt_instance_memory_mib: "{{ lookup('oo_option', 'libvirt_instance_memory_mib') | default(1024, True) }}"
libvirt_instance_vcpu: "{{ lookup('oo_option', 'libvirt_instance_vcpu') | default(2, True) }}"
libvirt_uri: "{{ lookup('oo_option', 'libvirt_uri') | default('qemu:///system', True) }}"
debug_level: 2

# Automatic download of the qcow2 image for RHEL cannot be done directly from the RedHat portal because it requires authentication.
# The default value of image_url for enterprise and openshift-enterprise deployment types below won't work.
deployment_rhel7_ent_base:
  image:
    url: "{{ lookup('oo_option', 'image_url') |
             default('https://access.cdn.redhat.com//content/origin/files/sha256/25/25f880767ec6bf71beb532e17f1c45231640bbfdfbbb1dffb79d2c1b328388e0/rhel-guest-image-7.2-20151102.0.x86_64.qcow2', True) }}"
    name: "{{ lookup('oo_option', 'image_name') |
              default('rhel-guest-image-7.2-20151102.0.x86_64.qcow2', True) }}"
    sha256: "{{ lookup('oo_option', 'image_sha256') |
                default('25f880767ec6bf71beb532e17f1c45231640bbfdfbbb1dffb79d2c1b328388e0', True) }}"
    compression: ""
  ssh_user: openshift
  become: yes

deployment_vars:
  origin:
    image:
      url: "{{ lookup('oo_option', 'image_url') |
               default('http://cloud.centos.org/centos/7/images/CentOS-7-x86_64-GenericCloud-1602.qcow2.xz', True) }}"
      compression: "{{ lookup('oo_option', 'image_compression') |
                       default('xz', True) }}"
      name: "{{ lookup('oo_option', 'image_name') |
                default('CentOS-7-x86_64-GenericCloud.qcow2', True) }}"
      sha256: "{{ lookup('oo_option', 'image_sha256') |
                  default('dd0f5e610e7c5ffacaca35ed7a78a19142a588f4543da77b61c1fb0d74400471', True) }}"
    ssh_user: openshift
    become: yes
  enterprise: "{{ deployment_rhel7_ent_base }}"
  openshift-enterprise: "{{ deployment_rhel7_ent_base }}"
  atomic-enterprise: "{{ deployment_rhel7_ent_base }}"
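Editor's note: as the comment in this vars file says, the default enterprise `image_url` points at the authenticated Red Hat CDN and cannot be fetched unattended. A minimal sketch of one workaround, assuming the qcow2 has been manually downloaded and mirrored to an internal, unauthenticated web server (the mirror host name is invented; the checksum and file name are the ones from the file above):

```
# Illustrative only: point the same oo_option lookup defaults at an internal
# mirror so "Download Base Cloud image" can fetch the file without credentials.
deployment_rhel7_ent_base:
  image:
    url: "{{ lookup('oo_option', 'image_url') |
             default('http://mirror.example.com/images/rhel-guest-image-7.2-20151102.0.x86_64.qcow2', True) }}"
    name: "rhel-guest-image-7.2-20151102.0.x86_64.qcow2"
    sha256: "25f880767ec6bf71beb532e17f1c45231640bbfdfbbb1dffb79d2c1b328388e0"
    compression: ""
  ssh_user: openshift
  become: yes
```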
@@ -1,4 +0,0 @@
# OpenStack playbooks

This playbook directory is meant to be driven by [`bin/cluster`](../../bin),
which is community supported and most use is considered deprecated.

@@ -1,25 +0,0 @@
---
g_all_hosts: "{{ groups['meta-clusterid_' ~ cluster_id] | default([])
                 | intersect(groups['meta-environment_' ~ cluster_env] | default([])) }}"

g_etcd_hosts: "{{ g_all_hosts | intersect(groups['meta-host-type_etcd'] | default([])) }}"

g_new_etcd_hosts: "{{ g_all_hosts | intersect(groups['meta-host-type_new_etcd'] | default([])) }}"

g_lb_hosts: "{{ g_all_hosts | intersect(groups['meta-host-type_lb'] | default([])) }}"

g_nfs_hosts: "{{ g_all_hosts | intersect(groups['meta-host-type_nfs'] | default([])) }}"

g_glusterfs_hosts: "{{ g_all_hosts | intersect(groups['meta-host-type_glusterfs'] | default([])) }}"

g_master_hosts: "{{ g_all_hosts | intersect(groups['meta-host-type_master'] | default([])) }}"

g_new_master_hosts: "{{ g_all_hosts | intersect(groups['meta-host-type_new_master'] | default([])) }}"

g_node_hosts: "{{ g_all_hosts | intersect(groups['meta-host-type_node'] | default([])) }}"

g_new_node_hosts: "{{ g_all_hosts | intersect(groups['meta-host-type_new_node'] | default([])) }}"

g_infra_hosts: "{{ g_node_hosts | intersect(groups['meta-sub-host-type_infra'] | default([])) }}"

g_compute_hosts: "{{ g_node_hosts | intersect(groups['meta-sub-host-type_compute'] | default([])) }}"

@@ -1,33 +0,0 @@
---
- hosts: localhost
  gather_facts: no
  tasks:
    - include_vars: vars.yml
    - include_vars: cluster_hosts.yml
    - add_host:
        name: "{{ item }}"
        groups: l_oo_all_hosts
      with_items: "{{ g_all_hosts | default([]) }}"

- hosts: l_oo_all_hosts
  gather_facts: no
  tasks:
    - include_vars: vars.yml
    - include_vars: cluster_hosts.yml

- include: ../../common/openshift-cluster/config.yml
  vars:
    g_nodeonmaster: true
    g_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
    g_sudo: "{{ deployment_vars[deployment_type].become }}"
    openshift_cluster_id: "{{ cluster_id }}"
    openshift_debug_level: "{{ debug_level }}"
    openshift_deployment_type: "{{ deployment_type }}"
    openshift_hosted_registry_selector: 'type=infra'
    openshift_hosted_router_selector: 'type=infra'
    openshift_master_cluster_method: 'native'
    openshift_use_openshift_sdn: "{{ lookup('oo_option', 'use_openshift_sdn') }}"
    os_sdn_network_plugin_name: "{{ lookup('oo_option', 'sdn_network_plugin_name') }}"
    openshift_use_flannel: "{{ lookup('oo_option', 'use_flannel') }}"
    openshift_use_calico: "{{ lookup('oo_option', 'use_calico') }}"
    openshift_use_fluentd: "{{ lookup('oo_option', 'use_fluentd') }}"
@@ -1,508 +0,0 @@
heat_template_version: 2014-10-16

description: OpenShift cluster

parameters:

  cluster_env:
    type: string
    label: Cluster environment
    description: Environment of the cluster

  cluster_id:
    type: string
    label: Cluster ID
    description: Identifier of the cluster

  subnet_24_prefix:
    type: string
    label: subnet /24 prefix
    description: /24 subnet prefix of the network of the cluster (dot separated number triplet)

  dns_nameservers:
    type: comma_delimited_list
    label: DNS nameservers list
    description: List of DNS nameservers

  external_net:
    type: string
    label: External network
    description: Name of the external network
    default: external

  ssh_public_key:
    type: string
    label: SSH public key
    description: SSH public key
    hidden: true

  ssh_incoming:
    type: string
    label: Source of ssh connections
    description: Source of legitimate ssh connections
    default: 0.0.0.0/0

  node_port_incoming:
    type: string
    label: Source of node port connections
    description: Authorized sources targeting node ports
    default: 0.0.0.0/0

  num_etcd:
    type: number
    label: Number of etcd nodes
    description: Number of etcd nodes

  num_masters:
    type: number
    label: Number of masters
    description: Number of masters

  num_nodes:
    type: number
    label: Number of compute nodes
    description: Number of compute nodes

  num_infra:
    type: number
    label: Number of infrastructure nodes
    description: Number of infrastructure nodes

  etcd_image:
    type: string
    label: Etcd image
    description: Name of the image for the etcd servers

  master_image:
    type: string
    label: Master image
    description: Name of the image for the master servers

  node_image:
    type: string
    label: Node image
    description: Name of the image for the compute node servers

  infra_image:
    type: string
    label: Infra image
    description: Name of the image for the infra node servers

  etcd_flavor:
    type: string
    label: Etcd flavor
    description: Flavor of the etcd servers

  master_flavor:
    type: string
    label: Master flavor
    description: Flavor of the master servers

  node_flavor:
    type: string
    label: Node flavor
    description: Flavor of the compute node servers

  infra_flavor:
    type: string
    label: Infra flavor
    description: Flavor of the infra node servers

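Editor's note: the parameters above would ordinarily be supplied to Heat through an environment file. A minimal sketch for reference; every value below is an invented example, not taken from the repository:

```
# Illustrative only: a Heat environment file feeding the template parameters.
parameters:
  cluster_env: dev
  cluster_id: mycluster
  subnet_24_prefix: 192.168.23
  dns_nameservers: 8.8.8.8,8.8.4.4
  external_net: external
  ssh_public_key: "ssh-rsa AAAA... user@host"
  num_etcd: 1
  num_masters: 1
  num_nodes: 2
  num_infra: 1
  etcd_image: centos-7
  master_image: centos-7
  node_image: centos-7
  infra_image: centos-7
  etcd_flavor: m1.small
  master_flavor: m1.medium
  node_flavor: m1.medium
  infra_flavor: m1.medium
```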
outputs:

  etcd_names:
    description: Names of the etcd servers
    value: { get_attr: [ etcd, name ] }

  etcd_ips:
    description: Private IPs of the etcd servers
    value: { get_attr: [ etcd, private_ip ] }

  etcd_floating_ips:
    description: Floating IPs of the etcd servers
    value: { get_attr: [ etcd, floating_ip ] }

  master_names:
    description: Names of the masters
    value: { get_attr: [ masters, name ] }

  master_ips:
    description: Private IPs of the masters
    value: { get_attr: [ masters, private_ip ] }

  master_floating_ips:
    description: Floating IPs of the masters
    value: { get_attr: [ masters, floating_ip ] }

  node_names:
    description: Names of the compute nodes
    value: { get_attr: [ compute_nodes, name ] }

  node_ips:
    description: Private IPs of the compute nodes
    value: { get_attr: [ compute_nodes, private_ip ] }

  node_floating_ips:
    description: Floating IPs of the compute nodes
    value: { get_attr: [ compute_nodes, floating_ip ] }

  infra_names:
    description: Names of the infra nodes
    value: { get_attr: [ infra_nodes, name ] }

  infra_ips:
    description: Private IPs of the infra nodes
    value: { get_attr: [ infra_nodes, private_ip ] }

  infra_floating_ips:
    description: Floating IPs of the infra nodes
    value: { get_attr: [ infra_nodes, floating_ip ] }

resources:

  net:
    type: OS::Neutron::Net
    properties:
      name:
        str_replace:
          template: openshift-ansible-cluster_id-net
          params:
            cluster_id: { get_param: cluster_id }

  subnet:
    type: OS::Neutron::Subnet
    properties:
      name:
        str_replace:
          template: openshift-ansible-cluster_id-subnet
          params:
            cluster_id: { get_param: cluster_id }
      network: { get_resource: net }
      cidr:
        str_replace:
          template: subnet_24_prefix.0/24
          params:
            subnet_24_prefix: { get_param: subnet_24_prefix }
      dns_nameservers: { get_param: dns_nameservers }

  router:
    type: OS::Neutron::Router
    properties:
      name:
        str_replace:
          template: openshift-ansible-cluster_id-router
          params:
            cluster_id: { get_param: cluster_id }
      external_gateway_info:
        network: { get_param: external_net }

  interface:
    type: OS::Neutron::RouterInterface
    properties:
      router_id: { get_resource: router }
      subnet_id: { get_resource: subnet }

  keypair:
    type: OS::Nova::KeyPair
    properties:
      name:
        str_replace:
          template: openshift-ansible-cluster_id-keypair
          params:
            cluster_id: { get_param: cluster_id }
      public_key: { get_param: ssh_public_key }
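
  # Master security group. Port roles as used by OpenShift at the time:
  # 22 ssh, 4001 legacy etcd client port, 8443/8444 API and console,
  # 53 and 8053 (tcp/udp) DNS, 24224 (tcp/udp) fluentd, 2224 pcsd and
  # 5404/5405 corosync for HA masters, 9090 cockpit.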
  master-secgrp:
    type: OS::Neutron::SecurityGroup
    properties:
      name:
        str_replace:
          template: openshift-ansible-cluster_id-master-secgrp
          params:
            cluster_id: { get_param: cluster_id }
      description:
        str_replace:
          template: Security group for cluster_id OpenShift cluster master
          params:
            cluster_id: { get_param: cluster_id }
      rules:
        - direction: ingress
          protocol: tcp
          port_range_min: 22
          port_range_max: 22
          remote_ip_prefix: { get_param: ssh_incoming }
        - direction: ingress
          protocol: tcp
          port_range_min: 4001
          port_range_max: 4001
        - direction: ingress
          protocol: tcp
          port_range_min: 8443
          port_range_max: 8443
        - direction: ingress
          protocol: tcp
          port_range_min: 8444
          port_range_max: 8444
        - direction: ingress
          protocol: tcp
          port_range_min: 53
          port_range_max: 53
        - direction: ingress
          protocol: udp
          port_range_min: 53
          port_range_max: 53
        - direction: ingress
          protocol: tcp
          port_range_min: 8053
          port_range_max: 8053
        - direction: ingress
          protocol: udp
          port_range_min: 8053
          port_range_max: 8053
        - direction: ingress
          protocol: tcp
          port_range_min: 24224
          port_range_max: 24224
        - direction: ingress
          protocol: udp
          port_range_min: 24224
          port_range_max: 24224
        - direction: ingress
          protocol: tcp
          port_range_min: 2224
          port_range_max: 2224
        - direction: ingress
          protocol: udp
          port_range_min: 5404
          port_range_max: 5404
        - direction: ingress
          protocol: udp
          port_range_min: 5405
          port_range_max: 5405
        - direction: ingress
          protocol: tcp
          port_range_min: 9090
          port_range_max: 9090
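
  # Etcd security group: 2379 client traffic, allowed only from the master
  # security group; 2380 peer traffic. A rule with remote_mode:
  # remote_group_id and no explicit remote_group_id applies to members of
  # this same security group.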
  etcd-secgrp:
    type: OS::Neutron::SecurityGroup
    properties:
      name:
        str_replace:
          template: openshift-ansible-cluster_id-etcd-secgrp
          params:
            cluster_id: { get_param: cluster_id }
      description:
        str_replace:
          template: Security group for cluster_id etcd cluster
          params:
            cluster_id: { get_param: cluster_id }
      rules:
        - direction: ingress
          protocol: tcp
          port_range_min: 22
          port_range_max: 22
          remote_ip_prefix: { get_param: ssh_incoming }
        - direction: ingress
          protocol: tcp
          port_range_min: 2379
          port_range_max: 2379
          remote_mode: remote_group_id
          remote_group_id: { get_resource: master-secgrp }
        - direction: ingress
          protocol: tcp
          port_range_min: 2380
          port_range_max: 2380
          remote_mode: remote_group_id
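
  # Node security group: 10250 kubelet and 4789/udp VXLAN (SDN) restricted
  # to cluster members, plus the Kubernetes NodePort range 30000-32767 from
  # node_port_incoming.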
  node-secgrp:
    type: OS::Neutron::SecurityGroup
    properties:
      name:
        str_replace:
          template: openshift-ansible-cluster_id-node-secgrp
          params:
            cluster_id: { get_param: cluster_id }
      description:
        str_replace:
          template: Security group for cluster_id OpenShift cluster nodes
          params:
            cluster_id: { get_param: cluster_id }
      rules:
        - direction: ingress
          protocol: tcp
          port_range_min: 22
          port_range_max: 22
          remote_ip_prefix: { get_param: ssh_incoming }
        - direction: ingress
          protocol: tcp
          port_range_min: 10250
          port_range_max: 10250
          remote_mode: remote_group_id
        - direction: ingress
          protocol: udp
          port_range_min: 4789
          port_range_max: 4789
          remote_mode: remote_group_id
        - direction: ingress
          protocol: tcp
          port_range_min: 30000
          port_range_max: 32767
          remote_ip_prefix: { get_param: node_port_incoming }
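
  # Infra nodes additionally expose the router ports, 80 (http) and 443
  # (https), to the world.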
  infra-secgrp:
    type: OS::Neutron::SecurityGroup
    properties:
      name:
        str_replace:
          template: openshift-ansible-cluster_id-infra-secgrp
          params:
            cluster_id: { get_param: cluster_id }
      description:
        str_replace:
          template: Security group for cluster_id OpenShift infrastructure cluster nodes
          params:
            cluster_id: { get_param: cluster_id }
      rules:
        - direction: ingress
          protocol: tcp
          port_range_min: 80
          port_range_max: 80
        - direction: ingress
          protocol: tcp
          port_range_min: 443
          port_range_max: 443
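
  # Each host class below is an OS::Heat::ResourceGroup stamping out `count`
  # copies of heat_stack_server.yaml; Heat substitutes %index% in the name
  # template with the member's index within the group.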
  etcd:
    type: OS::Heat::ResourceGroup
    properties:
      count: { get_param: num_etcd }
      resource_def:
        type: heat_stack_server.yaml
        properties:
          name:
            str_replace:
              template: cluster_id-k8s_type-%index%
              params:
                cluster_id: { get_param: cluster_id }
                k8s_type: etcd
          cluster_env: { get_param: cluster_env }
          cluster_id: { get_param: cluster_id }
          type: etcd
          image: { get_param: etcd_image }
          flavor: { get_param: etcd_flavor }
          key_name: { get_resource: keypair }
          net: { get_resource: net }
          subnet: { get_resource: subnet }
          secgrp:
            - { get_resource: etcd-secgrp }
          floating_network: { get_param: external_net }
          net_name:
            str_replace:
              template: openshift-ansible-cluster_id-net
              params:
                cluster_id: { get_param: cluster_id }
    depends_on:
      - interface

  masters:
    type: OS::Heat::ResourceGroup
    properties:
      count: { get_param: num_masters }
      resource_def:
        type: heat_stack_server.yaml
        properties:
          name:
            str_replace:
              template: cluster_id-k8s_type-%index%
              params:
                cluster_id: { get_param: cluster_id }
                k8s_type: master
          cluster_env: { get_param: cluster_env }
          cluster_id: { get_param: cluster_id }
          type: master
          image: { get_param: master_image }
          flavor: { get_param: master_flavor }
          key_name: { get_resource: keypair }
          net: { get_resource: net }
          subnet: { get_resource: subnet }
          secgrp:
            - { get_resource: master-secgrp }
            - { get_resource: node-secgrp }
          floating_network: { get_param: external_net }
          net_name:
            str_replace:
              template: openshift-ansible-cluster_id-net
              params:
                cluster_id: { get_param: cluster_id }
    depends_on:
      - interface

  compute_nodes:
    type: OS::Heat::ResourceGroup
    properties:
      count: { get_param: num_nodes }
      resource_def:
        type: heat_stack_server.yaml
        properties:
          name:
            str_replace:
              template: cluster_id-k8s_type-sub_host_type-%index%
              params:
                cluster_id: { get_param: cluster_id }
                k8s_type: node
                sub_host_type: compute
          cluster_env: { get_param: cluster_env }
          cluster_id: { get_param: cluster_id }
          type: node
          subtype: compute
          image: { get_param: node_image }
          flavor: { get_param: node_flavor }
          key_name: { get_resource: keypair }
          net: { get_resource: net }
          subnet: { get_resource: subnet }
          secgrp:
            - { get_resource: node-secgrp }
          floating_network: { get_param: external_net }
          net_name:
            str_replace:
              template: openshift-ansible-cluster_id-net
              params:
                cluster_id: { get_param: cluster_id }
    depends_on:
      - interface

  infra_nodes:
    type: OS::Heat::ResourceGroup
    properties:
      count: { get_param: num_infra }
      resource_def:
        type: heat_stack_server.yaml
        properties:
          name:
            str_replace:
              template: cluster_id-k8s_type-sub_host_type-%index%
              params:
                cluster_id: { get_param: cluster_id }
                k8s_type: node
                sub_host_type: infra
          cluster_env: { get_param: cluster_env }
          cluster_id: { get_param: cluster_id }
          type: node
          subtype: infra
          image: { get_param: infra_image }
          flavor: { get_param: infra_flavor }
          key_name: { get_resource: keypair }
          net: { get_resource: net }
          subnet: { get_resource: subnet }
          secgrp:
            - { get_resource: node-secgrp }
            - { get_resource: infra-secgrp }
          floating_network: { get_param: external_net }
          net_name:
            str_replace:
              template: openshift-ansible-cluster_id-net
              params:
                cluster_id: { get_param: cluster_id }
    depends_on:
      - interface

@@ -1,152 +0,0 @@
heat_template_version: 2014-10-16

description: OpenShift cluster server

parameters:

  name:
    type: string
    label: Name
    description: Name

  cluster_env:
    type: string
    label: Cluster environment
    description: Environment of the cluster

  cluster_id:
    type: string
    label: Cluster ID
    description: Identifier of the cluster

  type:
    type: string
    label: Type
    description: Type master or node

  subtype:
    type: string
    label: Sub-type
    description: Sub-type compute or infra for nodes, default otherwise
    default: default

  key_name:
    type: string
    label: Key name
    description: Key name of keypair

  image:
    type: string
    label: Image
    description: Name of the image

  flavor:
    type: string
    label: Flavor
    description: Name of the flavor

  net:
    type: string
    label: Net ID
    description: Net resource

  net_name:
    type: string
    label: Net name
    description: Net name

  subnet:
    type: string
    label: Subnet ID
    description: Subnet resource

  secgrp:
    type: comma_delimited_list
    label: Security groups
    description: Security group resources

  floating_network:
    type: string
    label: Floating network
    description: Network to allocate floating IP from

outputs:

  name:
    description: Name of the server
    value: { get_attr: [ server, name ] }

  private_ip:
    description: Private IP of the server
    value:
      get_attr:
        - server
        - addresses
        - { get_param: net_name }
        - 0
        - addr
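
  # Nova reports a server's addresses per network as an ordered list; with a
  # single NIC the fixed (private) IP is entry 0 and the floating IP, once
  # attached, entry 1.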
  floating_ip:
    description: Floating IP of the server
    value:
      get_attr:
        - server
        - addresses
        - { get_param: net_name }
        - 1
        - addr

resources:

  server:
    type: OS::Nova::Server
    properties:
      name: { get_param: name }
      key_name: { get_param: key_name }
      image: { get_param: image }
      flavor: { get_param: flavor }
      networks:
        - port: { get_resource: port }
      user_data: { get_resource: config }
      user_data_format: RAW
      metadata:
        environment: { get_param: cluster_env }
        clusterid: { get_param: cluster_id }
        host-type: { get_param: type }
        sub-host-type: { get_param: subtype }

  port:
    type: OS::Neutron::Port
    properties:
      network: { get_param: net }
      fixed_ips:
        - subnet: { get_param: subnet }
      security_groups: { get_param: secgrp }

  floating-ip:
    type: OS::Neutron::FloatingIP
    properties:
      floating_network: { get_param: floating_network }
      port_id: { get_resource: port }

  config:
    type: OS::Heat::CloudConfig
    properties:
      cloud_config:
        disable_root: true

        hostname: { get_param: name }

        system_info:
          default_user:
            name: openshift
            sudo: ["ALL=(ALL) NOPASSWD: ALL"]

        write_files:
          - path: /etc/sudoers.d/00-openshift-no-requiretty
            permissions: 440
            # content: Defaults:openshift !requiretty
            # Encoded in base64 to be sure that we do not forget the trailing newline or
            # sudo will not be able to parse that file
            encoding: b64
            content: RGVmYXVsdHM6b3BlbnNoaWZ0ICFyZXF1aXJldHR5Cg==

@@ -1 +0,0 @@
../../../filter_plugins

@@ -1,191 +0,0 @@
---
- name: Launch instance(s)
  hosts: localhost
  become: no
  connection: local
  gather_facts: no
  vars_files:
    - vars.yml
  tasks:
    # TODO: Write an Ansible module for dealing with HEAT stacks
    # Dealing with the outputs is currently terrible

    - name: Check OpenStack stack
      command: 'heat stack-show openshift-ansible-{{ cluster_id }}-stack'
      register: stack_show_result
      changed_when: false
      failed_when: stack_show_result.rc != 0 and 'Stack not found' not in stack_show_result.stderr
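
    # `heat stack-show` exits non-zero when the stack does not exist yet, so
    # the return code selects between an initial stack-create and a
    # stack-update of the existing stack.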
    - set_fact:
        heat_stack_action: 'stack-create'
      when: stack_show_result.rc == 1
    - set_fact:
        heat_stack_action: 'stack-update'
      when: stack_show_result.rc == 0
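
    # These tasks drive the legacy `heat` CLI (python-heatclient); on newer
    # OpenStack releases the same operations live under `openstack stack ...`.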
    - name: Create or Update OpenStack Stack
      command: 'heat {{ heat_stack_action }} -f {{ openstack_infra_heat_stack }}
               --timeout {{ openstack_heat_timeout }}
               -P cluster_env={{ cluster_env }}
               -P cluster_id={{ cluster_id }}
               -P subnet_24_prefix={{ openstack_subnet_24_prefix }}
               -P dns_nameservers={{ openstack_network_dns | join(",") }}
               -P external_net={{ openstack_network_external_net }}
               -P ssh_public_key="{{ openstack_ssh_public_key }}"
               -P ssh_incoming={{ openstack_ssh_access_from }}
               -P node_port_incoming={{ openstack_node_port_access_from }}
               -P num_etcd={{ num_etcd }}
               -P num_masters={{ num_masters }}
               -P num_nodes={{ num_nodes }}
               -P num_infra={{ num_infra }}
               -P etcd_image={{ deployment_vars[deployment_type].image }}
               -P master_image={{ deployment_vars[deployment_type].image }}
               -P node_image={{ deployment_vars[deployment_type].image }}
               -P infra_image={{ deployment_vars[deployment_type].image }}
               -P etcd_flavor={{ openstack_flavor["etcd"] }}
               -P master_flavor={{ openstack_flavor["master"] }}
               -P node_flavor={{ openstack_flavor["node"] }}
               -P infra_flavor={{ openstack_flavor["infra"] }}
               openshift-ansible-{{ cluster_id }}-stack'
      args:
        chdir: '{{ playbook_dir }}'

    - name: Wait for OpenStack Stack readiness
      shell: 'heat stack-show openshift-ansible-{{ cluster_id }}-stack | awk ''$2 == "stack_status" {print $4}'''
      register: stack_show_status_result
      until: stack_show_status_result.stdout not in ['CREATE_IN_PROGRESS', 'UPDATE_IN_PROGRESS']
      retries: 30
      delay: 5

    - name: Display the stack resources
      command: 'heat resource-list openshift-ansible-{{ cluster_id }}-stack'
      register: stack_resource_list_result
      when: stack_show_status_result.stdout not in ['CREATE_COMPLETE', 'UPDATE_COMPLETE']

    - name: Display the stack status
      command: 'heat stack-show openshift-ansible-{{ cluster_id }}-stack'
      register: stack_show_result
      when: stack_show_status_result.stdout not in ['CREATE_COMPLETE', 'UPDATE_COMPLETE']

    - name: Delete the stack
      command: 'heat stack-delete openshift-ansible-{{ cluster_id }}-stack'
      when: stack_show_status_result.stdout not in ['CREATE_COMPLETE', 'UPDATE_COMPLETE']

    - fail:
        msg: |

          +-----------------------------------------+
          |   ^                                     |
          |  /!\  Failed to create the heat stack   |
          | /___\                                   |
          +-----------------------------------------+

          Here is the list of stack resources and their status:
          {{ stack_resource_list_result.stdout }}

          Here is the status of the stack:
          {{ stack_show_result.stdout }}

            ^    Failed to create the heat stack
           /!\
          /___\  Please check the `stack_status_reason` line in the above array to know why.
      when: stack_show_status_result.stdout not in ['CREATE_COMPLETE', 'UPDATE_COMPLETE']

    - name: Read OpenStack Stack outputs
      command: 'heat stack-show openshift-ansible-{{ cluster_id }}-stack'
      register: stack_show_result

    - set_fact:
        parsed_outputs: "{{ stack_show_result | oo_parse_heat_stack_outputs }}"

    - name: Add new etcd instances groups and variables
      add_host:
        hostname: '{{ item[0] }}'
        ansible_ssh_host: '{{ item[2] }}'
        ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
        ansible_become: "{{ deployment_vars[deployment_type].become }}"
        groups: 'meta-environment_{{ cluster_env }}, meta-host-type_etcd, meta-sub-host-type_default, meta-clusterid_{{ cluster_id }}'
        openshift_node_labels:
          type: "etcd"
        openstack:
          public_v4: '{{ item[2] }}'
          private_v4: '{{ item[1] }}'
      with_together:
        - '{{ parsed_outputs.etcd_names }}'
        - '{{ parsed_outputs.etcd_ips }}'
        - '{{ parsed_outputs.etcd_floating_ips }}'

    - name: Add new master instances groups and variables
      add_host:
        hostname: '{{ item[0] }}'
        ansible_ssh_host: '{{ item[2] }}'
        ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
        ansible_become: "{{ deployment_vars[deployment_type].become }}"
        groups: 'meta-environment_{{ cluster_env }}, meta-host-type_master, meta-sub-host-type_default, meta-clusterid_{{ cluster_id }}'
        openshift_node_labels:
          type: "master"
        openstack:
          public_v4: '{{ item[2] }}'
          private_v4: '{{ item[1] }}'
      with_together:
        - '{{ parsed_outputs.master_names }}'
        - '{{ parsed_outputs.master_ips }}'
        - '{{ parsed_outputs.master_floating_ips }}'

    - name: Add new node instances groups and variables
      add_host:
        hostname: '{{ item[0] }}'
        ansible_ssh_host: '{{ item[2] }}'
        ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
        ansible_become: "{{ deployment_vars[deployment_type].become }}"
        groups: 'meta-environment_{{ cluster_env }}, meta-host-type_node, meta-sub-host-type_compute, meta-clusterid_{{ cluster_id }}'
        openshift_node_labels:
          type: "compute"
        openstack:
          public_v4: '{{ item[2] }}'
          private_v4: '{{ item[1] }}'
      with_together:
        - '{{ parsed_outputs.node_names }}'
        - '{{ parsed_outputs.node_ips }}'
        - '{{ parsed_outputs.node_floating_ips }}'

    - name: Add new infra instances groups and variables
      add_host:
        hostname: '{{ item[0] }}'
        ansible_ssh_host: '{{ item[2] }}'
        ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
        ansible_become: "{{ deployment_vars[deployment_type].become }}"
        groups: 'meta-environment_{{ cluster_env }}, meta-host-type_node, meta-sub-host-type_infra, meta-clusterid_{{ cluster_id }}'
        openshift_node_labels:
          type: "infra"
        openstack:
          public_v4: '{{ item[2] }}'
          private_v4: '{{ item[1] }}'
      with_together:
        - '{{ parsed_outputs.infra_names }}'
        - '{{ parsed_outputs.infra_ips }}'
        - '{{ parsed_outputs.infra_floating_ips }}'

    - name: Wait for ssh
      wait_for:
        host: '{{ item }}'
        port: 22
      with_flattened:
        - '{{ parsed_outputs.master_floating_ips }}'
        - '{{ parsed_outputs.node_floating_ips }}'
        - '{{ parsed_outputs.infra_floating_ips }}'

    - name: Wait for user setup
      command: 'ssh -o StrictHostKeyChecking=no -o PasswordAuthentication=no -o ConnectTimeout=10 -o UserKnownHostsFile=/dev/null {{ deployment_vars[deployment_type].ssh_user }}@{{ item }} echo {{ deployment_vars[deployment_type].ssh_user }} user is setup'
      register: result
      until: result.rc == 0
      retries: 30
      delay: 1
      with_flattened:
        - '{{ parsed_outputs.master_floating_ips }}'
        - '{{ parsed_outputs.node_floating_ips }}'
        - '{{ parsed_outputs.infra_floating_ips }}'

- include: update.yml

- include: list.yml

@@ -1,24 +0,0 @@
---
- name: Generate oo_list_hosts group
  hosts: localhost
  become: no
  connection: local
  gather_facts: no
  vars_files:
    - vars.yml
  tasks:
    - set_fact: scratch_group=meta-clusterid_{{ cluster_id }}
      when: cluster_id != ''
    - set_fact: scratch_group=all
      when: cluster_id == ''
    - add_host:
        name: "{{ item }}"
        groups: oo_list_hosts
        ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
        ansible_ssh_host: "{{ hostvars[item].ansible_ssh_host | default(item) }}"
        ansible_become: "{{ deployment_vars[deployment_type].become }}"
        oo_public_ipv4: "{{ hostvars[item].openstack.public_v4 }}"
        oo_private_ipv4: "{{ hostvars[item].openstack.private_v4 }}"
      with_items: "{{ groups[scratch_group] | default([]) | difference(['localhost']) }}"
    - debug:
        msg: "{{ hostvars | oo_select_keys(groups[scratch_group] | default([])) | oo_pretty_print_cluster('meta-') }}"

@@ -1 +0,0 @@
../../../lookup_plugins

@@ -1 +0,0 @@
../../../roles

@@ -1,49 +0,0 @@
---
- name: Terminate instance(s)
  hosts: localhost
  become: no
  connection: local
  gather_facts: no
  vars_files:
    - vars.yml
  tasks:
    - add_host:
        name: "{{ item }}"
        groups: oo_hosts_to_terminate
        ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
        ansible_become: "{{ deployment_vars[deployment_type].become }}"
      with_items: "{{ (groups['meta-environment_' ~ cluster_env]|default([])) | intersect(groups['meta-clusterid_' ~ cluster_id]|default([])) }}"

- name: Unsubscribe VMs
  hosts: oo_hosts_to_terminate
  vars_files:
    - vars.yml
  roles:
    - role: rhel_unsubscribe
      when: deployment_type in ['atomic-enterprise', 'enterprise', 'openshift-enterprise'] and
            ansible_distribution == "RedHat" and
            lookup('oo_option', 'rhel_skip_subscription') | default(rhsub_skip, True) |
            default('no', True) | lower in ['no', 'false']

- hosts: localhost
  become: no
  connection: local
  gather_facts: no
  vars_files:
    - vars.yml
  tasks:
    - name: Delete the OpenStack Stack
      command: 'heat stack-delete openshift-ansible-{{ cluster_id }}-stack'
      register: stack_delete_result
      changed_when: stack_delete_result.rc == 0
      failed_when: stack_delete_result.rc != 0 and 'could not be found' not in stack_delete_result.stdout

    - name: Wait for the completion of the OpenStack Stack deletion
      shell: 'heat stack-show openshift-ansible-{{ cluster_id }}-stack | awk ''$2 == "stack_status" {print $4}'''
      when: stack_delete_result.changed
      register: stack_show_result
      until: stack_show_result.stdout != 'DELETE_IN_PROGRESS'
      retries: 60
      delay: 5
      failed_when: '"Stack not found" not in stack_show_result.stderr and
                    stack_show_result.stdout != "DELETE_COMPLETE"'

@@ -1,34 +0,0 @@
---
- hosts: localhost
  gather_facts: no
  tasks:
    - include_vars: vars.yml
    - include_vars: cluster_hosts.yml
    - add_host:
        name: "{{ item }}"
        groups: l_oo_all_hosts
      with_items: "{{ g_all_hosts }}"

- hosts: l_oo_all_hosts
  gather_facts: no
  tasks:
    - include_vars: vars.yml
    - include_vars: cluster_hosts.yml

- name: Populate oo_hosts_to_update group
  hosts: localhost
  connection: local
  become: no
  gather_facts: no
  tasks:
    - name: Evaluate oo_hosts_to_update
      add_host:
        name: "{{ item }}"
        groups: oo_hosts_to_update
        ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
        ansible_become: "{{ deployment_vars[deployment_type].become }}"
      with_items: "{{ g_all_hosts | default([]) }}"

- include: ../../common/openshift-cluster/update_repos_and_packages.yml

- include: config.yml

@@ -1,38 +0,0 @@
# yamllint disable rule:colons
---
debug_level: 2
openstack_infra_heat_stack: "{{ lookup('oo_option', 'infra_heat_stack' ) |
                                default('files/heat_stack.yaml', True) }}"
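# Unless overridden, subnet_24_prefix becomes a random 192.168.x prefix; the
# Heat template appends .0/24 to form the cluster subnet CIDR.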
openstack_subnet_24_prefix: "{{ lookup('oo_option', 'subnet_24_prefix' ) |
                                default('192.168.' + ( ( 1048576 | random % 256 ) | string() ), True) }}"
openstack_network_external_net: "{{ lookup('oo_option', 'external_net' ) |
                                    default('external', True) }}"
openstack_network_dns: "{{ lookup('oo_option', 'dns' ) |
                           default('8.8.8.8,8.8.4.4', True) | oo_split() }}"
openstack_ssh_public_key: "{{ lookup('file', lookup('oo_option', 'public_key') |
                              default('~/.ssh/id_rsa.pub', True)) }}"
openstack_ssh_access_from: "{{ lookup('oo_option', 'ssh_from') |
                               default('0.0.0.0/0', True) }}"
openstack_node_port_access_from: "{{ lookup('oo_option', 'node_port_from') |
                                     default('0.0.0.0/0', True) }}"
openstack_heat_timeout: "{{ lookup('oo_option', 'heat_timeout') |
                            default('3', True) }}"
openstack_flavor:
  etcd: "{{ lookup('oo_option', 'etcd_flavor' ) | default('m1.small', True) }}"
  master: "{{ lookup('oo_option', 'master_flavor' ) | default('m1.small', True) }}"
  infra: "{{ lookup('oo_option', 'infra_flavor' ) | default('m1.small', True) }}"
  node: "{{ lookup('oo_option', 'node_flavor' ) | default('m1.medium', True) }}"

deployment_rhel7_ent_base:
  image: "{{ lookup('oo_option', 'image_name') | default('rhel-guest-image-7.2-20151102.0.x86_64', True) }}"
  ssh_user: openshift
  become: yes

deployment_vars:
  origin:
    image: "{{ lookup('oo_option', 'image_name') | default('centos-70-raw', True) }}"
    ssh_user: openshift
    become: yes
  enterprise: "{{ deployment_rhel7_ent_base }}"
  openshift-enterprise: "{{ deployment_rhel7_ent_base }}"
  atomic-enterprise: "{{ deployment_rhel7_ent_base }}"