mirror of https://github.com/openshift/openshift-ansible-contrib.git

moving roles up one level and deleting more old playbooks
@@ -0,0 +1,13 @@
---
- name: create /etc/origin/cloudprovider
  file:
    state: directory
    path: "{{ vsphere_conf_dir }}"

- name: create the vsphere.conf file
  template:
    src: "{{ role_path }}/templates/vsphere.conf.j2"
    dest: /etc/origin/cloudprovider/vsphere.conf
    owner: root
    group: root
    mode: 0644
@@ -0,0 +1,15 @@
[Global]
user = "{{ openshift_cloudprovider_vsphere_username }}"
password = "{{ openshift_cloudprovider_vsphere_password }}"
server = "{{ openshift_cloudprovider_vsphere_host }}"
port = 443
insecure-flag = 1
datacenter = {{ openshift_cloudprovider_vsphere_datacenter }}
datastore = {{ openshift_cloudprovider_vsphere_datastore }}
{% if openshift_cloudprovider_vsphere_folder is defined %}
working-dir = /{{ openshift_cloudprovider_vsphere_datacenter }}/vm/{{ openshift_cloudprovider_vsphere_folder }}/
{% else %}
working-dir = /{{ openshift_cloudprovider_vsphere_datacenter }}/vm/
{% endif %}
[Disk]
scsicontrollertype = pvscsi
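For reference, a vsphere.conf rendered from this template might look like the following; the vCenter host, credentials, datacenter, datastore, and folder are illustrative placeholders, not values from this commit.

[Global]
user = "administrator@vsphere.local"
password = "changeme"
server = "vcenter.example.com"
port = 443
insecure-flag = 1
datacenter = DC1
datastore = ose3-vmware
working-dir = /DC1/vm/ocp-prod/
[Disk]
scsicontrollertype = pvscsi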
@@ -0,0 +1,3 @@
---
vsphere_conf_dir: /etc/origin/cloudprovider
vsphere_conf: "{{ vsphere_conf_dir }}/vsphere.conf"
@@ -0,0 +1,46 @@
---
- name: Assign app memory for container_storage
  set_fact:
    app_memory: 32768
  when: "'cns' in container_storage"

- name: Assign app memory for container_storage
  set_fact:
    app_memory: 8192
  when: "'cns' not in container_storage"

- name: Create additional production VMs on vCenter
  vmware_guest:
    hostname: "{{ openshift_cloudprovider_vsphere_host }}"
    username: "{{ openshift_cloudprovider_vsphere_username }}"
    password: "{{ openshift_cloudprovider_vsphere_password }}"
    validate_certs: False
    name: "{{ item.value.guestname }}"
    cluster: "{{ openshift_cloudprovider_vsphere_cluster }}"
    datacenter: "{{ openshift_cloudprovider_vsphere_datacenter }}"
    resource_pool: "{{ openshift_cloudprovider_vsphere_resource_pool }}"
    template: "{{ openshift_cloudprovider_vsphere_template }}"
    state: poweredon
    wait_for_ip_address: True
    folder: "{{ openshift_cloudprovider_vsphere_datacenter }}/vm/{{ openshift_cloudprovider_vsphere_folder }}"
    annotation: "{{ item.value.tag }}"
    hardware:
      memory_mb: "{{ app_memory }}"
    networks:
      - name: "{{ openshift_cloudprovider_vsphere_vm_network }}"
        ip: "{{ item.value.ip4addr }}"
        netmask: "{{ openshift_cloudprovider_vsphere_vm_netmask }}"
        gateway: "{{ openshift_cloudprovider_vsphere_vm_gateway }}"
    customization:
      domain: "{{ default_subdomain }}"
      dns_servers:
        - "{{ openshift_cloudprovider_vsphere_vm_dns }}"
      dns_suffix: "{{ default_subdomain }}"
      hostname: "{{ item.value.guestname }}"
  with_dict: "{{ host_inventory }}"
  when: "'master' in item.value.guestname or 'app' in item.value.guestname or 'infra' in item.value.guestname"

- name: Add additional production VMs to inventory
  add_host: hostname="{{ item.value.guestname }}" ansible_ssh_host="{{ item.value.ip4addr }}" groups="{{ item.value.tag }}, production_group, new_nodes"
  with_dict: "{{ host_inventory }}"
  when: "'master' in item.value.guestname or 'app' in item.value.guestname or 'infra' in item.value.guestname"
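These loops assume each host_inventory entry provides guestname, tag, and ip4addr keys. A minimal, hypothetical inventory fragment of that shape (names and TEST-NET addresses are illustrative only, not part of this commit):

host_inventory:
  app-0:
    guestname: app-0
    tag: app
    ip4addr: 192.0.2.10
  infra-0:
    guestname: infra-0
    tag: infra
    ip4addr: 192.0.2.11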
@@ -0,0 +1,43 @@
---
- name: Create CNS production VMs on vCenter
  vmware_guest:
    hostname: "{{ openshift_cloudprovider_vsphere_host }}"
    username: "{{ openshift_cloudprovider_vsphere_username }}"
    password: "{{ openshift_cloudprovider_vsphere_password }}"
    validate_certs: False
    name: "{{ hostvars[item].inventory_hostname }}"
    cluster: "{{ openshift_cloudprovider_vsphere_cluster }}"
    datacenter: "{{ openshift_cloudprovider_vsphere_datacenter }}"
    resource_pool: "{{ openshift_cloudprovider_vsphere_resource_pool }}"
    template: "{{ openshift_cloudprovider_vsphere_template }}"
    state: poweredon
    wait_for_ip_address: true
    folder: "{{ openshift_cloudprovider_vsphere_datacenter }}/vm/{{ openshift_cloudprovider_vsphere_folder }}"
    annotation: "{{ hostvars[item].openshift_node_labels }}"
    disk:
      - size_gb: 60
        datastore: "{{ openshift_cloudprovider_vsphere_datastore }}"
        type: thin
      - size_gb: 40
        datastore: "{{ openshift_cloudprovider_vsphere_datastore }}"
        type: thin
      - size_gb: 40
        datastore: "{{ openshift_cloudprovider_vsphere_datastore }}"
        type: thin
      - size_gb: 300
        datastore: "{{ openshift_cloudprovider_vsphere_datastore }}"
        type: eagerZeroedThick
    hardware:
      memory_mb: 32768
    networks:
      - name: "{{ openshift_cloudprovider_vsphere_vm_network }}"
        ip: "{{ hostvars[item].ipv4addr }}"
        netmask: "{{ openshift_cloudprovider_vsphere_vm_netmask }}"
        gateway: "{{ openshift_cloudprovider_vsphere_vm_gateway }}"
    customization:
      domain: "{{ default_subdomain }}"
      dns_servers:
        - "{{ openshift_cloudprovider_vsphere_vm_dns }}"
      dns_suffix: "{{ default_subdomain }}"
      hostname: "{{ hostvars[item].inventory_hostname }}"
  with_items: "{{ groups['storage'] }}"
@@ -0,0 +1,53 @@
---
- name: Set cluster_id crs annotation
  set_fact:
    crs_annotation: "{{ cluster_id }}-crs"

- name: Create CRS production VMs on vCenter
  vmware_guest:
    hostname: "{{ openshift_cloudprovider_vsphere_host }}"
    username: "{{ openshift_cloudprovider_vsphere_username }}"
    password: "{{ openshift_cloudprovider_vsphere_password }}"
    validate_certs: False
    name: "{{ item.value.guestname }}"
    cluster: "{{ openshift_cloudprovider_vsphere_cluster }}"
    datacenter: "{{ openshift_cloudprovider_vsphere_datacenter }}"
    resource_pool: "{{ openshift_cloudprovider_vsphere_resource_pool }}"
    template: "{{ openshift_cloudprovider_vsphere_template }}"
    state: poweredon
    wait_for_ip_address: true
    folder: "{{ openshift_cloudprovider_vsphere_datacenter }}/vm/{{ openshift_cloudprovider_vsphere_folder }}"
    annotation: "{{ crs_annotation }}"
    disk:
      - size_gb: 60
        datastore: "{{ openshift_cloudprovider_vsphere_datastore }}"
        type: thin
      - size_gb: 40
        datastore: "{{ openshift_cloudprovider_vsphere_datastore }}"
        type: thin
      - size_gb: 40
        datastore: "{{ openshift_cloudprovider_vsphere_datastore }}"
        type: thin
      - size_gb: 300
        datastore: "{{ openshift_cloudprovider_vsphere_datastore }}"
        type: eagerZeroedThick
    hardware:
      memory_mb: 32768
    networks:
      - name: "{{ openshift_cloudprovider_vsphere_vm_network }}"
        ip: "{{ item.value.ip4addr }}"
        netmask: "{{ openshift_cloudprovider_vsphere_vm_netmask }}"
        gateway: "{{ openshift_cloudprovider_vsphere_vm_gateway }}"
    customization:
      domain: "{{ default_subdomain }}"
      dns_servers:
        - "{{ openshift_cloudprovider_vsphere_vm_dns }}"
      dns_suffix: "{{ default_subdomain }}"
      hostname: "{{ item.value.guestname }}"
  with_dict: "{{ host_inventory }}"
  when: "'crs' in item.value.guestname"

- name: Add CRS production VMs to inventory
  add_host: hostname="{{ item.value.guestname }}" ansible_ssh_host="{{ item.value.ip4addr }}" groups="{{ crs_annotation }}, crs, production_group"
  with_dict: "{{ host_inventory }}"
  when: "'crs' in item.value.guestname"
@@ -0,0 +1,39 @@
---
- name: Create haproxy VM on vCenter
  vmware_guest:
    hostname: "{{ openshift_cloudprovider_vsphere_host }}"
    username: "{{ openshift_cloudprovider_vsphere_username }}"
    password: "{{ openshift_cloudprovider_vsphere_password }}"
    validate_certs: False
    name: "{{ hostvars[item].vm_name }}"
    cluster: "{{ openshift_cloudprovider_vsphere_cluster }}"
    datacenter: "{{ openshift_cloudprovider_vsphere_datacenter }}"
    resource_pool: "{{ openshift_cloudprovider_vsphere_resource_pool }}"
    template: "{{ openshift_cloudprovider_vsphere_template }}"
    folder: "{{ openshift_cloudprovider_vsphere_datacenter }}/vm/{{ openshift_cloudprovider_vsphere_folder }}"
    annotation: "{{ hostvars[item].openshift_node_labels }}"
    state: poweredon
    wait_for_ip_address: true
    disk:
      - size_gb: 60
        datastore: "{{ openshift_cloudprovider_vsphere_datastore }}"
        type: thin
      - size_gb: 40
        datastore: "{{ openshift_cloudprovider_vsphere_datastore }}"
        type: thin
      - size_gb: 40
        datastore: "{{ openshift_cloudprovider_vsphere_datastore }}"
        type: thin
    networks:
      - name: "{{ openshift_cloudprovider_vsphere_vm_network }}"
        ip: "{{ hostvars[item].ipv4addr }}"
        netmask: "{{ openshift_cloudprovider_vsphere_vm_netmask }}"
        gateway: "{{ openshift_cloudprovider_vsphere_vm_gateway }}"
    customization:
      domain: "{{ default_subdomain }}"
      dns_servers:
        - "{{ openshift_cloudprovider_vsphere_vm_dns }}"
      dns_suffix: "{{ default_subdomain }}"
      hostname: "{{ hostvars[item].vm_name }}"
  with_items:
    - "{{ groups['haproxy'] }}"
@@ -0,0 +1,52 @@
---
#- name: Assign app memory for container_storage
#  set_fact:
#    app_memory: 32768
#  when: "'cns' in container_storage"

#- name: Assign app memory for container_storage
#  set_fact:
#    app_memory: 8192
#  when: "'cns' not in container_storage"

- name: Create production app node VMs on vCenter
  vmware_guest:
    hostname: "{{ openshift_cloudprovider_vsphere_host }}"
    username: "{{ openshift_cloudprovider_vsphere_username }}"
    password: "{{ openshift_cloudprovider_vsphere_password }}"
    validate_certs: False
    name: "{{ hostvars[item].inventory_hostname }}"
    cluster: "{{ openshift_cloudprovider_vsphere_cluster }}"
    datacenter: "{{ openshift_cloudprovider_vsphere_datacenter }}"
    resource_pool: "{{ openshift_cloudprovider_vsphere_resource_pool }}"
    template: "{{ openshift_cloudprovider_vsphere_template }}"
    state: poweredon
    wait_for_ip_address: true
    disk:
      - size_gb: 60
        datastore: "{{ openshift_cloudprovider_vsphere_datastore }}"
        type: thin
      - size_gb: 40
        datastore: "{{ openshift_cloudprovider_vsphere_datastore }}"
        type: thin
      - size_gb: 40
        datastore: "{{ openshift_cloudprovider_vsphere_datastore }}"
        type: thin
      - size_gb: 300
        datastore: "{{ openshift_cloudprovider_vsphere_datastore }}"
    folder: "{{ openshift_cloudprovider_vsphere_datacenter }}/vm/{{ openshift_cloudprovider_vsphere_folder }}"
    annotation: "{{ hostvars[item].openshift_node_labels }}"
    hardware:
      memory_mb: "32768"
    networks:
      - name: "{{ openshift_cloudprovider_vsphere_vm_network }}"
        ip: "{{ hostvars[item].ipv4addr }}"
        netmask: "{{ openshift_cloudprovider_vsphere_vm_netmask }}"
        gateway: "{{ openshift_cloudprovider_vsphere_vm_gateway }}"
    customization:
      domain: "{{ default_subdomain }}"
      dns_servers:
        - "{{ openshift_cloudprovider_vsphere_vm_dns }}"
      dns_suffix: "{{ default_subdomain }}"
      hostname: "{{ hostvars[item].inventory_hostname }}"
  with_items: "{{ groups['apps'] }}"
@@ -0,0 +1,139 @@
---
#- name: Assign app memory for container_storage
#  set_fact:
#    app_memory: 32768
#  when: "'cns' in container_storage"

#- name: Assign app memory for container_storage
#  set_fact:
#    app_memory: 8192
#  when: "'cns' not in container_storage"

- name: Create production master node VMs on vCenter
  vmware_guest:
    hostname: "{{ openshift_cloudprovider_vsphere_host }}"
    username: "{{ openshift_cloudprovider_vsphere_username }}"
    password: "{{ openshift_cloudprovider_vsphere_password }}"
    validate_certs: False
    name: "{{ hostvars[item].vm_name }}"
    cluster: "{{ openshift_cloudprovider_vsphere_cluster }}"
    datacenter: "{{ openshift_cloudprovider_vsphere_datacenter }}"
    resource_pool: "{{ openshift_cloudprovider_vsphere_resource_pool }}"
    template: "{{ openshift_cloudprovider_vsphere_template }}"
    state: poweredon
    wait_for_ip_address: true
    folder: "{{ openshift_cloudprovider_vsphere_datacenter }}/vm/{{ openshift_cloudprovider_vsphere_folder }}"
    annotation: "{{ hostvars[item].openshift_node_labels }}"
    disk:
      - size_gb: 60
        datastore: "{{ openshift_cloudprovider_vsphere_datastore }}"
        type: thin
      - size_gb: 40
        datastore: "{{ openshift_cloudprovider_vsphere_datastore }}"
        type: thin
      - size_gb: 40
        datastore: "{{ openshift_cloudprovider_vsphere_datastore }}"
        type: thin
      - size_gb: 40
        datastore: "{{ openshift_cloudprovider_vsphere_datastore }}"
        type: thin
    hardware:
      memory_mb: 16384
    networks:
      - name: "{{ openshift_cloudprovider_vsphere_vm_network }}"
        ip: "{{ hostvars[item].ipv4addr }}"
        netmask: "{{ openshift_cloudprovider_vsphere_vm_netmask }}"
        gateway: "{{ openshift_cloudprovider_vsphere_vm_gateway }}"
    customization:
      domain: "{{ default_subdomain }}"
      dns_servers:
        - "{{ openshift_cloudprovider_vsphere_vm_dns }}"
      dns_suffix: "{{ default_subdomain }}"
      hostname: "{{ hostvars[item].vm_name }}"
  with_items: "{{ groups['masters'] }}"

- name: Create production infra node VMs on vCenter
  vmware_guest:
    hostname: "{{ openshift_cloudprovider_vsphere_host }}"
    username: "{{ openshift_cloudprovider_vsphere_username }}"
    password: "{{ openshift_cloudprovider_vsphere_password }}"
    validate_certs: False
    name: "{{ hostvars[item].vm_name }}"
    cluster: "{{ openshift_cloudprovider_vsphere_cluster }}"
    datacenter: "{{ openshift_cloudprovider_vsphere_datacenter }}"
    resource_pool: "{{ openshift_cloudprovider_vsphere_resource_pool }}"
    template: "{{ openshift_cloudprovider_vsphere_template }}"
    state: poweredon
    wait_for_ip_address: true
    disk:
      - size_gb: 60
        datastore: "{{ openshift_cloudprovider_vsphere_datastore }}"
        type: thin
      - size_gb: 40
        datastore: "{{ openshift_cloudprovider_vsphere_datastore }}"
        type: thin
      - size_gb: 40
        datastore: "{{ openshift_cloudprovider_vsphere_datastore }}"
        type: thin
      - size_gb: 300
        datastore: "{{ openshift_cloudprovider_vsphere_datastore }}"
        type: eagerZeroedThick
    folder: "{{ openshift_cloudprovider_vsphere_datacenter }}/vm/{{ openshift_cloudprovider_vsphere_folder }}"
    annotation: "{{ hostvars[item].openshift_node_labels }}"
    hardware:
      memory_mb: 8192
    networks:
      - name: "{{ openshift_cloudprovider_vsphere_vm_network }}"
        ip: "{{ hostvars[item].ipv4addr }}"
        netmask: "{{ openshift_cloudprovider_vsphere_vm_netmask }}"
        gateway: "{{ openshift_cloudprovider_vsphere_vm_gateway }}"
    customization:
      domain: "{{ default_subdomain }}"
      dns_servers:
        - "{{ openshift_cloudprovider_vsphere_vm_dns }}"
      dns_suffix: "{{ default_subdomain }}"
      hostname: "{{ hostvars[item].vm_name }}"
  with_items: "{{ groups['infras'] }}"

- name: Create production app node VMs on vCenter
  vmware_guest:
    hostname: "{{ openshift_cloudprovider_vsphere_host }}"
    username: "{{ openshift_cloudprovider_vsphere_username }}"
    password: "{{ openshift_cloudprovider_vsphere_password }}"
    validate_certs: False
    name: "{{ hostvars[item].vm_name }}"
    cluster: "{{ openshift_cloudprovider_vsphere_cluster }}"
    datacenter: "{{ openshift_cloudprovider_vsphere_datacenter }}"
    resource_pool: "{{ openshift_cloudprovider_vsphere_resource_pool }}"
    template: "{{ openshift_cloudprovider_vsphere_template }}"
    state: poweredon
    wait_for_ip_address: true
    disk:
      - size_gb: 60
        datastore: "{{ openshift_cloudprovider_vsphere_datastore }}"
        type: thin
      - size_gb: 40
        datastore: "{{ openshift_cloudprovider_vsphere_datastore }}"
        type: thin
      - size_gb: 40
        datastore: "{{ openshift_cloudprovider_vsphere_datastore }}"
        type: thin
      - size_gb: 300
        datastore: "{{ openshift_cloudprovider_vsphere_datastore }}"
        type: eagerZeroedThick
    folder: "{{ openshift_cloudprovider_vsphere_datacenter }}/vm/{{ openshift_cloudprovider_vsphere_folder }}"
    annotation: "{{ hostvars[item].openshift_node_labels }}"
    hardware:
      memory_mb: "8192"
    networks:
      - name: "{{ openshift_cloudprovider_vsphere_vm_network }}"
        ip: "{{ hostvars[item].ipv4addr }}"
        netmask: "{{ openshift_cloudprovider_vsphere_vm_netmask }}"
        gateway: "{{ openshift_cloudprovider_vsphere_vm_gateway }}"
    customization:
      domain: "{{ default_subdomain }}"
      dns_servers:
        - "{{ openshift_cloudprovider_vsphere_vm_dns }}"
      dns_suffix: "{{ default_subdomain }}"
      hostname: "{{ hostvars[item].vm_name }}"
  with_items: "{{ groups['apps'] }}"
@@ -0,0 +1,7 @@
---
docker_dev: "/dev/sdb"
docker_vg: "docker-vol"
docker_data_size: "95%VG"
docker_dm_basesize: "3G"
container_root_lv_name: "dockerlv"
container_root_lv_mount_path: "/var/lib/docker"
@@ -0,0 +1,34 @@
---
- name: remove any existing docker-storage config file
  file:
    path: /etc/sysconfig/docker-storage
    state: absent

- block:
    - name: create the docker-storage config file
      template:
        src: "{{ role_path }}/templates/docker-storage-setup-overlayfs.j2"
        dest: /etc/sysconfig/docker-storage-setup
        owner: root
        group: root
        mode: 0644

  when:
    - ansible_distribution_version | version_compare('7.4', '>=')
    - ansible_distribution == "RedHat"

- block:
    - name: create the docker-storage-setup config file
      template:
        src: "{{ role_path }}/templates/docker-storage-setup-dm.j2"
        dest: /etc/sysconfig/docker-storage-setup
        owner: root
        group: root
        mode: 0644

  when:
    - ansible_distribution_version | version_compare('7.4', '<')
    - ansible_distribution == "RedHat"

- name: start docker
  service: name=docker state=started enabled=true
@@ -0,0 +1,4 @@
DEVS="{{ docker_dev }}"
VG="{{ docker_vg }}"
DATA_SIZE="{{ docker_data_size }}"
EXTRA_DOCKER_STORAGE_OPTIONS="--storage-opt dm.basesize={{ docker_dm_basesize }}"
@@ -0,0 +1,7 @@
DEVS="{{ docker_dev }}"
VG="{{ docker_vg }}"
DATA_SIZE="{{ docker_data_size }}"
STORAGE_DRIVER=overlay2
CONTAINER_ROOT_LV_NAME="{{ container_root_lv_name }}"
CONTAINER_ROOT_LV_MOUNT_PATH="{{ container_root_lv_mount_path }}"
CONTAINER_ROOT_LV_SIZE=100%FREE
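Taken together with the role defaults above (docker_dev, docker_vg, docker_data_size, and the container_root_* values), this overlayfs variant renders /etc/sysconfig/docker-storage-setup roughly as:

DEVS="/dev/sdb"
VG="docker-vol"
DATA_SIZE="95%VG"
STORAGE_DRIVER=overlay2
CONTAINER_ROOT_LV_NAME="dockerlv"
CONTAINER_ROOT_LV_MOUNT_PATH="/var/lib/docker"
CONTAINER_ROOT_LV_SIZE=100%FREE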
@@ -0,0 +1,24 @@
---
- name: Create openshift volume group
  lvg: vg=etcd_vg pvs=/dev/sdd

- name: Create lvm volumes
  lvol: vg=etcd_vg lv=etcd_lv size=95%FREE state=present shrink=no

- name: Create local partition on lvm lv
  filesystem:
    fstype: xfs
    dev: /dev/etcd_vg/etcd_lv

- name: Make mounts owned by nfsnobody
  file: path=/var/lib/etcd state=directory mode=0755

- name: Mount the partition
  mount:
    name: /var/lib/etcd
    src: /dev/etcd_vg/etcd_lv
    fstype: xfs
    state: present

- name: Remount new partition
  command: "mount -a"
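Because the mount task uses state: present, it only records the entry in /etc/fstab; the follow-up "mount -a" performs the actual mount. The resulting fstab line would look roughly like this:

/dev/etcd_vg/etcd_lv /var/lib/etcd xfs defaults 0 0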
@@ -0,0 +1,24 @@
---
haproxy_socket: /var/lib/haproxy/stats
haproxy_chroot: /var/lib/haproxy
haproxy_user: haproxy
haproxy_group: haproxy

# Frontend settings.
haproxy_frontend_name: 'hafrontend'
haproxy_frontend_bind_address: '*'
haproxy_frontend_port: 80
haproxy_frontend_mode: 'http'

# Backend settings.
haproxy_backend_name: 'habackend'
haproxy_backend_mode: 'http'
haproxy_backend_balance_method: 'roundrobin'
haproxy_backend_httpchk: 'HEAD / HTTP/1.1\r\nHost:localhost'

# List of backend servers.
haproxy_backend_servers: []
# - name: app1
#   address: 192.168.0.1:80
# - name: app2
#   address: 192.168.0.2:80
@@ -0,0 +1,3 @@
---
- name: restart haproxy
  service: name=haproxy state=restarted
@@ -0,0 +1,12 @@
---
- stat: path=/etc/haproxy/haproxy.cfg
  register: haproxy_cfg

- name: Copy HAProxy configuration in place.
  template:
    src: haproxy.cfg.j2
    dest: /etc/haproxy/haproxy.cfg
    mode: 0644
    validate: haproxy -f %s -c -q
  notify: restart haproxy
  when: haproxy_cfg.stat.exists == True
@@ -0,0 +1,66 @@
global
    log 127.0.0.1 local2

    chroot /var/lib/haproxy
    pidfile /var/run/haproxy.pid
    maxconn 4000
    user haproxy
    group haproxy
    daemon

    # turn on stats unix socket
    stats socket /var/lib/haproxy/stats

defaults
    log global
    option httplog
    option dontlognull
    option http-server-close
    # option forwardfor except 127.0.0.0/8
    option redispatch
    retries 3
    timeout http-request 10s
    timeout queue 1m
    timeout connect 10s
    timeout client 1m
    timeout server 1m
    timeout http-keep-alive 10s
    timeout check 10s
    maxconn 3000
listen stats :9000
    stats enable
    stats realm Haproxy\ Statistics
    stats uri /haproxy_stats
    stats auth admin:password
    stats refresh 30
    mode http

frontend main *:80
    default_backend router80

backend router80
    balance source
    mode tcp
{% for host in groups['infras'] %}
    server {{ hostvars[host]['ansible_fqdn'] }} {{ hostvars[host]['ansible_fqdn'] }}:80 check
{% endfor %}

frontend main *:443
    default_backend router443

backend router443
    balance source
    mode tcp
{% for host in groups['infras'] %}
    server {{ hostvars[host]['ansible_fqdn'] }} {{ hostvars[host]['ansible_fqdn'] }}:443 check
{% endfor %}

frontend main *:8443
    default_backend mgmt8443

backend mgmt8443
    balance source
    mode tcp
{% for host in groups['masters'] %}
    server {{ hostvars[host]['ansible_fqdn'] }} {{ hostvars[host]['ansible_fqdn'] }}:8443 check
{% endfor %}
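Rendered against an inventory with, say, two infra nodes (hypothetical FQDNs, not from this commit), the router80 backend expands to something like:

backend router80
    balance source
    mode tcp
    server infra-0.example.com infra-0.example.com:80 check
    server infra-1.example.com infra-1.example.com:80 check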
@@ -0,0 +1,24 @@
---
haproxy_socket: /var/lib/haproxy/stats
haproxy_chroot: /var/lib/haproxy
haproxy_user: haproxy
haproxy_group: haproxy

# Frontend settings.
haproxy_frontend_name: 'hafrontend'
haproxy_frontend_bind_address: '*'
haproxy_frontend_port: 80
haproxy_frontend_mode: 'http'

# Backend settings.
haproxy_backend_name: 'habackend'
haproxy_backend_mode: 'http'
haproxy_backend_balance_method: 'roundrobin'
haproxy_backend_httpchk: 'HEAD / HTTP/1.1\r\nHost:localhost'

# List of backend servers.
haproxy_backend_servers: []
# - name: app1
#   address: 192.168.0.1:80
# - name: app2
#   address: 192.168.0.2:80
@@ -0,0 +1,3 @@
---
- name: restart haproxy
  service: name=haproxy state=restarted
@@ -0,0 +1,25 @@
---
- name: Ensure HAProxy is installed.
  yum: name=haproxy state=installed

- name: Get HAProxy version.
  command: haproxy -v
  register: haproxy_version_result
  changed_when: false
  always_run: yes

- name: open firewall for Openshift services
  command: iptables -I INPUT -p tcp --dport {{ item }} -j ACCEPT
  with_items:
    - 8443
    - 443
    - 80

- name: Save the iptables rules
  command: iptables-save

- name: Ensure Firewalld is disabled
  service: name=firewalld state=stopped enabled=no

- name: Ensure HAProxy is started and enabled on boot.
  service: name=haproxy state=started enabled=yes
@@ -0,0 +1,66 @@
global
    log 127.0.0.1 local2

    chroot /var/lib/haproxy
    pidfile /var/run/haproxy.pid
    maxconn 4000
    user haproxy
    group haproxy
    daemon

    # turn on stats unix socket
    stats socket /var/lib/haproxy/stats

defaults
    log global
    option httplog
    option dontlognull
    option http-server-close
    # option forwardfor except 127.0.0.0/8
    option redispatch
    retries 3
    timeout http-request 10s
    timeout queue 1m
    timeout connect 10s
    timeout client 1m
    timeout server 1m
    timeout http-keep-alive 10s
    timeout check 10s
    maxconn 3000
listen stats :9000
    stats enable
    stats realm Haproxy\ Statistics
    stats uri /haproxy_stats
    stats auth admin:password
    stats refresh 30
    mode http

frontend main *:80
    default_backend router80

backend router80
    balance source
    mode tcp
{% for host in groups['infras'] %}
    server {{ hostvars[host]['ansible_fqdn'] }} {{ hostvars[host]['ansible_fqdn'] }}:80 check
{% endfor %}

frontend main *:443
    default_backend router443

backend router443
    balance source
    mode tcp
{% for host in groups['infras'] %}
    server {{ hostvars[host]['ansible_fqdn'] }} {{ hostvars[host]['ansible_fqdn'] }}:443 check
{% endfor %}

frontend main *:8443
    default_backend mgmt8443

backend mgmt8443
    balance source
    mode tcp
{% for host in groups['masters'] %}
    server {{ hostvars[host]['ansible_fqdn'] }} {{ hostvars[host]['ansible_fqdn'] }}:8443 check
{% endfor %}
@@ -0,0 +1,60 @@
---
- stat: path=~/.bashrc
  register: bashrc_file

- name: Add heketi_cli_server ENV var
  lineinfile:
    dest: ~/.bashrc
    line: "export HEKETI_CLI_SERVER=http://{{ ansible_default_ipv4.address }}:8080"

- name: Add heketi_cli_user ENV var
  lineinfile:
    dest: ~/.bashrc
    line: "export HEKETI_CLI_USER=admin"

- name: Add heketi_cli_password ENV var
  lineinfile:
    dest: ~/.bashrc
    line: "export HEKETI_CLI_KEY={{ admin_key }}"

- name: source bashrc
  action: shell source ~/.bashrc

- name: Copy topology config file to single_crs
  template:
    src: ../topology.json
    dest: ~/topology.json

- name: load topology file
  command: "heketi-cli topology load --json=topology.json"
  register: load_top

- name: store heketi secret
  shell: "echo -n {{ admin_key }} | base64"
  register: stored_secret

- name: assign secret to a variable for use in template
  set_fact:
    heketi_secret: "{{ stored_secret.stdout }}"

- name: Copy heketi secret config file
  template:
    src: heketi-secret.yaml.j2
    dest: ~/heketi-secret.yaml

- name: Fetch heketi secret to copy to master
  fetch:
    src: ~/heketi-secret.yaml
    dest: ~/heketi-secret.yaml
    flat: yes

- name: Copy storage-crs config file
  template:
    src: storage-crs.json.j2
    dest: ~/storage-crs.json

- name: Fetch storage-crs to copy to master
  fetch:
    src: ~/storage-crs.json
    dest: ~/storage-crs.json
    flat: yes
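The topology.json copied here is supplied outside this diff; as a point of reference, a minimal heketi topology for one CRS node with a single device might look like this (hostname, address, zone, and device are placeholders):

{
  "clusters": [
    {
      "nodes": [
        {
          "node": {
            "hostnames": {
              "manage": ["crs-0.example.com"],
              "storage": ["192.0.2.20"]
            },
            "zone": 1
          },
          "devices": ["/dev/sdd"]
        }
      ]
    }
  ]
}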
@@ -0,0 +1,9 @@
apiVersion: v1
kind: Secret
metadata:
  name: heketi-secret
  namespace: default
data:
  key: "{{ heketi_secret }}"
type: kubernetes.io/glusterfs
@@ -0,0 +1,11 @@
apiVersion: storage.k8s.io/v1beta1
kind: StorageClass
metadata:
  name: crs-gluster
provisioner: kubernetes.io/glusterfs
parameters:
  resturl: "http://{{ ansible_default_ipv4.address }}:8080"
  restauthenabled: "true"
  restuser: "admin"
  secretNamespace: "default"
  secretName: "heketi-secret"
@@ -0,0 +1,3 @@
---
- name: restart heketi
  service: name=heketi state=restarted
@@ -0,0 +1,41 @@
---
- name: Install heketi RPMS
  package:
    name: "{{ item }}"
    state: latest
  with_items:
    - heketi
    - heketi-client

- name: Copy ssh private key in place
  template:
    src: ~/.ssh/id_rsa
    dest: /etc/heketi/heketi_key
    owner: heketi
    group: heketi

- name: Copy ssh public key in place
  template:
    src: ~/.ssh/id_rsa.pub
    dest: /etc/heketi/heketi_key.pub
    owner: heketi
    group: heketi

- stat: path=/etc/heketi/heketi.json
  register: heketi_cfg

- name: Copy heketi configuration in place.
  template:
    src: heketi.json.j2
    dest: /etc/heketi/heketi.json
  notify: restart heketi
  when: heketi_cfg.stat.exists == True

- name: restart heketi
  service: name=heketi state=restarted enabled=yes

- name: Verify heketi is started and configured properly
  uri:
    url: http://{{ ansible_default_ipv4.address }}:8080/hello
    status_code: 200
    method: GET
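Once /hello answers, the install can also be spot-checked with the heketi client using the same admin key exported earlier; an illustrative invocation:

heketi-cli --server http://127.0.0.1:8080 --user admin --secret "$HEKETI_CLI_KEY" cluster list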
@@ -0,0 +1,62 @@
{
  "_port_comment": "Heketi Server Port Number",
  "port": "8080",

  "_use_auth": "Enable JWT authorization. Please enable for deployment",
  "use_auth": false,

  "_jwt": "Private keys for access",
  "jwt": {
    "_admin": "Admin has access to all APIs",
    "admin": {
      "key": "{{ admin_key }}"
    },
    "_user": "User only has access to /volumes endpoint",
    "user": {
      "key": "{{ user_key }}"
    }
  },

  "_glusterfs_comment": "GlusterFS Configuration",
  "glusterfs": {
    "_executor_comment": [
      "Execute plugin. Possible choices: mock, ssh",
      "mock: This setting is used for testing and development.",
      " It will not send commands to any node.",
      "ssh: This setting will notify Heketi to ssh to the nodes.",
      " It will need the values in sshexec to be configured.",
      "kubernetes: Communicate with GlusterFS containers over",
      " Kubernetes exec api."
    ],
    "executor": "ssh",

    "_sshexec_comment": "SSH username and private key file information",
    "sshexec": {
      "keyfile": "/etc/heketi/heketi_key",
      "user": "root",
      "port": "22",
      "fstab": "/etc/fstab"
    },

    "_kubeexec_comment": "Kubernetes configuration",
    "kubeexec": {
      "host": "https://kubernetes.host:8443",
      "cert": "/path/to/crt.file",
      "insecure": false,
      "user": "kubernetes username",
      "password": "password for kubernetes user",
      "namespace": "OpenShift project or Kubernetes namespace",
      "fstab": "Optional: Specify fstab file on node. Default is /etc/fstab"
    },

    "_db_comment": "Database file name",
    "db": "/var/lib/heketi/heketi.db",

    "_loglevel_comment": [
      "Set log level. Choices are:",
      " none, critical, error, warning, info, debug",
      "Default is warning"
    ],
    "loglevel": "debug"
  }
}
@@ -0,0 +1,15 @@
---
- name: Switch to default project
  command: oc project default

- name: Check to see if heketi secret is already created
  command: "oc get secrets"
  register: oc_secrets

- name: Check to see if storage class is already created
  command: "oc get storageclass"
  register: storage_class

- name: Remove storage class from OCP
  command: "oc delete storageclass crs-gluster"
  when: "'crs-gluster' in storage_class.stdout"
@@ -0,0 +1,9 @@
apiVersion: v1
kind: Secret
metadata:
  name: heketi-secret
  namespace: default
data:
  key: "{{ heketi_secret }}"
type: kubernetes.io/glusterfs
@@ -0,0 +1,11 @@
apiVersion: storage.k8s.io/v1beta1
kind: StorageClass
metadata:
  name: crs-gluster
provisioner: kubernetes.io/glusterfs
parameters:
  resturl: "http://{{ ansible_default_ipv4.address }}:8080"
  restauthenabled: "true"
  restuser: "admin"
  secretNamespace: "default"
  secretName: "heketi-secret"
@@ -0,0 +1,29 @@
---
- name: Copy heketi secret config file to master
  copy:
    src: ~/heketi-secret.yaml
    dest: ~/heketi-secret.yaml

- name: Copy storage-crs config file to master
  copy:
    src: ~/storage-crs.json
    dest: ~/storage-crs.json

- name: Switch to default project
  command: oc project default

- name: Check to see if heketi secret is already created
  command: "oc get secrets"
  register: oc_secrets

- name: Check to see if storage class is already created
  command: "oc get storageclass"
  register: storage_class

- name: Add heketi secret
  command: "oc create -f ~/heketi-secret.yaml"
  when: "'heketi-secret' not in oc_secrets.stdout"

- name: Create storage class
  command: "oc create -f ~/storage-crs.json"
  when: "'crs-gluster' not in storage_class.stdout"
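With the crs-gluster StorageClass in place, projects can request GlusterFS-backed volumes dynamically. An illustrative claim (the name and size are arbitrary; the beta annotation matches the v1beta1 StorageClass API used above):

apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: gluster-claim
  annotations:
    volume.beta.kubernetes.io/storage-class: crs-gluster
spec:
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 10Gi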
@@ -0,0 +1,9 @@
apiVersion: v1
kind: Secret
metadata:
  name: heketi-secret
  namespace: default
data:
  key: "{{ heketi_secret }}"
type: kubernetes.io/glusterfs
@@ -0,0 +1,11 @@
apiVersion: storage.k8s.io/v1beta1
kind: StorageClass
metadata:
  name: crs-gluster
provisioner: kubernetes.io/glusterfs
parameters:
  resturl: "http://{{ ansible_default_ipv4.address }}:8080"
  restauthenabled: "true"
  restuser: "admin"
  secretNamespace: "default"
  secretName: "heketi-secret"
@@ -0,0 +1,2 @@
---
keepalived_priority_start: 100
@@ -0,0 +1,3 @@
---
- name: restart keepalived
  service: name=keepalived state=restarted
@@ -0,0 +1,3 @@
---
- name: restart keepalived
  service: name=keepalived state=restarted
@@ -0,0 +1,48 @@
---
- block:
    - name: Install packages
      yum: name={{ item }} state=present
      with_items:
        - keepalived
        - psmisc

    - name: Determine interface name on single node
      set_fact:
        external_interface: "{{ ansible_default_ipv4.interface }}"

    - name: Allow connections between haproxy nodes
      template:
        src: firewall.sh.j2
        dest: /tmp/firewall.sh
        mode: "u=rwx,g=,o="

    - command: /tmp/firewall.sh

    - file:
        path: /tmp/firewall.sh
        state: absent

    - name: Generate OCP public IP for play
      set_fact:
        openshift_master_cluster_public_ip: "{{ lb_ha_ip }}"

    - name: Generate random external password
      shell: uuidgen
      run_once: true
      register: keepalived_pass

    - name: Start keepalived
      service:
        name: keepalived
        state: started
        enabled: yes

    - name: Configure keepalived
      template:
        src: keepalived.conf.j2
        dest: /etc/keepalived/keepalived.conf
      notify: restart keepalived

  when:
    - lb_ha_ip is defined
    - lb_ha_ip|trim != ''
@@ -0,0 +1,6 @@
#!/bin/bash
{% for host in groups['haproxy_group'] %}
iptables -A INPUT -s {{ hostvars[host].ansible_default_ipv4.address }} -j ACCEPT
{% endfor %}

iptables-save > /etc/sysconfig/iptables
@@ -0,0 +1,31 @@
global_defs {
    router_id LVS_DEVEL
}

vrrp_script haproxy_check {
    script "killall -0 haproxy"
    interval 2
    weight 2
}

vrrp_instance OCP_EXT {
    interface {{ external_interface }}

    virtual_router_id 51

    priority {% if groups.haproxy_group.index(inventory_hostname) == 0 %} {{ keepalived_priority_start }}{% else %} {{ keepalived_priority_start - 2 }}{% endif %}

    state {% if groups.haproxy_group.index(inventory_hostname) == 0 %} {{ "MASTER" }}{% else %} {{ "BACKUP" }}{% endif %}

    virtual_ipaddress {
        {{ openshift_master_cluster_public_ip }} dev {{ external_interface }}

    }
    track_script {
        haproxy_check
    }
    authentication {
        auth_type PASS
        auth_pass {{ keepalived_pass.stdout }}
    }
}
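For a two-node haproxy_group, this template renders the first host as MASTER with priority 100 (keepalived_priority_start) and the second as BACKUP with priority 98. On the backup node the key rendered lines would look roughly like this (the interface name and VIP are placeholders):

    interface ens192
    priority 98
    state BACKUP
    virtual_ipaddress {
        203.0.113.10 dev ens192
    }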
@@ -0,0 +1,2 @@
---
keepalived_priority_start: 100
@@ -0,0 +1,3 @@
---
- name: restart keepalived
  service: name=keepalived state=restarted
@@ -0,0 +1,3 @@
---
- name: restart keepalived
  service: name=keepalived state=restarted
@@ -0,0 +1,48 @@
---
- block:
    - name: Install packages
      yum: name={{ item }} state=present
      with_items:
        - keepalived
        - psmisc

    - name: Determine interface name on single node
      set_fact:
        external_interface: "{{ ansible_default_ipv4.interface }}"

    - name: Allow connections between haproxy nodes
      template:
        src: firewall.sh.j2
        dest: /tmp/firewall.sh
        mode: "u=rwx,g=,o="

    - command: /tmp/firewall.sh

    - file:
        path: /tmp/firewall.sh
        state: absent

    - name: Generate OCP public IP for play
      set_fact:
        openshift_master_cluster_public_ip: "{{ lb_ha_ip }}"

    - name: Generate random external password
      shell: uuidgen
      run_once: true
      register: keepalived_pass

    - name: Start keepalived
      service:
        name: keepalived
        state: started
        enabled: yes

    - name: Configure keepalived
      template:
        src: keepalived.conf.j2
        dest: /etc/keepalived/keepalived.conf
      notify: restart keepalived

  when:
    - lb_ha_ip is defined
    - lb_ha_ip|trim != ''
@@ -0,0 +1,6 @@
#!/bin/bash
{% for host in groups['haproxy'] %}
iptables -A INPUT -s {{ hostvars[host].ansible_default_ipv4.address }} -j ACCEPT
{% endfor %}

iptables-save > /etc/sysconfig/iptables
@@ -0,0 +1,31 @@
global_defs {
    router_id LVS_DEVEL
}

vrrp_script haproxy_check {
    script "killall -0 haproxy"
    interval 2
    weight 2
}

vrrp_instance OCP_EXT {
    interface {{ external_interface }}

    virtual_router_id 51

    priority {% if groups.haproxy.index(inventory_hostname) == 0 %} {{ keepalived_priority_start }}{% else %} {{ keepalived_priority_start - 2 }}{% endif %}

    state {% if groups.haproxy.index(inventory_hostname) == 0 %} {{ "MASTER" }}{% else %} {{ "BACKUP" }}{% endif %}

    virtual_ipaddress {
        {{ openshift_master_cluster_public_ip }} dev {{ external_interface }}

    }
    track_script {
        haproxy_check
    }
    authentication {
        auth_type PASS
        auth_pass {{ keepalived_pass.stdout }}
    }
}
@@ -0,0 +1,6 @@
---
- name: restart chronyd
  service: name=chronyd state=restarted

- name: restart networking
  service: name=networking state=restarted
@@ -0,0 +1,66 @@
---
- name: Determine if Atomic
  stat: path=/run/ostree-booted
  register: s
  changed_when: false
  check_mode: no

- name: Init the is_atomic fact
  set_fact:
    is_atomic: false

- name: Set the is_atomic fact
  set_fact:
    is_atomic: true
  when: s.stat.exists

- block:
    - name: be sure all pre-req packages are installed
      yum: name={{ item }} state=installed
      with_items:
        - open-vm-tools
        - PyYAML
        - perl
        - net-tools
        - python-six
        - iptables
        - iptables-services
        - docker

    - name: be sure katello-agent is installed
      yum: name=katello-agent state=installed
      when: rhsm_activation_key is defined and rhsm_activation_key

    - name: be sure openvmtools is running and enabled
      service: name=vmtoolsd state=started enabled=yes

  when:
    - not is_atomic | bool
    - ansible_distribution == "RedHat"

- name: set link to localtime
  command: timedatectl set-timezone {{ timezone }}

- block:
    - name: (Atomic) Remove extra docker lv from root vg
      lvol:
        lv: docker-pool
        vg: atomicos
        state: absent
        force: yes
    - name: (Atomic) Grow root lv to fill vg
      lvol:
        lv: root
        vg: atomicos
        size: +100%FREE
    - name: (Atomic) Grow root fs to match lv
      filesystem:
        dev: /dev/mapper/atomicos-root
        fstype: xfs
        resizefs: yes
    - name: (Atomic) Force Ansible to re-gather disk facts
      setup:
        filter: 'ansible_mounts'
  when:
    - is_atomic | bool
    - ansible_distribution == "RedHat"
@@ -0,0 +1,3 @@
---
locale: en_US.UTF-8
timezone: UTC