diff --git a/upi/vsphere/main.tf b/upi/vsphere/main.tf
index 920991067b..288b25ac2c 100644
--- a/upi/vsphere/main.tf
+++ b/upi/vsphere/main.tf
@@ -1,10 +1,25 @@
 locals {
+  failure_domains = length(var.failure_domains) == 0 ? [{
+    datacenter                      = var.vsphere_datacenter
+    cluster                         = var.vsphere_cluster
+    datastore                       = var.vsphere_datastore
+    network                         = var.vm_network
+    distributed_virtual_switch_uuid = ""
+  }] : var.failure_domains
+
+  failure_domain_count = length(local.failure_domains)
   bootstrap_fqdns     = ["bootstrap-0.${var.cluster_domain}"]
   lb_fqdns            = ["lb-0.${var.cluster_domain}"]
   api_lb_fqdns        = formatlist("%s.%s", ["api", "api-int", "*.apps"], var.cluster_domain)
-  control_plane_fqdns = [for idx in range(var.control_plane_count) : "control-plane-${idx}.${var.cluster_domain}"]
-  compute_fqdns       = [for idx in range(var.compute_count) : "compute-${idx}.${var.cluster_domain}"]
+  control_plane_fqdns  = [for idx in range(length(var.control_plane_ip_addresses)) : "control-plane-${idx}.${var.cluster_domain}"]
+  compute_fqdns        = [for idx in range(length(var.compute_ip_addresses)) : "compute-${idx}.${var.cluster_domain}"]
+  datastores           = [for idx in range(length(local.failure_domains)) : local.failure_domains[idx]["datastore"]]
+  datacenters          = [for idx in range(length(local.failure_domains)) : local.failure_domains[idx]["datacenter"]]
+  datacenters_distinct = distinct([for idx in range(length(local.failure_domains)) : local.failure_domains[idx]["datacenter"]])
+  clusters             = [for idx in range(length(local.failure_domains)) : local.failure_domains[idx]["cluster"]]
+  networks             = [for idx in range(length(local.failure_domains)) : local.failure_domains[idx]["network"]]
+  folders              = [for idx in range(length(local.datacenters)) : "/${local.datacenters[idx]}/vm/${var.cluster_id}"]
 }
 
 provider "vsphere" {
@@ -15,171 +30,174 @@ provider "vsphere" {
 }
 
 data "vsphere_datacenter" "dc" {
-  name = var.vsphere_datacenter
+  count = length(local.datacenters_distinct)
+  name  = local.datacenters_distinct[count.index]
 }
 
 data "vsphere_compute_cluster" "compute_cluster" {
-  name          = var.vsphere_cluster
-  datacenter_id = data.vsphere_datacenter.dc.id
+  count         = length(local.failure_domains)
+  name          = local.clusters[count.index]
+  datacenter_id = data.vsphere_datacenter.dc[index(data.vsphere_datacenter.dc.*.name, local.datacenters[count.index])].id
 }
-
+#
 data "vsphere_datastore" "datastore" {
-  name          = var.vsphere_datastore
-  datacenter_id = data.vsphere_datacenter.dc.id
+  count         = length(local.failure_domains)
+  name          = local.datastores[count.index]
+  datacenter_id = data.vsphere_datacenter.dc[index(data.vsphere_datacenter.dc.*.name, local.datacenters[count.index])].id
 }
+#
 data "vsphere_network" "network" {
-  name          = var.vm_network
-  datacenter_id = data.vsphere_datacenter.dc.id
+  count                           = length(local.failure_domains)
+  name                            = local.failure_domains[count.index]["network"]
+  datacenter_id                   = data.vsphere_datacenter.dc[index(data.vsphere_datacenter.dc.*.name, local.failure_domains[count.index]["datacenter"])].id
+  distributed_virtual_switch_uuid = local.failure_domains[count.index]["distributed_virtual_switch_uuid"]
 }
-
+
 data "vsphere_virtual_machine" "template" {
+  count         = length(local.datacenters_distinct)
   name          = var.vm_template
-  datacenter_id = data.vsphere_datacenter.dc.id
+  datacenter_id = data.vsphere_datacenter.dc[index(data.vsphere_datacenter.dc.*.name, local.datacenters_distinct[count.index])].id
 }
-
+
 resource "vsphere_resource_pool" "resource_pool" {
+  count                   = length(data.vsphere_compute_cluster.compute_cluster)
   name                    = var.cluster_id
-  parent_resource_pool_id = data.vsphere_compute_cluster.compute_cluster.resource_pool_id
+  parent_resource_pool_id = data.vsphere_compute_cluster.compute_cluster[count.index].resource_pool_id
 }
-
+
 resource "vsphere_folder" "folder" {
+  count         = length(local.datacenters_distinct)
   path          = var.cluster_id
-  type          = "vm"
-  datacenter_id = data.vsphere_datacenter.dc.id
+  type          = "vm"
+  datacenter_id = data.vsphere_datacenter.dc[index(data.vsphere_datacenter.dc.*.name, local.datacenters_distinct[count.index])].id
 }
-
-// Request from phpIPAM a new IP address for the bootstrap node
-module "ipam_bootstrap" {
-  source = "./ipam"
-
-  // The hostname that will be added to phpIPAM when requesting an ip address
-  hostnames = local.bootstrap_fqdns
-
-  // Hostname or IP address of the phpIPAM server
-  ipam = var.ipam
-
-  // Access token for phpIPAM
-  ipam_token = var.ipam_token
-
-  // Subnet where we will request an ip address from phpIPAM
-  machine_cidr = var.machine_cidr
-
-  static_ip_addresses = var.bootstrap_ip_address == "" ? [] : [var.bootstrap_ip_address]
-
-}
-
-// Request from phpIPAM a new IP addresses for the control-plane nodes
-module "ipam_control_plane" {
-  source              = "./ipam"
-  hostnames           = local.control_plane_fqdns
-  ipam                = var.ipam
-  ipam_token          = var.ipam_token
-  machine_cidr        = var.machine_cidr
-  static_ip_addresses = var.control_plane_ip_addresses
-}
-
-// Request from phpIPAM a new IP addresses for the compute nodes
-module "ipam_compute" {
-  source              = "./ipam"
-  hostnames           = local.compute_fqdns
-  ipam                = var.ipam
-  ipam_token          = var.ipam_token
-  machine_cidr        = var.machine_cidr
-  static_ip_addresses = var.compute_ip_addresses
-}
-
-// Request from phpIPAM a new IP addresses for the load balancer nodes
-module "ipam_lb" {
-  source              = "./ipam"
-  hostnames           = local.lb_fqdns
-  ipam                = var.ipam
-  ipam_token          = var.ipam_token
-  machine_cidr        = var.machine_cidr
-  static_ip_addresses = var.lb_ip_address == "" ? [] : [var.lb_ip_address]
-}
-
-module "lb" {
-  source        = "./lb"
-  lb_ip_address = module.ipam_lb.ip_addresses[0]
-
-  api_backend_addresses = flatten([
-    module.ipam_bootstrap.ip_addresses[0],
-    module.ipam_control_plane.ip_addresses]
-  )
-
-  ingress_backend_addresses = module.ipam_compute.ip_addresses
-  ssh_public_key_path       = var.ssh_public_key_path
-}
-
-module "dns_cluster_domain" {
-  source         = "./cluster_domain"
-  cluster_domain = var.cluster_domain
-  base_domain    = var.base_domain
-}
-
-module "lb_a_records" {
-  source  = "./host_a_record"
-  zone_id = module.dns_cluster_domain.zone_id
-  records = zipmap(
-    local.api_lb_fqdns,
-    [for name in local.api_lb_fqdns : module.ipam_lb.ip_addresses[0]]
-  )
-}
-
-module "control_plane_a_records" {
-  source  = "./host_a_record"
-  zone_id = module.dns_cluster_domain.zone_id
-  records = zipmap(local.control_plane_fqdns, module.ipam_control_plane.ip_addresses)
-}
-
-module "compute_a_records" {
-  source  = "./host_a_record"
-  zone_id = module.dns_cluster_domain.zone_id
-  records = zipmap(local.compute_fqdns, module.ipam_compute.ip_addresses)
-}
-
-module "lb_vm" {
-  source = "./vm"
-
-  ignition               = module.lb.ignition
-  hostnames_ip_addresses = zipmap(local.lb_fqdns, module.ipam_lb.ip_addresses)
-
-  resource_pool_id      = vsphere_resource_pool.resource_pool.id
-  datastore_id          = data.vsphere_datastore.datastore.id
-  datacenter_id         = data.vsphere_datacenter.dc.id
-  network_id            = data.vsphere_network.network.id
-  folder_id             = vsphere_folder.folder.path
-  guest_id              = data.vsphere_virtual_machine.template.guest_id
-  template_uuid         = data.vsphere_virtual_machine.template.id
-  disk_thin_provisioned = data.vsphere_virtual_machine.template.disks[0].thin_provisioned
-
-  cluster_domain = var.cluster_domain
-  machine_cidr   = var.machine_cidr
-
-  num_cpus      = 2
-  memory        = 2096
-  dns_addresses = var.vm_dns_addresses
-}
-
+
+ // Request from phpIPAM a new IP address for the bootstrap node
+ module "ipam_bootstrap" {
+   source = "./ipam"
+
+   // The hostname that will be added to phpIPAM when requesting an ip address
+   hostnames = local.bootstrap_fqdns
+
+   // Hostname or IP address of the phpIPAM server
+   ipam = var.ipam
+
+   // Access token for phpIPAM
+   ipam_token = var.ipam_token
+
+   // Subnet where we will request an ip address from phpIPAM
+   machine_cidr = var.machine_cidr
+
+   static_ip_addresses = var.bootstrap_ip_address == "" ? [] : [var.bootstrap_ip_address]
+
+ }
+
+ // Request from phpIPAM new IP addresses for the control-plane nodes
+ module "ipam_control_plane" {
+   source              = "./ipam"
+   hostnames           = local.control_plane_fqdns
+   ipam                = var.ipam
+   ipam_token          = var.ipam_token
+   machine_cidr        = var.machine_cidr
+   static_ip_addresses = var.control_plane_ip_addresses
+ }
+
+ // Request from phpIPAM new IP addresses for the compute nodes
+ module "ipam_compute" {
+   source              = "./ipam"
+   hostnames           = local.compute_fqdns
+   ipam                = var.ipam
+   ipam_token          = var.ipam_token
+   machine_cidr        = var.machine_cidr
+   static_ip_addresses = var.compute_ip_addresses
+ }
+
+ // Request from phpIPAM new IP addresses for the load balancer nodes
+ module "ipam_lb" {
+   source              = "./ipam"
+   hostnames           = local.lb_fqdns
+   ipam                = var.ipam
+   ipam_token          = var.ipam_token
+   machine_cidr        = var.machine_cidr
+   static_ip_addresses = var.lb_ip_address == "" ? [] : [var.lb_ip_address]
+ }
+
+ module "lb" {
+   source        = "./lb"
+   lb_ip_address = module.ipam_lb.ip_addresses[0]
+
+   api_backend_addresses = flatten([
+     module.ipam_bootstrap.ip_addresses[0],
+     module.ipam_control_plane.ip_addresses]
+   )
+
+   ingress_backend_addresses = module.ipam_compute.ip_addresses
+   ssh_public_key_path       = var.ssh_public_key_path
+ }
+
+ module "dns_cluster_domain" {
+   source         = "./cluster_domain"
+   cluster_domain = var.cluster_domain
+   base_domain    = var.base_domain
+ }
+
+ module "lb_a_records" {
+   source  = "./host_a_record"
+   zone_id = module.dns_cluster_domain.zone_id
+   records = zipmap(
+     local.api_lb_fqdns,
+     [for name in local.api_lb_fqdns : module.ipam_lb.ip_addresses[0]]
+   )
+ }
+
+ module "control_plane_a_records" {
+   source  = "./host_a_record"
+   zone_id = module.dns_cluster_domain.zone_id
+   records = zipmap(local.control_plane_fqdns, module.ipam_control_plane.ip_addresses)
+ }
+
+ module "compute_a_records" {
+   source  = "./host_a_record"
+   zone_id = module.dns_cluster_domain.zone_id
+   records = zipmap(local.compute_fqdns, module.ipam_compute.ip_addresses)
+ }
+
+ module "lb_vm" {
+   source                = "./vm"
+   vmname                = element(split(".", local.lb_fqdns[0]), 0)
+   ipaddress             = module.ipam_lb.ip_addresses[0]
+   ignition              = module.lb.ignition
+   resource_pool_id      = vsphere_resource_pool.resource_pool[0].id
+   datastore_id          = data.vsphere_datastore.datastore[0].id
+   datacenter_id         = data.vsphere_datacenter.dc[0].id
+   network_id            = data.vsphere_network.network[0].id
+   folder_id             = vsphere_folder.folder[0].path
+   guest_id              = data.vsphere_virtual_machine.template[0].guest_id
+   template_uuid         = data.vsphere_virtual_machine.template[0].id
+   disk_thin_provisioned = data.vsphere_virtual_machine.template[0].disks[0].thin_provisioned
+   cluster_domain        = var.cluster_domain
+   machine_cidr          = var.machine_cidr
+   num_cpus              = 2
+   memory                = 2096
+   dns_addresses         = var.vm_dns_addresses
+ }
+
 module "bootstrap" {
   source = "./vm"
 
   ignition = file(var.bootstrap_ignition_path)
 
-  hostnames_ip_addresses = zipmap(
-    local.bootstrap_fqdns,
-    module.ipam_bootstrap.ip_addresses
-  )
-
-  resource_pool_id      = vsphere_resource_pool.resource_pool.id
-  datastore_id          = data.vsphere_datastore.datastore.id
-  datacenter_id         = data.vsphere_datacenter.dc.id
-  network_id            = data.vsphere_network.network.id
-  folder_id             = vsphere_folder.folder.path
-  guest_id              = data.vsphere_virtual_machine.template.guest_id
-  template_uuid         = data.vsphere_virtual_machine.template.id
-  disk_thin_provisioned = data.vsphere_virtual_machine.template.disks[0].thin_provisioned
+  vmname                = element(split(".", local.bootstrap_fqdns[0]), 0)
+  ipaddress             = module.ipam_bootstrap.ip_addresses[0]
+  resource_pool_id      = vsphere_resource_pool.resource_pool[0].id
+  datastore_id          = data.vsphere_datastore.datastore[0].id
+  datacenter_id         = data.vsphere_datacenter.dc[0].id
+  network_id            = data.vsphere_network.network[0].id
+  folder_id             = vsphere_folder.folder[0].path
+  guest_id              = data.vsphere_virtual_machine.template[0].guest_id
+  template_uuid         = data.vsphere_virtual_machine.template[0].id
+  disk_thin_provisioned = data.vsphere_virtual_machine.template[0].disks[0].thin_provisioned
 
   cluster_domain = var.cluster_domain
   machine_cidr   = var.machine_cidr
@@ -188,60 +206,50 @@ module "bootstrap" {
   memory        = 8192
   dns_addresses = var.vm_dns_addresses
 }
-
+
 module "control_plane_vm" {
+  count  = length(module.control_plane_a_records.fqdns)
   source = "./vm"
-
   // Using the output from control_plane_a_records
   // is on purpose. I want the A records to be created before
   // the virtual machines which gives additional time to
   // replicate the records.
-  hostnames_ip_addresses = zipmap(
-    module.control_plane_a_records.fqdns,
-    module.ipam_control_plane.ip_addresses
-  )
+
+  vmname    = element(split(".", module.control_plane_a_records.fqdns[count.index]), 0)
+  ipaddress = module.ipam_control_plane.ip_addresses[count.index]
 
   ignition = file(var.control_plane_ignition_path)
-
-  resource_pool_id      = vsphere_resource_pool.resource_pool.id
-  datastore_id          = data.vsphere_datastore.datastore.id
-  datacenter_id         = data.vsphere_datacenter.dc.id
-  network_id            = data.vsphere_network.network.id
-  folder_id             = vsphere_folder.folder.path
-  guest_id              = data.vsphere_virtual_machine.template.guest_id
-  template_uuid         = data.vsphere_virtual_machine.template.id
-  disk_thin_provisioned = data.vsphere_virtual_machine.template.disks[0].thin_provisioned
-
+  resource_pool_id      = vsphere_resource_pool.resource_pool[count.index % local.failure_domain_count].id
+  datastore_id          = data.vsphere_datastore.datastore[count.index % local.failure_domain_count].id
+  datacenter_id         = data.vsphere_datacenter.dc[index(data.vsphere_datacenter.dc.*.name, local.failure_domains[count.index % local.failure_domain_count]["datacenter"])].id
+  network_id            = data.vsphere_network.network[count.index % local.failure_domain_count].id
+  folder_id             = vsphere_folder.folder[index(data.vsphere_datacenter.dc.*.name, local.failure_domains[count.index % local.failure_domain_count]["datacenter"])].path
+  guest_id              = data.vsphere_virtual_machine.template[index(data.vsphere_datacenter.dc.*.name, local.failure_domains[count.index % local.failure_domain_count]["datacenter"])].guest_id
+  template_uuid         = data.vsphere_virtual_machine.template[index(data.vsphere_datacenter.dc.*.name, local.failure_domains[count.index % local.failure_domain_count]["datacenter"])].id
+  disk_thin_provisioned = data.vsphere_virtual_machine.template[index(data.vsphere_datacenter.dc.*.name, local.failure_domains[count.index % local.failure_domain_count]["datacenter"])].disks[0].thin_provisioned
   cluster_domain = var.cluster_domain
   machine_cidr   = var.machine_cidr
-
   num_cpus      = var.control_plane_num_cpus
   memory        = var.control_plane_memory
   dns_addresses = var.vm_dns_addresses
 }
-
 module "compute_vm" {
+  count  = length(module.compute_a_records.fqdns)
   source = "./vm"
-
-  hostnames_ip_addresses = zipmap(
-    module.compute_a_records.fqdns,
-    module.ipam_compute.ip_addresses
-  )
-
   ignition = file(var.compute_ignition_path)
+  vmname    = element(split(".", module.compute_a_records.fqdns[count.index]), 0)
+  ipaddress = module.ipam_compute.ip_addresses[count.index]
 
-  resource_pool_id      = vsphere_resource_pool.resource_pool.id
-  datastore_id          = data.vsphere_datastore.datastore.id
-  datacenter_id         = data.vsphere_datacenter.dc.id
-  network_id            = data.vsphere_network.network.id
-  folder_id             = vsphere_folder.folder.path
-  guest_id              = data.vsphere_virtual_machine.template.guest_id
-  template_uuid         = data.vsphere_virtual_machine.template.id
-  disk_thin_provisioned = data.vsphere_virtual_machine.template.disks[0].thin_provisioned
-
+  resource_pool_id      = vsphere_resource_pool.resource_pool[count.index % local.failure_domain_count].id
+  datastore_id          = data.vsphere_datastore.datastore[count.index % local.failure_domain_count].id
+  datacenter_id         = data.vsphere_datacenter.dc[index(data.vsphere_datacenter.dc.*.name, local.failure_domains[count.index % local.failure_domain_count]["datacenter"])].id
+  network_id            = data.vsphere_network.network[count.index % local.failure_domain_count].id
+  folder_id             = vsphere_folder.folder[index(data.vsphere_datacenter.dc.*.name, local.failure_domains[count.index % local.failure_domain_count]["datacenter"])].path
+  guest_id              = data.vsphere_virtual_machine.template[index(data.vsphere_datacenter.dc.*.name, local.failure_domains[count.index % local.failure_domain_count]["datacenter"])].guest_id
+  template_uuid         = data.vsphere_virtual_machine.template[index(data.vsphere_datacenter.dc.*.name, local.failure_domains[count.index % local.failure_domain_count]["datacenter"])].id
+  disk_thin_provisioned = data.vsphere_virtual_machine.template[index(data.vsphere_datacenter.dc.*.name, local.failure_domains[count.index % local.failure_domain_count]["datacenter"])].disks[0].thin_provisioned
   cluster_domain = var.cluster_domain
   machine_cidr   = var.machine_cidr
-
   num_cpus      = var.compute_num_cpus
   memory        = var.compute_memory
   dns_addresses = var.vm_dns_addresses
diff --git a/upi/vsphere/terraform.tfvars.example b/upi/vsphere/terraform.tfvars.example
index 962581dcc0..78452fef23 100644
--- a/upi/vsphere/terraform.tfvars.example
+++ b/upi/vsphere/terraform.tfvars.example
@@ -16,15 +16,6 @@ vsphere_user = "YOUR_USER"
 // Password of the user on the vSphere server.
 vsphere_password = "YOUR_PASSWORD"
 
-// Name of the vSphere cluster. The dev cluster is "devel".
-vsphere_cluster = "devel"
-
-// Name of the vSphere data center. The dev cluster is "dc1".
-vsphere_datacenter = "dc1"
-
-// Name of the vSphere data store to use for the VMs. The dev cluster uses "nvme-ds1".
-vsphere_datastore = "nvme-ds1"
-
 // Name of the VM template to clone to create VMs for the cluster. The dev cluster has a template named "rhcos-latest".
 vm_template = "rhcos-latest"
 
@@ -68,3 +59,33 @@ ipam_token = "TOKEN_FOR_THE_IPAM_SERVER"
 // The IP addresses to assign to the compute VMs. The length of this list must
 // match the value of compute_count.
 //compute_ips = ["10.0.0.30", "10.0.0.31", "10.0.0.32"]
+
+// A list of maps where each map defines a specific failure domain. At least one failure domain must be
+// specified. When multiple failure domains are specified, control plane and compute nodes are distributed
+// among the defined failure domains.
+failure_domains = [
+  {
+    // Name of the vSphere data center.
+    datacenter = "dc1"
+    // Name of the vSphere cluster.
+    cluster = "devel"
+    // Name of the vSphere data store to use for the VMs.
+    datastore = "nvme-ds1"
+    // Name of the vSphere network to use for the VMs.
+    network = "ci-segment-151"
+    // UUID of the distributed switch which is hosting the portgroup. This can be derived from the MOB.
+    distributed_virtual_switch_uuid = "50 05 1b 07 19 2b 0b 0a-eb 90 98 54 1d c5 b5 19"
+  },
+  {
+    // Name of the vSphere data center.
+    datacenter = "dc2"
+    // Name of the vSphere cluster.
+    cluster = "devel2"
+    // Name of the vSphere data store to use for the VMs.
+    datastore = "nvme-ds2"
+    // Name of the vSphere network to use for the VMs.
+    network = "ci-segment-151"
+    // UUID of the distributed switch which is hosting the portgroup. This can be derived from the MOB.
+    distributed_virtual_switch_uuid = "50 05 1b 07 19 2b 0b 0a-eb 90 98 54 1d c5 b5 19"
+  }
+]
diff --git a/upi/vsphere/variables.tf b/upi/vsphere/variables.tf
index 701f78de4c..1775dc268e 100644
--- a/upi/vsphere/variables.tf
+++ b/upi/vsphere/variables.tf
@@ -45,18 +45,17 @@ variable "vsphere_datastore" {
   type        = string
   description = "This is the name of the vSphere data store."
 }
-
-variable "vm_template" {
-  type        = string
-  description = "This is the name of the VM template to clone."
-}
-
 variable "vm_network" {
   type        = string
   description = "This is the name of the publicly accessible network for cluster ingress and access."
   default     = "VM Network"
 }
 
+variable "vm_template" {
+  type        = string
+  description = "This is the name of the VM template to clone."
+}
+
 variable "vm_dns_addresses" {
   type    = list(string)
   default = ["1.1.1.1", "9.9.9.9"]
@@ -170,3 +169,23 @@ variable "ssh_public_key_path" {
   type    = string
   default = "~/.ssh/id_rsa.pub"
 }
+
+///////////////////////////////////////////
+///// failure domains
+///// if not defined, a default failure domain is created which consists of:
+///// vsphere_cluster, vsphere_datacenter, vsphere_datastore, vm_network
+/////
+///// each element in the list must consist of:
+/////{
+/////  datacenter = "the-datacenter"
+/////  cluster = "the-cluster"
+/////  datastore = "the-datastore"
+/////  network = "the-portgroup"
+/////  distributed_virtual_switch_uuid = "uuid-of-the-dvs-where-the-portgroup-is-attached"
+/////}
+///////////////////////////////////////////
+variable "failure_domains" {
+  type        = list(map(string))
+  description = "defines a list of failure domains"
+  default     = []
+}
\ No newline at end of file
diff --git a/upi/vsphere/vm/main.tf b/upi/vsphere/vm/main.tf
index 62a24320ab..3e7bccb316 100644
--- a/upi/vsphere/vm/main.tf
+++ b/upi/vsphere/vm/main.tf
@@ -1,7 +1,5 @@
 resource "vsphere_virtual_machine" "vm" {
-  for_each = var.hostnames_ip_addresses
-
-  name = element(split(".", each.key), 0)
+  name = var.vmname
 
   resource_pool_id = var.resource_pool_id
   datastore_id     = var.datastore_id
@@ -31,7 +29,7 @@ resource "vsphere_virtual_machine" "vm" {
   extra_config = {
     "guestinfo.ignition.config.data"           = base64encode(var.ignition)
     "guestinfo.ignition.config.data.encoding"  = "base64"
-    "guestinfo.afterburn.initrd.network-kargs" = "ip=${each.value}::${cidrhost(var.machine_cidr, 1)}:${cidrnetmask(var.machine_cidr)}:${element(split(".", each.key), 0)}:ens192:none:${join(":", var.dns_addresses)}"
+    "guestinfo.afterburn.initrd.network-kargs" = "ip=${var.ipaddress}::${cidrhost(var.machine_cidr, 1)}:${cidrnetmask(var.machine_cidr)}:${var.vmname}:ens192:none:${join(":", var.dns_addresses)}"
     "stealclock.enable"                        = "TRUE"
   }
 }
diff --git a/upi/vsphere/vm/variables.tf b/upi/vsphere/vm/variables.tf
index f621abeb89..a9f4fa6e8c 100644
--- a/upi/vsphere/vm/variables.tf
+++ b/upi/vsphere/vm/variables.tf
@@ -1,7 +1,3 @@
-variable "hostnames_ip_addresses" {
-  type = map(string)
-}
-
 variable "ignition" {
   type      = string
   sensitive = true
@@ -60,3 +56,10 @@ variable "dns_addresses" {
   type = list(string)
 }
 
+variable "vmname" {
+  type = string
+}
+
+variable "ipaddress" {
+  type = string
+}
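
Note on the default failure domain: when failure_domains is left at its default of [], the locals block in upi/vsphere/main.tf assembles a single failure domain from the existing flat variables, so a single-datacenter configuration needs no new inputs. A minimal tfvars sketch (hypothetical values, not taken from the diff):

    // terraform.tfvars -- hypothetical single-domain setup; failure_domains is deliberately omitted
    vsphere_datacenter = "dc1"
    vsphere_cluster    = "devel"
    vsphere_datastore  = "nvme-ds1"
    vm_network         = "VM Network"

With failure_domains unset, every VM is placed in that one default domain and distributed_virtual_switch_uuid stays empty, matching the fallback object built in the locals block.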
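Note on placement: the control-plane and compute modules select a failure domain with count.index % local.failure_domain_count, so nodes are spread round-robin across the configured domains. A standalone sketch of that mapping, using two hypothetical domains and three control-plane nodes:

    locals {
      // Hypothetical domains, shaped like the failure_domains objects in main.tf.
      failure_domains = [
        { datacenter = "dc1", cluster = "devel", datastore = "nvme-ds1", network = "ci-segment-151", distributed_virtual_switch_uuid = "" },
        { datacenter = "dc2", cluster = "devel2", datastore = "nvme-ds2", network = "ci-segment-151", distributed_virtual_switch_uuid = "" }
      ]
      failure_domain_count = length(local.failure_domains)

      // Datacenter chosen for each node index: ["dc1", "dc2", "dc1"]
      control_plane_placement = [
        for idx in range(3) : local.failure_domains[idx % local.failure_domain_count]["datacenter"]
      ]
    }

With a single-element list (the default case above), every index maps back to element 0, which reproduces the previous single-datacenter behaviour.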
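Note on the static-IP kernel arguments: vm/main.tf hands the node's network configuration to Afterburn through guestinfo.afterburn.initrd.network-kargs, and the change above only swaps the per-VM for_each values for the new vmname and ipaddress variables; the gateway is still assumed to be the first host of machine_cidr. A sketch of the resulting string, with locals standing in for the module variables and hypothetical values:

    locals {
      // Hypothetical inputs for one node.
      machine_cidr  = "10.0.0.0/24"
      ipaddress     = "10.0.0.20"
      vmname        = "control-plane-0"
      dns_addresses = ["1.1.1.1", "9.9.9.9"]

      // Renders as: ip=10.0.0.20::10.0.0.1:255.255.255.0:control-plane-0:ens192:none:1.1.1.1:9.9.9.9
      network_kargs = "ip=${local.ipaddress}::${cidrhost(local.machine_cidr, 1)}:${cidrnetmask(local.machine_cidr)}:${local.vmname}:ens192:none:${join(":", local.dns_addresses)}"
    }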