
Merge pull request #1446 from staebler/vsphere_tf

upi/vshpere: Add initial support for vSphere UPI
Merged by OpenShift Merge Robot on 2019-03-21 22:52:04 -07:00 (committed via GitHub).
11 changed files with 592 additions and 0 deletions

upi/vsphere/README.md

@@ -0,0 +1,56 @@
1. Create an install-config.yaml.
There is no vSphere platform yet, so use the `none` platform.
The machine CIDR for the dev cluster is 139.178.89.192/26.
```
apiVersion: v1beta4
baseDomain: devcluster.openshift.com
metadata:
name: mstaeble
networking:
machineCIDR: "139.178.89.192/26"
platform:
none: {}
pullSecret: YOUR_PULL_SECRET
sshKey: YOUR_SSH_KEY
```
2. Run `openshift-install create ignition-configs`.
3. Fill out a terraform.tfvars file with the ignition configs generated.
There is an example terraform.tfvars file in this directory named terraform.tfvars.example. The example file is set up for use with the dev cluster running at vcsa.vmware.devcluster.openshift.com. At a minimum, you need to set values for `cluster_id`, `cluster_domain`, `vsphere_user`, `vsphere_password`, `bootstrap_ignition_url`, `control_plane_ignition`, and `compute_ignition`.
The bootstrap ignition config must be placed in a location that will be accessible by the bootstrap machine. For example, you could store the bootstrap ignition config in a gist.
Initially, the `bootstrap_complete` variable must be false, the `bootstrap_ip` variable must be an empty string, and the `control_plane_ips` variable must be an empty list, as in the sketch below.
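For the initial run, the bootstrap-related entries in terraform.tfvars amount to the following sketch; the rest of the required values (credentials, cluster identifiers, ignition configs) come from terraform.tfvars.example:
```
// Bootstrap-phase values for the first `terraform apply`:
// no bootstrap or control plane IPs are known yet.
bootstrap_complete = "false"
bootstrap_ip       = ""
control_plane_ips  = []
```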
4. Run `terraform init`.
5. Run `terraform apply -auto-approve`.
6. Find the IP address of the bootstrap machine.
If you provided an extra user, you can use that user to log into the bootstrap machine via the vSphere web console.
Alternatively, you could iterate through the IP addresses in the 139.178.89.192/26 block looking for one that has the expected hostname, which is bootstrap-0.{cluster_domain}. For example, `ssh -i ~/.ssh/libra.pem -o StrictHostKeyChecking=no -q core@139.178.89.199 hostname`. A small loop, sketched below, can automate the scan.
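A rough scan of the dev cluster's block, assuming the same SSH key as above (adjust the block and key path for your environment):
```
# Probe every usable address in 139.178.89.192/26 and print the hostname of any machine that answers.
for i in $(seq 193 254); do
  ip="139.178.89.${i}"
  name=$(ssh -i ~/.ssh/libra.pem -o StrictHostKeyChecking=no -o ConnectTimeout=2 -q core@"${ip}" hostname 2>/dev/null)
  [ -n "${name}" ] && echo "${ip} ${name}"
done
```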
7. Update the terraform.tfvars file with the IP address of the bootstrap machine.
8. Run `terraform apply -auto-approve`.
From this point forward, route53 resources will be managed by terraform. You will need to have your AWS profile set and a region specified.
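For example, with a named AWS profile (the profile and region names here are placeholders), the re-run might look like:
```
# The AWS provider used for the route53 module reads these standard environment variables.
export AWS_PROFILE=my-profile        # placeholder profile name
export AWS_DEFAULT_REGION=us-east-1  # the provider requires a region even though Route 53 is global
terraform apply -auto-approve
```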
9. Find the IP addresses of the control plane machines. See step 6 for examples of how to do this. The expected hostnames are control-plane-{0,1,2}.{cluster_domain}. Note that the control plane machines change their IP addresses once, and you need the final addresses; if you happen to record the first set, you can update them in the terraform.tfvars file later and re-run terraform.
10. Update the terraform.tfvars file with the IP addresses of the control plane machines.
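Together, steps 7 and 10 amount to filling in the discovered addresses, for example (the addresses shown are placeholders in the dev cluster's block):
```
// IPs discovered in steps 6 and 9 (placeholder values).
bootstrap_ip      = "139.178.89.199"
control_plane_ips = ["139.178.89.200", "139.178.89.201", "139.178.89.202"]
```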
11. Run `terraform apply -auto-approve`.
12. Run `openshift-install user-provided-infrastructure`. Wait for the bootstrapping to complete.
You *may* need to log in to each of the control plane machines. For reasons not yet understood, the etcd-member pod does not seem to start until someone has logged in to the machine.
13. Update the terraform.tfvars file to set the `bootstrap_complete` variable to "true".
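In terraform.tfvars this is a one-line change, mirroring the commented-out line in terraform.tfvars.example:
```
bootstrap_complete = "true"
```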
14. Run `terraform apply -auto-approve`.
15. Run `openshift-install user-provided-infrastructure finish`. Wait for the cluster install to finish.
Currently, the cluster install does not finish: there is an outstanding issue with the openshift-console operator not installing successfully. The cluster should still be usable, though, apart from the console.
16. Enjoy your new OpenShift cluster.
17. Run `terraform destroy -auto-approve`.

upi/vsphere/machine/main.tf

@@ -0,0 +1,96 @@
locals {
ignition_encoded = "data:text/plain;charset=utf-8;base64,${base64encode(var.ignition)}"
}
data "ignition_file" "hostname" {
count = "${var.instance_count}"
filesystem = "root"
path = "/etc/hostname"
mode = "420"
content {
content = "${var.name}-${count.index}.${var.cluster_domain}"
}
}
data "ignition_user" "extra_users" {
count = "${length(var.extra_user_names)}"
name = "${var.extra_user_names[count.index]}"
password_hash = "${var.extra_user_password_hashes[count.index]}"
}
data "ignition_config" "ign" {
count = "${var.instance_count}"
append {
source = "${var.ignition_url != "" ? var.ignition_url : local.ignition_encoded}"
}
files = [
"${data.ignition_file.hostname.*.id[count.index]}",
]
users = ["${data.ignition_user.extra_users.*.id}"]
}
resource "vsphere_virtual_machine" "vm" {
count = "${var.instance_count}"
name = "${var.name}-${count.index}"
resource_pool_id = "${var.resource_pool_id}"
datastore_id = "${var.datastore_id}"
num_cpus = "4"
memory = "8192"
guest_id = "other26xLinux64Guest"
wait_for_guest_net_timeout = 0
wait_for_guest_net_routable = false
network_interface {
network_id = "${var.network_id}"
}
disk {
label = "disk0"
size = 60
// want to change this to thin provisioned. need to change template.
thin_provisioned = false
}
clone {
template_uuid = "${var.vm_template_id}"
}
vapp {
properties {
"guestinfo.coreos.config.data" = "${data.ignition_config.ign.*.rendered[count.index]}"
}
}
}
/*
"networkd": {
"units": [
{
"contents": "[Match]\nName=eth0\n\n[Network]\nDNS=8.8.8.8\nAddress=${var.bootstrap_ip}/${var.network_prefix}\nGateway=${var.gateway}",
"name": "00-eth0.network"
}
]
},
*/
/*
{
"filesystem":"root",
"path":"/etc/sysconfig/network-scripts/ifcfg-eth0",
"contents":
{
"source":"data:,TYPE%3DEthernet%0APROXY_METHOD%3Dnone%0ABROWSER_ONLY%3Dno%0ABOOTPROTO%3Dnone%0ADEFROUTE%3Dyes%0AIPV4_FAILURE_FATAL%3Dno%0AIPV6INIT%3Dyes%0AIPV6_AUTOCONF%3Dyes%0AIPV6_DEFROUTE%3Dyes%0AIPV6_FAILURE_FATAL%3Dno%0AIPV6_ADDR_GEN_MODE%3Dstable-privacy%0ANAME%3Deth0%0AUUID%3Dcc0fcac7-aabd-440a-b0e2-4c98ed3ef8b5%0ADEVICE%3Deth0%0AONBOOT%3Dyes%0AIPADDR%3D${var.bootstrap_ip}%0APREFIX%3D${var.network_prefix}%0AGATEWAY%3D${var.gateway}%0ADNS1%3D8.8.8.8%0A",
"verification":{}
},
"mode":420
}]
*/

upi/vsphere/machine/variables.tf

@@ -0,0 +1,45 @@
variable "name" {
type = "string"
}
variable "instance_count" {
type = "string"
}
variable "ignition" {
type = "string"
default = ""
}
variable "ignition_url" {
type = "string"
default = ""
}
variable "resource_pool_id" {
type = "string"
}
variable "datastore_id" {
type = "string"
}
variable "network_id" {
type = "string"
}
variable "vm_template_id" {
type = "string"
}
variable "cluster_domain" {
type = "string"
}
variable "extra_user_names" {
type = "list"
}
variable "extra_user_password_hashes" {
type = "list"
}

upi/vsphere/main.tf

@@ -0,0 +1,100 @@
locals {
network_prefix = "${element(split("/", var.machine_cidr), 1)}"
gateway = "${cidrhost(var.machine_cidr,1)}"
}
provider "vsphere" {
user = "${var.vsphere_user}"
password = "${var.vsphere_password}"
vsphere_server = "${var.vsphere_server}"
allow_unverified_ssl = true
}
data "vsphere_datacenter" "dc" {
name = "${var.vsphere_datacenter}"
}
data "vsphere_compute_cluster" "compute_cluster" {
name = "${var.vsphere_cluster}"
datacenter_id = "${data.vsphere_datacenter.dc.id}"
}
data "vsphere_datastore" "datastore" {
name = "${var.vsphere_datastore}"
datacenter_id = "${data.vsphere_datacenter.dc.id}"
}
data "vsphere_network" "network" {
name = "${var.vm_network}"
datacenter_id = "${data.vsphere_datacenter.dc.id}"
}
data "vsphere_virtual_machine" "template" {
name = "${var.vm_template}"
datacenter_id = "${data.vsphere_datacenter.dc.id}"
}
module "resource_pool" {
source = "./resource_pool"
name = "${var.cluster_id}"
datacenter_id = "${data.vsphere_datacenter.dc.id}"
vsphere_cluster = "${var.vsphere_cluster}"
}
module "bootstrap" {
source = "./machine"
name = "bootstrap"
instance_count = "${var.bootstrap_complete ? 0 : 1}"
ignition_url = "${var.bootstrap_ignition_url}"
resource_pool_id = "${module.resource_pool.pool_id}"
datastore_id = "${data.vsphere_datastore.datastore.id}"
network_id = "${data.vsphere_network.network.id}"
vm_template_id = "${data.vsphere_virtual_machine.template.id}"
cluster_domain = "${var.cluster_domain}"
extra_user_names = ["${var.extra_user_names}"]
extra_user_password_hashes = ["${var.extra_user_password_hashes}"]
}
module "control_plane" {
source = "./machine"
name = "control-plane"
instance_count = "${var.control_plane_instance_count}"
ignition = "${var.control_plane_ignition}"
resource_pool_id = "${module.resource_pool.pool_id}"
datastore_id = "${data.vsphere_datastore.datastore.id}"
network_id = "${data.vsphere_network.network.id}"
vm_template_id = "${data.vsphere_virtual_machine.template.id}"
cluster_domain = "${var.cluster_domain}"
extra_user_names = ["${var.extra_user_names}"]
extra_user_password_hashes = ["${var.extra_user_password_hashes}"]
}
module "compute" {
source = "./machine"
name = "compute"
instance_count = "${var.compute_instance_count}"
ignition = "${var.compute_ignition}"
resource_pool_id = "${module.resource_pool.pool_id}"
datastore_id = "${data.vsphere_datastore.datastore.id}"
network_id = "${data.vsphere_network.network.id}"
vm_template_id = "${data.vsphere_virtual_machine.template.id}"
cluster_domain = "${var.cluster_domain}"
extra_user_names = ["${var.extra_user_names}"]
extra_user_password_hashes = ["${var.extra_user_password_hashes}"]
}
module "dns" {
source = "./route53"
base_domain = "${var.base_domain}"
cluster_domain = "${var.cluster_domain}"
bootstrap_ip = "${var.bootstrap_complete ? "" : var.bootstrap_ip}"
control_plane_ips = "${var.control_plane_ips}"
}

upi/vsphere/resource_pool/main.tf

@@ -0,0 +1,9 @@
data "vsphere_compute_cluster" "compute_cluster" {
name = "${var.vsphere_cluster}"
datacenter_id = "${var.datacenter_id}"
}
resource "vsphere_resource_pool" "resource_pool" {
name = "${var.name}"
parent_resource_pool_id = "${data.vsphere_compute_cluster.compute_cluster.resource_pool_id}"
}

upi/vsphere/resource_pool/outputs.tf

@@ -0,0 +1,3 @@
output "pool_id" {
value = "${vsphere_resource_pool.resource_pool.id}"
}

upi/vsphere/resource_pool/variables.tf

@@ -0,0 +1,11 @@
variable "name" {
type = "string"
}
variable "datacenter_id" {
type = "string"
}
variable "vsphere_cluster" {
type = "string"
}

upi/vsphere/route53/main.tf

@@ -0,0 +1,63 @@
locals {
route53_zone_count = "${length(var.control_plane_ips) + length(var.bootstrap_ip) == "0" ? "0" : "1"}"
}
data "aws_route53_zone" "base" {
name = "${var.base_domain}"
}
resource "aws_route53_zone" "cluster" {
count = "${local.route53_zone_count}"
name = "${var.cluster_domain}"
force_destroy = true
tags = "${map(
"Name", "${var.cluster_domain}",
"Platform", "vSphere")}"
}
resource "aws_route53_record" "name_server" {
count = "${local.route53_zone_count}"
name = "${var.cluster_domain}"
type = "NS"
ttl = "300"
zone_id = "${data.aws_route53_zone.base.zone_id}"
records = ["${aws_route53_zone.cluster.name_servers}"]
}
resource "aws_route53_record" "api" {
count = "${local.route53_zone_count}"
type = "A"
ttl = "60"
zone_id = "${aws_route53_zone.cluster.zone_id}"
name = "api.${var.cluster_domain}"
set_identifier = "api"
records = "${compact(concat(list(var.bootstrap_ip), var.control_plane_ips))}"
weighted_routing_policy {
weight = 90
}
}
resource "aws_route53_record" "etcd_a_nodes" {
count = "${length(var.control_plane_ips)}"
type = "A"
ttl = "60"
zone_id = "${aws_route53_zone.cluster.zone_id}"
name = "etcd-${count.index}.${var.cluster_domain}"
records = ["${var.control_plane_ips[count.index]}"]
}
resource "aws_route53_record" "etcd_cluster" {
count = "${length(var.control_plane_ips) == "0" ? "0" : "1"}"
type = "SRV"
ttl = "60"
zone_id = "${aws_route53_zone.cluster.zone_id}"
name = "_etcd-server-ssl._tcp"
records = ["${formatlist("0 10 2380 %s", aws_route53_record.etcd_a_nodes.*.fqdn)}"]
}

upi/vsphere/route53/variables.tf

@@ -0,0 +1,17 @@
variable "cluster_domain" {
description = "The domain for the cluster that all DNS records must belong to"
type = "string"
}
variable "bootstrap_ip" {
type = "string"
}
variable "control_plane_ips" {
type = "list"
}
variable "base_domain" {
description = "The base domain used for public records."
type = "string"
}

upi/vsphere/terraform.tfvars.example

@@ -0,0 +1,61 @@
// Set to true once the bootstrapping is complete. The bootstrap machine will be destroyed if this variable is set to "true".
//bootstrap_complete = true
// The IP address of the bootstrap node.
// If using the dev vSphere cluster, this IP will be in the 139.178.89.192/26 block.
//bootstrap_ip = "139.178.89.xxx"
// The IP addresses of the control plane nodes.
// If using the dev vSphere cluster, these IPs will be in the 139.178.89.192/26 block.
//control_plane_ips = ["139.178.89.xxx","139.178.89.xxx","139.178.89.xxx"]
// ID identifying the cluster to create. Use your username so that resources created can be tracked back to you.
cluster_id = "example-cluster"
// Domain of the cluster. This should be "${cluster_id}.${base_domain}".
cluster_domain = "example-cluster.devcluster.openshift.com"
// Base domain from which the cluster domain is a subdomain.
base_domain = "devcluster.openshift.com"
// Set extra users helpful for logging into the machines from the vSphere web UI.
//extra_user_names = ["me"]
//extra_user_password_hashes = ["$1$d51URPHU$NQGWywqfyGHYJrKK7GsOQ1"]
// Name of the vSphere server. The dev cluster is on "vcsa.vmware.devcluster.openshift.com".
vsphere_server = "vcsa.vmware.devcluster.openshift.com"
// User on the vSphere server.
vsphere_user = "YOUR_USER"
// Password of the user on the vSphere server.
vsphere_password = "YOUR_PASSWORD"
// Name of the vSphere cluster. The dev cluster is "devel".
vsphere_cluster = "devel"
// Name of the vSphere data center. The dev cluster is "dc1".
vsphere_datacenter = "dc1"
// Name of the vSphere data store to use for the VMs. The dev cluster uses "nvme-ds1".
vsphere_datastore = "nvme-ds1"
// CIDR block for the VMs. The dev cluster uses "139.178.89.192/26".
machine_cidr = "139.178.89.192/26"
// Name of the VM template to clone to create VMs for the cluster. The dev cluster has a template named "rhcos-davis-no-ig".
vm_template = "rhcos-davis-no-ig"
// URL of the bootstrap ignition. This needs to be publicly accessible so that the bootstrap machine can pull the ignition.
bootstrap_ignition_url = "URL_FOR_YOUR_BOOTSTRAP_IGNITION"
// Ignition config for the control plane machines. You should copy the contents of the master.ign generated by the installer.
control_plane_ignition = <<END_OF_MASTER_IGNITION
Copy the master ignition generated by the installer here.
END_OF_MASTER_IGNITION
// Ignition config for the compute machines. You should copy the contents of the worker.ign generated by the installer.
compute_ignition = <<END_OF_WORKER_IGNITION
Copy the worker ignition generated by the installer here.
END_OF_WORKER_IGNITION

upi/vsphere/variables.tf

@@ -0,0 +1,131 @@
//////
// vSphere variables
//////
variable "vsphere_server" {
type = "string"
description = "This is the vSphere server for the environment."
}
variable "vsphere_user" {
type = "string"
description = "vSphere server user for the environment."
}
variable "vsphere_password" {
type = "string"
description = "vSphere server password"
}
variable "vsphere_cluster" {
type = "string"
description = "This is the name of the vSphere cluster."
}
variable "vsphere_datacenter" {
type = "string"
description = "This is the name of the vSphere data center."
}
variable "vsphere_datastore" {
type = "string"
description = "This is the name of the vSphere data store."
}
variable "vm_template" {
type = "string"
description = "This is the name of the VM template to clone."
}
variable "vm_network" {
type = "string"
description = "This is the name of the publicly accessible network for cluster ingress and access."
default = "VM Network"
}
variable "extra_user_names" {
type = "list"
default = []
}
variable "extra_user_password_hashes" {
type = "list"
default = []
}
/////////
// OpenShift cluster variables
/////////
variable "cluster_id" {
type = "string"
description = "The cluster id, at most 27 characters long, containing only alphanumeric characters and hyphens."
}
variable "base_domain" {
type = "string"
description = "The base DNS zone to add the sub zone to."
}
variable "cluster_domain" {
type = "string"
description = "The domain for the cluster that all DNS records must belong to."
}
variable "machine_cidr" {
type = "string"
description = "The CIDR block of the public network for the machines."
}
/////////
// Bootstrap machine variables
/////////
variable "bootstrap_ignition_url" {
type = "string"
}
variable "bootstrap_complete" {
type = "string"
default = "false"
}
variable "bootstrap_ip" {
type = "string"
description = "The IP address in the machine_cidr to apply to the bootstrap."
default = ""
}
///////////
// Control Plane machine variables
///////////
variable "control_plane_instance_count" {
type = "string"
description = "The number of control plane instances to deploy."
default = 3
}
variable "control_plane_ignition" {
type = "string"
}
variable "control_plane_ips" {
type = "list"
description = "The IP addresses in the machine_cidr to apply to the control plane machines."
default = []
}
//////////
// Compute machine variables
//////////
variable "compute_instance_count" {
type = "string"
description = "The number of compute instances to deploy."
default = 3
}
variable "compute_ignition" {
type = "string"
}